From: Kalle Valo Date: Tue, 17 Nov 2015 18:57:38 +0000 (+0200) Subject: iwlwifi: move under intel vendor directory X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e705c12146aa9c69ca498d4ebb83ba7138f9b41f;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git iwlwifi: move under intel vendor directory Part of reorganising wireless drivers directory and Kconfig. Signed-off-by: Kalle Valo --- diff --git a/MAINTAINERS b/MAINTAINERS index 83f9c2ba6962..d72be8c88138 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5651,7 +5651,7 @@ L: linux-wireless@vger.kernel.org W: http://intellinuxwireless.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git S: Supported -F: drivers/net/wireless/iwlwifi/ +F: drivers/net/wireless/intel/iwlwifi/ INTEL MANAGEMENT ENGINE (mei) M: Tomas Winkler diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index f293831e2878..d71efe89970b 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -162,7 +162,6 @@ config MWL8K source "drivers/net/wireless/ath/Kconfig" source "drivers/net/wireless/hostap/Kconfig" -source "drivers/net/wireless/iwlwifi/Kconfig" source "drivers/net/wireless/libertas/Kconfig" source "drivers/net/wireless/orinoco/Kconfig" source "drivers/net/wireless/p54/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 135aa8753f69..6bcb2925a6ce 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -30,7 +30,6 @@ obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/ obj-$(CONFIG_MWL8K) += mwl8k.o -obj-$(CONFIG_IWLWIFI) += iwlwifi/ obj-$(CONFIG_RT2X00) += rt2x00/ obj-$(CONFIG_WL_MEDIATEK) += mediatek/ diff --git a/drivers/net/wireless/intel/Kconfig b/drivers/net/wireless/intel/Kconfig index 0a7cd61e528c..5b14f2f64a8a 100644 --- a/drivers/net/wireless/intel/Kconfig +++ b/drivers/net/wireless/intel/Kconfig @@ -13,5 +13,6 @@ if WLAN_VENDOR_INTEL source "drivers/net/wireless/intel/ipw2x00/Kconfig" source "drivers/net/wireless/intel/iwlegacy/Kconfig" +source "drivers/net/wireless/intel/iwlwifi/Kconfig" endif # WLAN_VENDOR_INTEL diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile index cec507d3c6bf..c9cbcc85b569 100644 --- a/drivers/net/wireless/intel/Makefile +++ b/drivers/net/wireless/intel/Makefile @@ -2,3 +2,5 @@ obj-$(CONFIG_IPW2100) += ipw2x00/ obj-$(CONFIG_IPW2200) += ipw2x00/ obj-$(CONFIG_IWLEGACY) += iwlegacy/ + +obj-$(CONFIG_IWLWIFI) += iwlwifi/ diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig new file mode 100644 index 000000000000..6e949df399d6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -0,0 +1,161 @@ +config IWLWIFI + tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " + depends on PCI && MAC80211 && HAS_IOMEM + select FW_LOADER + ---help--- + Select to build the driver supporting the: + + Intel Wireless WiFi Link Next-Gen AGN + + This option enables support for use with the following hardware: + Intel Wireless WiFi Link 6250AGN Adapter + Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN) + Intel WiFi Link 1000BGN + Intel Wireless WiFi 5150AGN + Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN + Intel 6005 Series Wi-Fi Adapters + Intel 6030 Series Wi-Fi Adapters + Intel Wireless WiFi Link 6150BGN 2 Adapter + Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) + Intel 2000 Series Wi-Fi Adapters + Intel 7260 Wi-Fi Adapter + Intel 3160 Wi-Fi Adapter + Intel 7265 Wi-Fi 
Adapter + Intel 8260 Wi-Fi Adapter + Intel 3165 Wi-Fi Adapter + + + This driver uses the kernel's mac80211 subsystem. + + In order to use this driver, you will need a firmware + image for it. You can obtain the microcode from: + + . + + The firmware is typically installed in /lib/firmware. You can + look in the hotplug script /etc/hotplug/firmware.agent to + determine which directory FIRMWARE_DIR is set to when the script + runs. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The + module will be called iwlwifi. + +if IWLWIFI + +config IWLWIFI_LEDS + bool + depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI + select LEDS_TRIGGERS + select MAC80211_LEDS + default y + +config IWLDVM + tristate "Intel Wireless WiFi DVM Firmware support" + default IWLWIFI + help + This is the driver that supports the DVM firmware. The list + of the devices that use this firmware is available here: + https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware + +config IWLMVM + tristate "Intel Wireless WiFi MVM Firmware support" + select WANT_DEV_COREDUMP + help + This is the driver that supports the MVM firmware. The list + of the devices that use this firmware is available here: + https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware + +# don't call it _MODULE -- will confuse Kconfig/fixdep/... +config IWLWIFI_OPMODE_MODULAR + bool + default y if IWLDVM=m + default y if IWLMVM=m + +comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM" + depends on IWLDVM=n && IWLMVM=n + +config IWLWIFI_BCAST_FILTERING + bool "Enable broadcast filtering" + depends on IWLMVM + help + Say Y here to enable default bcast filtering configuration. + + Enabling broadcast filtering will drop any incoming wireless + broadcast frames, except some very specific predefined + patterns (e.g. incoming arp requests). + + If unsure, don't enable this option, as some programs might + expect incoming broadcasts for their normal operations. + +config IWLWIFI_UAPSD + bool "enable U-APSD by default" + depends on IWLMVM + help + Say Y here to enable U-APSD by default. This may cause + interoperability problems with some APs, manifesting in lower than + expected throughput due to those APs not enabling aggregation + + If unsure, say N. + +menu "Debugging Options" + +config IWLWIFI_DEBUG + bool "Enable full debugging output in the iwlwifi driver" + ---help--- + This option will enable debug tracing output for the iwlwifi drivers + + This will result in the kernel module being ~100k larger. You can + control which debug output is sent to the kernel log by setting the + value in + + /sys/module/iwlwifi/parameters/debug + + This entry will only exist if this option is enabled. + + To set a value, simply echo an 8-byte hex value to the same file: + + % echo 0x43fff > /sys/module/iwlwifi/parameters/debug + + You can find the list of debug mask values in: + drivers/net/wireless/iwlwifi/iwl-debug.h + + If this is your first time using this driver, you should say Y here + as the debug information can assist others in helping you resolve + any problems you may encounter. + +config IWLWIFI_DEBUGFS + bool "iwlwifi debugfs support" + depends on MAC80211_DEBUGFS + ---help--- + Enable creation of debugfs files for the iwlwifi drivers. This + is a low-impact option that allows getting insight into the + driver's state at runtime. 
+ +config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE + bool "Experimental uCode support" + depends on IWLWIFI_DEBUG + ---help--- + Enable use of experimental ucode for testing and debugging. + +config IWLWIFI_DEVICE_TRACING + bool "iwlwifi device access tracing" + depends on EVENT_TRACING + default y + help + Say Y here to trace all commands, including TX frames and IO + accesses, sent to the device. If you say yes, iwlwifi will + register with the ftrace framework for event tracing and dump + all this information to the ringbuffer, you may need to + increase the ringbuffer size. See the ftrace documentation + for more information. + + When tracing is not enabled, this option still has some + (though rather small) overhead. + + If unsure, say Y so we can help you better when problems + occur. +endmenu + +endif diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile new file mode 100644 index 000000000000..dbfc5b18bcb7 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -0,0 +1,23 @@ +# common +obj-$(CONFIG_IWLWIFI) += iwlwifi.o +iwlwifi-objs += iwl-io.o +iwlwifi-objs += iwl-drv.o +iwlwifi-objs += iwl-debug.o +iwlwifi-objs += iwl-notif-wait.o +iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o +iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o +iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o +iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o +iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o +iwlwifi-objs += iwl-trans.o + +iwlwifi-objs += $(iwlwifi-m) + +iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o + +ccflags-y += -D__CHECK_ENDIAN__ -I$(src) + +obj-$(CONFIG_IWLDVM) += dvm/ +obj-$(CONFIG_IWLMVM) += mvm/ + +CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile new file mode 100644 index 000000000000..4d19685f31c3 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile @@ -0,0 +1,13 @@ +# DVM +obj-$(CONFIG_IWLDVM) += iwldvm.o +iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o +iwldvm-objs += lib.o calib.o tt.o sta.o rx.o + +iwldvm-objs += power.o +iwldvm-objs += scan.o +iwldvm-objs += rxon.o devices.o + +iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o +iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o + +ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h new file mode 100644 index 000000000000..991def878881 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -0,0 +1,485 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_agn_h__ +#define __iwl_agn_h__ + +#include "iwl-config.h" + +#include "dev.h" + +/* The first 11 queues (0-10) are used otherwise */ +#define IWLAGN_FIRST_AMPDU_QUEUE 11 + +/* AUX (TX during scan dwell) queue */ +#define IWL_AUX_QUEUE 10 + +#define IWL_INVALID_STATION 255 + +/* device operations */ +extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_105_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg; + + +#define TIME_UNIT 1024 + +/***************************************************** +* DRIVER STATUS FUNCTIONS +******************************************************/ +#define STATUS_RF_KILL_HW 0 +#define STATUS_CT_KILL 1 +#define STATUS_ALIVE 2 +#define STATUS_READY 3 +#define STATUS_EXIT_PENDING 5 +#define STATUS_STATISTICS 6 +#define STATUS_SCANNING 7 +#define STATUS_SCAN_ABORTING 8 +#define STATUS_SCAN_HW 9 +#define STATUS_FW_ERROR 10 +#define STATUS_CHANNEL_SWITCH_PENDING 11 +#define STATUS_SCAN_COMPLETE 12 +#define STATUS_POWER_PMI 13 + +struct iwl_ucode_capabilities; + +extern const struct ieee80211_ops iwlagn_hw_ops; + +static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) +{ + hdr->op_code = cmd; + hdr->first_group = 0; + hdr->groups_num = 1; + hdr->data_valid = 1; +} + +void iwl_down(struct iwl_priv *priv); +void iwl_cancel_deferred_work(struct iwl_priv *priv); +void iwlagn_prepare_restart(struct iwl_priv *priv); +void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); + +bool iwl_check_for_ct_kill(struct iwl_priv *priv); + +void iwlagn_lift_passive_no_rx(struct iwl_priv *priv); + +/* MAC80211 */ +struct ieee80211_hw *iwl_alloc_all(void); +int iwlagn_mac_setup_register(struct iwl_priv *priv, + const struct iwl_ucode_capabilities *capa); +void iwlagn_mac_unregister(struct iwl_priv *priv); + +/* commands */ +int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); +int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id, + u32 flags, u16 len, const void *data); + +/* RXON */ +void iwl_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwlagn_set_pan_params(struct iwl_priv *priv); +int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); +void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx); +int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed); +void iwlagn_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes); +void iwlagn_config_ht40(struct ieee80211_conf *conf, + struct iwl_rxon_context *ctx); +void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf); +void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx); +void iwl_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif); + +/* uCode */ +int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); +void iwl_send_prio_tbl(struct iwl_priv *priv); 
+int iwl_init_alive_start(struct iwl_priv *priv); +int iwl_run_init_ucode(struct iwl_priv *priv); +int iwl_load_ucode_wait_alive(struct iwl_priv *priv, + enum iwl_ucode_type ucode_type); +int iwl_send_calib_results(struct iwl_priv *priv); +int iwl_calib_set(struct iwl_priv *priv, + const struct iwl_calib_hdr *cmd, int len); +void iwl_calib_free_results(struct iwl_priv *priv); +int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf); +int iwlagn_hw_valid_rtc_data_addr(u32 addr); + +/* lib */ +int iwlagn_send_tx_power(struct iwl_priv *priv); +void iwlagn_temperature(struct iwl_priv *priv); +int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk); +void iwlagn_dev_txfifo_flush(struct iwl_priv *priv); +int iwlagn_send_beacon_cmd(struct iwl_priv *priv); +int iwl_send_statistics_request(struct iwl_priv *priv, + u8 flags, bool clear); + +static inline const struct ieee80211_supported_band *iwl_get_hw_mode( + struct iwl_priv *priv, enum ieee80211_band band) +{ + return priv->hw->wiphy->bands[band]; +} + +#ifdef CONFIG_PM_SLEEP +int iwlagn_send_patterns(struct iwl_priv *priv, + struct cfg80211_wowlan *wowlan); +int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan); +#endif + +/* rx */ +int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); +void iwl_setup_rx_handlers(struct iwl_priv *priv); +void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); + + +/* tx */ +int iwlagn_tx_skb(struct iwl_priv *priv, + struct ieee80211_sta *sta, + struct sk_buff *skb); +int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); +int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u8 buf_size); +int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb); +void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); + +static inline u32 iwl_tx_status_to_mac80211(u32 status) +{ + status &= TX_STATUS_MSK; + + switch (status) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + return IEEE80211_TX_STAT_ACK; + case TX_STATUS_FAIL_DEST_PS: + case TX_STATUS_FAIL_PASSIVE_NO_RX: + return IEEE80211_TX_STAT_TX_FILTERED; + default: + return 0; + } +} + +static inline bool iwl_is_tx_success(u32 status) +{ + status &= TX_STATUS_MSK; + return (status == TX_STATUS_SUCCESS) || + (status == TX_STATUS_DIRECT_DONE); +} + +u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); + +/* scan */ +void iwlagn_post_scan(struct iwl_priv *priv); +int iwl_force_rf_reset(struct iwl_priv *priv, bool external); +void iwl_init_scan_params(struct iwl_priv *priv); +int iwl_scan_cancel(struct iwl_priv *priv); +void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); +void iwl_force_scan_end(struct iwl_priv *priv); +void iwl_internal_short_hw_scan(struct iwl_priv *priv); +void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); +void iwl_setup_scan_deferred_work(struct iwl_priv *priv); +void iwl_cancel_scan_deferred_work(struct iwl_priv *priv); +int __must_check iwl_scan_initiate(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum iwl_scan_type scan_type, + enum ieee80211_band band); + +/* For faster active scanning, scan will move to the next 
channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ +#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ + +#define IWL_SCAN_CHECK_WATCHDOG (HZ * 15) + + +/* bt coex */ +void iwlagn_send_advance_bt_config(struct iwl_priv *priv); +void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv); +void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv); +void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv); +void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv); +void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena); + +static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) +{ + return priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +const char *iwl_get_tx_fail_reason(u32 status); +const char *iwl_get_agg_tx_fail_reason(u16 status); +#else +static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; } +static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; } +#endif + + +/* station management */ +int iwlagn_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add); +#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ +#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ +#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of + being activated */ +#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211; + (this is for the IBSS BSSID stations) */ +#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */ + + +void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx); +void iwl_clear_ucode_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_dealloc_bcast_stations(struct iwl_priv *priv); +int iwl_get_free_ucode_key_offset(struct iwl_priv *priv); +int iwl_send_add_sta(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags); +int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r); +int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, + const u8 *addr); +void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, + const u8 *addr); +u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, struct ieee80211_sta *sta); + +int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq, u8 flags, bool init); +void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); +int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct ieee80211_sta *sta); + +bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta *sta); + +static inline int iwl_sta_id(struct ieee80211_sta *sta) +{ + if (WARN_ON(!sta)) + return IWL_INVALID_STATION; + + return ((struct iwl_station_priv *)sta->drv_priv)->sta_id; +} + +int iwlagn_alloc_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r); +int 
iwl_remove_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key); +int iwl_set_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key); +int iwl_restore_default_wep_keys(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key, + struct ieee80211_sta *sta); +int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key, + struct ieee80211_sta *sta); +void iwl_update_tkip_key(struct iwl_priv *priv, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); +int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); +int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid, u16 ssn); +int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid); +void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); +int iwl_update_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl_update_bcast_stations(struct iwl_priv *priv); + +/* rate */ +static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) +{ + return BIT(ant_idx) << RATE_MCS_ANT_POS; +} + +static inline u8 iwl_hw_get_rate(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & RATE_MCS_RATE_MSK; +} + +static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) +{ + return cpu_to_le32(flags|(u32)rate); +} + +int iwl_alive_start(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_DEBUG +void iwl_print_rx_config_cmd(struct iwl_priv *priv, + enum iwl_rxon_context_id ctxid); +#else +static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv, + enum iwl_rxon_context_id ctxid) +{ +} +#endif + +/* status checks */ + +static inline int iwl_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY EXIT_PENDING is not set */ + return test_bit(STATUS_READY, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_is_rfkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status); +} + +static inline int iwl_is_ctkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_CT_KILL, &priv->status); +} + +static inline int iwl_is_ready_rf(struct iwl_priv *priv) +{ + if (iwl_is_rfkill(priv)) + return 0; + + return iwl_is_ready(priv); +} + +static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state) +{ + if (state) + set_bit(STATUS_POWER_PMI, &priv->status); + else + clear_bit(STATUS_POWER_PMI, &priv->status); + iwl_trans_set_pmi(priv->trans, state); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir); +#else +static inline int iwl_dbgfs_register(struct iwl_priv *priv, + struct dentry *dbgfs_dir) +{ + return 0; +} +#endif /* CONFIG_IWLWIFI_DEBUGFS */ + +#ifdef CONFIG_IWLWIFI_DEBUG +#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \ +do { \ + if (!iwl_is_rfkill((m))) \ + IWL_ERR(m, fmt, ##args); \ + else \ + __iwl_err((m)->dev, true, \ + !iwl_have_debug_level(IWL_DL_RADIO), \ + fmt, ##args); \ +} while (0) +#else +#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) 
\ +do { \ + if (!iwl_is_rfkill((m))) \ + IWL_ERR(m, fmt, ##args); \ + else \ + __iwl_err((m)->dev, true, true, fmt, ##args); \ +} while (0) +#endif /* CONFIG_IWLWIFI_DEBUG */ + +extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1]; + +static inline const char *iwl_dvm_get_cmd_string(u8 cmd) +{ + const char *s = iwl_dvm_cmd_strings[cmd]; + if (s) + return s; + return "UNKNOWN"; +} +#endif /* __iwl_agn_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c new file mode 100644 index 000000000000..20e6aa910700 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c @@ -0,0 +1,1113 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#include +#include + +#include "iwl-trans.h" + +#include "dev.h" +#include "calib.h" +#include "agn.h" + +/***************************************************************************** + * INIT calibrations framework + *****************************************************************************/ + +/* Opaque calibration results */ +struct iwl_calib_result { + struct list_head list; + size_t cmd_len; + struct iwl_calib_hdr hdr; + /* data follows */ +}; + +struct statistics_general_data { + u32 beacon_silence_rssi_a; + u32 beacon_silence_rssi_b; + u32 beacon_silence_rssi_c; + u32 beacon_energy_a; + u32 beacon_energy_b; + u32 beacon_energy_c; +}; + +int iwl_send_calib_results(struct iwl_priv *priv) +{ + struct iwl_host_cmd hcmd = { + .id = REPLY_PHY_CALIBRATION_CMD, + }; + struct iwl_calib_result *res; + + list_for_each_entry(res, &priv->calib_results, list) { + int ret; + + hcmd.len[0] = res->cmd_len; + hcmd.data[0] = &res->hdr; + hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + ret = iwl_dvm_send_cmd(priv, &hcmd); + if (ret) { + IWL_ERR(priv, "Error %d on calib cmd %d\n", + ret, res->hdr.op_code); + return ret; + } + } + + return 0; +} + +int iwl_calib_set(struct iwl_priv *priv, + const struct iwl_calib_hdr *cmd, int len) +{ + struct iwl_calib_result *res, *tmp; + + res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr), + GFP_ATOMIC); + if (!res) + return -ENOMEM; + memcpy(&res->hdr, cmd, len); + res->cmd_len = len; + + list_for_each_entry(tmp, &priv->calib_results, list) { + if (tmp->hdr.op_code == res->hdr.op_code) { + list_replace(&tmp->list, &res->list); + kfree(tmp); + return 0; + } + } + + /* wasn't in list already */ + list_add_tail(&res->list, &priv->calib_results); + + return 0; +} + +void iwl_calib_free_results(struct iwl_priv *priv) +{ + struct iwl_calib_result *res, *tmp; + + list_for_each_entry_safe(res, tmp, &priv->calib_results, list) { + list_del(&res->list); + kfree(res); + } +} + +/***************************************************************************** + * RUNTIME calibrations framework + *****************************************************************************/ + +/* "false alarms" are signals that our DSP tries to lock onto, + * but then determines that they are either noise, or transmissions + * from a distant wireless network (also "noise", really) that get + * "stepped on" by stronger transmissions within our own network. + * This algorithm attempts to set a sensitivity level that is high + * enough to receive all of our own network traffic, but not so + * high that our DSP gets too busy trying to lock onto non-network + * activity/noise. 
*/ +static int iwl_sens_energy_cck(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time, + struct statistics_general_data *rx_info) +{ + u32 max_nrg_cck = 0; + int i = 0; + u8 max_silence_rssi = 0; + u32 silence_ref = 0; + u8 silence_rssi_a = 0; + u8 silence_rssi_b = 0; + u8 silence_rssi_c = 0; + u32 val; + + /* "false_alarms" values below are cross-multiplications to assess the + * numbers of false alarms within the measured period of actual Rx + * (Rx is off when we're txing), vs the min/max expected false alarms + * (some should be expected if rx is sensitive enough) in a + * hypothetical listening period of 200 time units (TU), 204.8 msec: + * + * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time + * + * */ + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; + u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + data = &(priv->sensitivity_data); + + data->nrg_auto_corr_silence_diff = 0; + + /* Find max silence rssi among all 3 receivers. + * This is background noise, which may include transmissions from other + * networks, measured during silence before our network's beacon */ + silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & + ALL_BAND_FILTER) >> 8); + silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & + ALL_BAND_FILTER) >> 8); + silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & + ALL_BAND_FILTER) >> 8); + + val = max(silence_rssi_b, silence_rssi_c); + max_silence_rssi = max(silence_rssi_a, (u8) val); + + /* Store silence rssi in 20-beacon history table */ + data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi; + data->nrg_silence_idx++; + if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L) + data->nrg_silence_idx = 0; + + /* Find max silence rssi across 20 beacon history */ + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) { + val = data->nrg_silence_rssi[i]; + silence_ref = max(silence_ref, val); + } + IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", + silence_rssi_a, silence_rssi_b, silence_rssi_c, + silence_ref); + + /* Find max rx energy (min value!) among all 3 receivers, + * measured during beacon frame. + * Save it in 10-beacon history table. */ + i = data->nrg_energy_idx; + val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c); + data->nrg_value[i] = min(rx_info->beacon_energy_a, val); + + data->nrg_energy_idx++; + if (data->nrg_energy_idx >= 10) + data->nrg_energy_idx = 0; + + /* Find min rx energy (max value) across 10 beacon history. + * This is the minimum signal level that we want to receive well. + * Add backoff (margin so we don't miss slightly lower energy frames). + * This establishes an upper bound (min value) for energy threshold. */ + max_nrg_cck = data->nrg_value[0]; + for (i = 1; i < 10; i++) + max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); + max_nrg_cck += 6; + + IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", + rx_info->beacon_energy_a, rx_info->beacon_energy_b, + rx_info->beacon_energy_c, max_nrg_cck - 6); + + /* Count number of consecutive beacons with fewer-than-desired + * false alarms. 
*/ + if (false_alarms < min_false_alarms) + data->num_in_cck_no_fa++; + else + data->num_in_cck_no_fa = 0; + IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", + data->num_in_cck_no_fa); + + /* If we got too many false alarms this time, reduce sensitivity */ + if ((false_alarms > max_false_alarms) && + (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", + false_alarms, max_false_alarms); + IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n"); + data->nrg_curr_state = IWL_FA_TOO_MANY; + /* Store for "fewer than desired" on later beacon */ + data->nrg_silence_ref = silence_ref; + + /* increase energy threshold (reduce nrg value) + * to decrease sensitivity */ + data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; + /* Else if we got fewer than desired, increase sensitivity */ + } else if (false_alarms < min_false_alarms) { + data->nrg_curr_state = IWL_FA_TOO_FEW; + + /* Compare silence level with silence level for most recent + * healthy number or too many false alarms */ + data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - + (s32)silence_ref; + + IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u, silence diff %d\n", + false_alarms, min_false_alarms, + data->nrg_auto_corr_silence_diff); + + /* Increase value to increase sensitivity, but only if: + * 1a) previous beacon did *not* have *too many* false alarms + * 1b) AND there's a significant difference in Rx levels + * from a previous beacon with too many, or healthy # FAs + * OR 2) We've seen a lot of beacons (100) with too few + * false alarms */ + if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); + /* Increase nrg value to increase sensitivity */ + val = data->nrg_th_cck + NRG_STEP_CCK; + data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); + } else { + IWL_DEBUG_CALIB(priv, "... but not changing sensitivity\n"); + } + + /* Else we got a healthy number of false alarms, keep status quo */ + } else { + IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); + data->nrg_curr_state = IWL_FA_GOOD_RANGE; + + /* Store for use in "fewer than desired" with later beacon */ + data->nrg_silence_ref = silence_ref; + + /* If previous beacon had too many false alarms, + * give it some extra margin by reducing sensitivity again + * (but don't go below measured energy of desired Rx) */ + if (IWL_FA_TOO_MANY == data->nrg_prev_state) { + IWL_DEBUG_CALIB(priv, "... increasing margin\n"); + if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) + data->nrg_th_cck -= NRG_MARGIN; + else + data->nrg_th_cck = max_nrg_cck; + } + } + + /* Make sure the energy threshold does not go above the measured + * energy of the desired Rx signals (reduced by backoff margin), + * or else we might start missing Rx frames. + * Lower value is higher energy, so we use max()! 
+ */ + data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); + IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); + + data->nrg_prev_state = data->nrg_curr_state; + + /* Auto-correlation CCK algorithm */ + if (false_alarms > min_false_alarms) { + + /* increase auto_corr values to decrease sensitivity + * so the DSP won't be disturbed by the noise + */ + if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) + data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1; + else { + val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; + data->auto_corr_cck = + min((u32)ranges->auto_corr_max_cck, val); + } + val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + min((u32)ranges->auto_corr_max_cck_mrc, val); + } else if ((false_alarms < min_false_alarms) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + /* Decrease auto_corr values to increase sensitivity */ + val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; + data->auto_corr_cck = + max((u32)ranges->auto_corr_min_cck, val); + val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + max((u32)ranges->auto_corr_min_cck_mrc, val); + } + + return 0; +} + + +static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time) +{ + u32 val; + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; + u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + data = &(priv->sensitivity_data); + + /* If we got too many false alarms this time, reduce sensitivity */ + if (false_alarms > max_false_alarms) { + + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", + false_alarms, max_false_alarms); + + val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + min((u32)ranges->auto_corr_max_ofdm, val); + + val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + min((u32)ranges->auto_corr_max_ofdm_mrc, val); + + val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + min((u32)ranges->auto_corr_max_ofdm_x1, val); + + val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); + } + + /* Else if we got fewer than desired, increase sensitivity */ + else if (false_alarms < min_false_alarms) { + + IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", + false_alarms, min_false_alarms); + + val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + max((u32)ranges->auto_corr_min_ofdm, val); + + val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + max((u32)ranges->auto_corr_min_ofdm_mrc, val); + + val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + max((u32)ranges->auto_corr_min_ofdm_x1, val); + + val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); + } else { + IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", + min_false_alarms, false_alarms, max_false_alarms); + } + return 0; +} + +static void iwl_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, + struct iwl_sensitivity_data *data, + __le16 *tbl) +{ + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm); + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = + 
cpu_to_le16((u16)data->auto_corr_ofdm_mrc); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_x1); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); + + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck); + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck_mrc); + + tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_cck); + tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_ofdm); + + tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = + cpu_to_le16(data->barker_corr_th_min); + tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16(data->barker_corr_th_min_mrc); + tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = + cpu_to_le16(data->nrg_th_cca); + + IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", + data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, + data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, + data->nrg_th_ofdm); + + IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", + data->auto_corr_cck, data->auto_corr_cck_mrc, + data->nrg_th_cck); +} + +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl_sensitivity_write(struct iwl_priv *priv) +{ + struct iwl_sensitivity_cmd cmd; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = { sizeof(struct iwl_sensitivity_cmd), }, + .flags = CMD_ASYNC, + .data = { &cmd, }, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); + + /* Update uCode's "work" table, and copy it to DSP */ + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + + /* Don't send command to uCode if nothing has changed */ + if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE)) { + IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); + return 0; + } + + /* Copy table for comparison next time */ + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), + sizeof(u16)*HD_TABLE_SIZE); + + return iwl_dvm_send_cmd(priv, &cmd_out); +} + +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) +{ + struct iwl_enhance_sensitivity_cmd cmd; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = { sizeof(struct iwl_enhance_sensitivity_cmd), }, + .flags = CMD_ASYNC, + .data = { &cmd, }, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); + + if (priv->lib->hd_v2) { + cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = + HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; + cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = + HD_INA_NON_SQUARE_DET_CCK_DATA_V2; + cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] = + HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] = + HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = + HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] = + HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] = + HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] = + HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2; + 
cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = + HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] = + HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] = + HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2; + } else { + cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = + HD_INA_NON_SQUARE_DET_OFDM_DATA_V1; + cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = + HD_INA_NON_SQUARE_DET_CCK_DATA_V1; + cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] = + HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] = + HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = + HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] = + HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] = + HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] = + HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = + HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] = + HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] = + HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1; + } + + /* Update uCode's "work" table, and copy it to DSP */ + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + + /* Don't send command to uCode if nothing has changed */ + if (!memcmp(&cmd.enhance_table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE) && + !memcmp(&cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX], + &(priv->enhance_sensitivity_tbl[0]), + sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES)) { + IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); + return 0; + } + + /* Copy table for comparison next time */ + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.enhance_table[0]), + sizeof(u16)*HD_TABLE_SIZE); + memcpy(&(priv->enhance_sensitivity_tbl[0]), + &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]), + sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES); + + return iwl_dvm_send_cmd(priv, &cmd_out); +} + +void iwl_init_sensitivity(struct iwl_priv *priv) +{ + int ret = 0; + int i; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED) + return; + + IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n"); + + /* Clear driver's sensitivity algo data */ + data = &(priv->sensitivity_data); + + if (ranges == NULL) + return; + + memset(data, 0, sizeof(struct iwl_sensitivity_data)); + + data->num_in_cck_no_fa = 0; + data->nrg_curr_state = IWL_FA_TOO_MANY; + data->nrg_prev_state = IWL_FA_TOO_MANY; + data->nrg_silence_ref = 0; + data->nrg_silence_idx = 0; + data->nrg_energy_idx = 0; + + for (i = 0; i < 10; i++) + data->nrg_value[i] = 0; + + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) + data->nrg_silence_rssi[i] = 0; + + data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; + data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; + data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; + data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; + data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; + data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; + data->nrg_th_cck = ranges->nrg_th_cck; + data->nrg_th_ofdm = ranges->nrg_th_ofdm; + 
data->barker_corr_th_min = ranges->barker_corr_th_min; + data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc; + data->nrg_th_cca = ranges->nrg_th_cca; + + data->last_bad_plcp_cnt_ofdm = 0; + data->last_fa_cnt_ofdm = 0; + data->last_bad_plcp_cnt_cck = 0; + data->last_fa_cnt_cck = 0; + + if (priv->fw->enhance_sensitivity_table) + ret |= iwl_enhance_sensitivity_write(priv); + else + ret |= iwl_sensitivity_write(priv); + IWL_DEBUG_CALIB(priv, "<calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED) + return; + + data = &(priv->sensitivity_data); + + if (!iwl_is_any_associated(priv)) { + IWL_DEBUG_CALIB(priv, "<< - not associated\n"); + return; + } + + spin_lock_bh(&priv->statistics.lock); + rx_info = &priv->statistics.rx_non_phy; + ofdm = &priv->statistics.rx_ofdm; + cck = &priv->statistics.rx_cck; + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); + spin_unlock_bh(&priv->statistics.lock); + return; + } + + /* Extract Statistics: */ + rx_enable_time = le32_to_cpu(rx_info->channel_load); + fa_cck = le32_to_cpu(cck->false_alarm_cnt); + fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt); + bad_plcp_cck = le32_to_cpu(cck->plcp_err); + bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); + + statis.beacon_silence_rssi_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a); + statis.beacon_silence_rssi_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b); + statis.beacon_silence_rssi_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c); + statis.beacon_energy_a = + le32_to_cpu(rx_info->beacon_energy_a); + statis.beacon_energy_b = + le32_to_cpu(rx_info->beacon_energy_b); + statis.beacon_energy_c = + le32_to_cpu(rx_info->beacon_energy_c); + + spin_unlock_bh(&priv->statistics.lock); + + IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); + + if (!rx_enable_time) { + IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); + return; + } + + /* These statistics increase monotonically, and do not reset + * at each beacon. Calculate difference from last value, or just + * use the new statistics value if it has reset or wrapped around. 
*/ + if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) + data->last_bad_plcp_cnt_cck = bad_plcp_cck; + else { + bad_plcp_cck -= data->last_bad_plcp_cnt_cck; + data->last_bad_plcp_cnt_cck += bad_plcp_cck; + } + + if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) + data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; + else { + bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; + data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; + } + + if (data->last_fa_cnt_ofdm > fa_ofdm) + data->last_fa_cnt_ofdm = fa_ofdm; + else { + fa_ofdm -= data->last_fa_cnt_ofdm; + data->last_fa_cnt_ofdm += fa_ofdm; + } + + if (data->last_fa_cnt_cck > fa_cck) + data->last_fa_cnt_cck = fa_cck; + else { + fa_cck -= data->last_fa_cnt_cck; + data->last_fa_cnt_cck += fa_cck; + } + + /* Total aborted signal locks */ + norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; + norm_fa_cck = fa_cck + bad_plcp_cck; + + IWL_DEBUG_CALIB(priv, "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, + bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); + + iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); + iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); + if (priv->fw->enhance_sensitivity_table) + iwl_enhance_sensitivity_write(priv); + else + iwl_sensitivity_write(priv); +} + +static inline u8 find_first_chain(u8 mask) +{ + if (mask & ANT_A) + return CHAIN_A; + if (mask & ANT_B) + return CHAIN_B; + return CHAIN_C; +} + +/** + * Run disconnected antenna algorithm to find out which antennas are + * disconnected. + */ +static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, + struct iwl_chain_noise_data *data) +{ + u32 active_chains = 0; + u32 max_average_sig; + u16 max_average_sig_antenna_i; + u8 num_tx_chains; + u8 first_chain; + u16 i = 0; + + average_sig[0] = data->chain_signal_a / IWL_CAL_NUM_BEACONS; + average_sig[1] = data->chain_signal_b / IWL_CAL_NUM_BEACONS; + average_sig[2] = data->chain_signal_c / IWL_CAL_NUM_BEACONS; + + if (average_sig[0] >= average_sig[1]) { + max_average_sig = average_sig[0]; + max_average_sig_antenna_i = 0; + active_chains = (1 << max_average_sig_antenna_i); + } else { + max_average_sig = average_sig[1]; + max_average_sig_antenna_i = 1; + active_chains = (1 << max_average_sig_antenna_i); + } + + if (average_sig[2] >= max_average_sig) { + max_average_sig = average_sig[2]; + max_average_sig_antenna_i = 2; + active_chains = (1 << max_average_sig_antenna_i); + } + + IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", + average_sig[0], average_sig[1], average_sig[2]); + IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", + max_average_sig, max_average_sig_antenna_i); + + /* Compare signal strengths for all 3 receivers. */ + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (i != max_average_sig_antenna_i) { + s32 rssi_delta = (max_average_sig - average_sig[i]); + + /* If signal is very weak, compared with + * strongest, mark it as disconnected. */ + if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS) + data->disconn_array[i] = 1; + else + active_chains |= (1 << i); + IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " + "disconn_array[i] = %d\n", + i, rssi_delta, data->disconn_array[i]); + } + } + + /* + * The above algorithm sometimes fails when the ucode + * reports 0 for all chains. It's not clear why that + * happens to start with, but it is then causing trouble + * because this can make us enable more chains than the + * hardware really has. + * + * To be safe, simply mask out any chains that we know + * are not on the device. 
+ */ + active_chains &= priv->nvm_data->valid_rx_ant; + + num_tx_chains = 0; + for (i = 0; i < NUM_RX_CHAINS; i++) { + /* loops on all the bits of + * priv->hw_setting.valid_tx_ant */ + u8 ant_msk = (1 << i); + if (!(priv->nvm_data->valid_tx_ant & ant_msk)) + continue; + + num_tx_chains++; + if (data->disconn_array[i] == 0) + /* there is a Tx antenna connected */ + break; + if (num_tx_chains == priv->hw_params.tx_chains_num && + data->disconn_array[i]) { + /* + * If all chains are disconnected + * connect the first valid tx chain + */ + first_chain = + find_first_chain(priv->nvm_data->valid_tx_ant); + data->disconn_array[first_chain] = 0; + active_chains |= BIT(first_chain); + IWL_DEBUG_CALIB(priv, + "All Tx chains are disconnected W/A - declare %d as connected\n", + first_chain); + break; + } + } + + if (active_chains != priv->nvm_data->valid_rx_ant && + active_chains != priv->chain_noise_data.active_chains) + IWL_DEBUG_CALIB(priv, + "Detected that not all antennas are connected! " + "Connected: %#x, valid: %#x.\n", + active_chains, + priv->nvm_data->valid_rx_ant); + + /* Save for use within RXON, TX, SCAN commands, etc. */ + data->active_chains = active_chains; + IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", + active_chains); +} + +static void iwlagn_gain_computation(struct iwl_priv *priv, + u32 average_noise[NUM_RX_CHAINS], + u8 default_chain) +{ + int i; + s32 delta_g; + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + + /* + * Find Gain Code for the chains based on "default chain" + */ + for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) { + if ((data->disconn_array[i])) { + data->delta_gain_code[i] = 0; + continue; + } + + delta_g = (priv->lib->chain_noise_scale * + ((s32)average_noise[default_chain] - + (s32)average_noise[i])) / 1500; + + /* bound gain by 2 bits value max, 3rd bit is sign */ + data->delta_gain_code[i] = + min(abs(delta_g), + (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + + if (delta_g < 0) + /* + * set negative sign ... + * note to Intel developers: This is uCode API format, + * not the format of any internal device registers. + * Do not change this format for e.g. 6050 or similar + * devices. Change format only if more resolution + * (i.e. more than 2 bits magnitude) is needed. + */ + data->delta_gain_code[i] |= (1 << 2); + } + + IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n", + data->delta_gain_code[1], data->delta_gain_code[2]); + + if (!data->radio_write) { + struct iwl_calib_chain_noise_gain_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + + iwl_set_calib_hdr(&cmd.hdr, + priv->phy_calib_chain_noise_gain_cmd); + cmd.delta_gain_1 = data->delta_gain_code[1]; + cmd.delta_gain_2 = data->delta_gain_code[2]; + iwl_dvm_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + CMD_ASYNC, sizeof(cmd), &cmd); + + data->radio_write = 1; + data->state = IWL_CHAIN_NOISE_CALIBRATED; + } +} + +/* + * Accumulate 16 beacons of signal and noise statistics for each of + * 3 receivers/antennas/rx-chains, then figure out: + * 1) Which antennas are connected. + * 2) Differential rx gain settings to balance the 3 receivers. 
+ */ +void iwl_chain_noise_calibration(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = NULL; + + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_sig_a; + u32 chain_sig_b; + u32 chain_sig_c; + u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; + u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; + u16 i = 0; + u16 rxon_chnum = INITIALIZATION_VALUE; + u16 stat_chnum = INITIALIZATION_VALUE; + u8 rxon_band24; + u8 stat_band24; + struct statistics_rx_non_phy *rx_info; + + /* + * MULTI-FIXME: + * When we support multiple interfaces on different channels, + * this must be modified/fixed. + */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) + return; + + data = &(priv->chain_noise_data); + + /* + * Accumulate just the first "chain_noise_num_beacons" after + * the first association, then we're done forever. + */ + if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { + if (data->state == IWL_CHAIN_NOISE_ALIVE) + IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); + return; + } + + spin_lock_bh(&priv->statistics.lock); + + rx_info = &priv->statistics.rx_non_phy; + + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); + spin_unlock_bh(&priv->statistics.lock); + return; + } + + rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); + rxon_chnum = le16_to_cpu(ctx->staging.channel); + stat_band24 = + !!(priv->statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK); + stat_chnum = le32_to_cpu(priv->statistics.flag) >> 16; + + /* Make sure we accumulate data for just the associated channel + * (even if scanning). 
*/
+	if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
+		IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
+				rxon_chnum, rxon_band24);
+		spin_unlock_bh(&priv->statistics.lock);
+		return;
+	}
+
+	/*
+	 *  Accumulate beacon statistics values across
+	 * "chain_noise_num_beacons"
+	 */
+	chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
+				IN_BAND_FILTER;
+	chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
+				IN_BAND_FILTER;
+	chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
+				IN_BAND_FILTER;
+
+	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
+	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
+	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
+
+	spin_unlock_bh(&priv->statistics.lock);
+
+	data->beacon_count++;
+
+	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
+	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
+	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
+
+	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
+	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
+	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
+
+	IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
+			rxon_chnum, rxon_band24, data->beacon_count);
+	IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
+			chain_sig_a, chain_sig_b, chain_sig_c);
+	IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
+			chain_noise_a, chain_noise_b, chain_noise_c);
+
+	/* If this is the "chain_noise_num_beacons", determine:
+	 * 1)  Disconnected antennas (using signal strengths)
+	 * 2)  Differential gain (using silence noise) to balance receivers */
+	if (data->beacon_count != IWL_CAL_NUM_BEACONS)
+		return;
+
+	/* Analyze signal for disconnected antenna */
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist) {
+		/* Disable disconnected antenna algorithm for advanced
+		   bt coex, assuming valid antennas are connected */
+		data->active_chains = priv->nvm_data->valid_rx_ant;
+		for (i = 0; i < NUM_RX_CHAINS; i++)
+			if (!(data->active_chains & (1<<i)))
+				data->disconn_array[i] = 1;
+	} else
+		iwl_find_disconn_antenna(priv, average_sig, data);
+
+	/* Analyze noise for rx balance */
+	average_noise[0] = data->chain_noise_a / IWL_CAL_NUM_BEACONS;
+	average_noise[1] = data->chain_noise_b / IWL_CAL_NUM_BEACONS;
+	average_noise[2] = data->chain_noise_c / IWL_CAL_NUM_BEACONS;
+
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		if (!(data->disconn_array[i]) &&
+		    (average_noise[i] <= min_average_noise)) {
+			/* This means that chain i is active and has
+			 * lower noise values so far: */
+			min_average_noise = average_noise[i];
+			min_average_noise_antenna_i = i;
+		}
+	}
+
+	IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
+			average_noise[0], average_noise[1],
+			average_noise[2]);
+
+	IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
+			min_average_noise, min_average_noise_antenna_i);
+
+	iwlagn_gain_computation(
+		priv, average_noise,
+		find_first_chain(priv->nvm_data->valid_rx_ant));
+
+	/* Some power changes may have been made during the calibration.
+ * Update and commit the RXON + */ + iwl_update_chain_flags(priv); + + data->state = IWL_CHAIN_NOISE_DONE; + iwl_power_update_mode(priv, false); +} + +void iwl_reset_run_time_calib(struct iwl_priv *priv) +{ + int i; + memset(&(priv->sensitivity_data), 0, + sizeof(struct iwl_sensitivity_data)); + memset(&(priv->chain_noise_data), 0, + sizeof(struct iwl_chain_noise_data)); + for (i = 0; i < NUM_RX_CHAINS; i++) + priv->chain_noise_data.delta_gain_code[i] = + CHAIN_NOISE_DELTA_GAIN_INIT_VAL; + + /* Ask for statistics now, the uCode will send notification + * periodically after association */ + iwl_send_statistics_request(priv, CMD_ASYNC, true); +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h new file mode 100644 index 000000000000..aeae4e80ea40 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h @@ -0,0 +1,74 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#ifndef __iwl_calib_h__ +#define __iwl_calib_h__ + +#include "dev.h" +#include "commands.h" + +void iwl_chain_noise_calibration(struct iwl_priv *priv); +void iwl_sensitivity_calibration(struct iwl_priv *priv); + +void iwl_init_sensitivity(struct iwl_priv *priv); +void iwl_reset_run_time_calib(struct iwl_priv *priv); + +#endif /* __iwl_calib_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h new file mode 100644 index 000000000000..7a34e4d158d1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -0,0 +1,4008 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (commands.h) only for uCode API definitions.
+ * Please use iwl-xxxx-hw.h for hardware-related definitions.
+ * Please use dev.h for driver implementation definitions.
+ */
+
+#ifndef __iwl_commands_h__
+#define __iwl_commands_h__
+
+#include <linux/ieee80211.h>
+#include <linux/types.h>
+
+
+enum {
+	REPLY_ALIVE = 0x1,
+	REPLY_ERROR = 0x2,
+	REPLY_ECHO = 0x3,		/* test command */
+
+	/* RXON and QOS commands */
+	REPLY_RXON = 0x10,
+	REPLY_RXON_ASSOC = 0x11,
+	REPLY_QOS_PARAM = 0x13,
+	REPLY_RXON_TIMING = 0x14,
+
+	/* Multi-Station support */
+	REPLY_ADD_STA = 0x18,
+	REPLY_REMOVE_STA = 0x19,
+	REPLY_REMOVE_ALL_STA = 0x1a,	/* not used */
+	REPLY_TXFIFO_FLUSH = 0x1e,
+
+	/* Security */
+	REPLY_WEPKEY = 0x20,
+
+	/* RX, TX, LEDs */
+	REPLY_TX = 0x1c,
+	REPLY_LEDS_CMD = 0x48,
+	REPLY_TX_LINK_QUALITY_CMD = 0x4e,
+
+	/* WiMAX coexistence */
+	COEX_PRIORITY_TABLE_CMD = 0x5a,
+	COEX_MEDIUM_NOTIFICATION = 0x5b,
+	COEX_EVENT_CMD = 0x5c,
+
+	/* Calibration */
+	TEMPERATURE_NOTIFICATION = 0x62,
+	CALIBRATION_CFG_CMD = 0x65,
+	CALIBRATION_RES_NOTIFICATION = 0x66,
+	CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
+
+	/* 802.11h related */
+	REPLY_QUIET_CMD = 0x71,		/* not used */
+	REPLY_CHANNEL_SWITCH = 0x72,
+	CHANNEL_SWITCH_NOTIFICATION = 0x73,
+	REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
+	SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+
+	/* Power Management */
+	POWER_TABLE_CMD = 0x77,
+	PM_SLEEP_NOTIFICATION = 0x7A,
+	PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+
+	/* Scan commands and notifications */
+	REPLY_SCAN_CMD = 0x80,
+	REPLY_SCAN_ABORT_CMD = 0x81,
+	SCAN_START_NOTIFICATION = 0x82,
+	SCAN_RESULTS_NOTIFICATION = 0x83,
+	SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+	/* IBSS/AP commands */
+	BEACON_NOTIFICATION = 0x90,
+	REPLY_TX_BEACON = 0x91,
+	WHO_IS_AWAKE_NOTIFICATION = 0x94,	/* not used */
+
+	/* Miscellaneous commands */
+	REPLY_TX_POWER_DBM_CMD = 0x95,
+	QUIET_NOTIFICATION = 0x96,		/* not used */
+	REPLY_TX_PWR_TABLE_CMD = 0x97,
+	REPLY_TX_POWER_DBM_CMD_V1 = 0x98,	/* old version of API */
+	TX_ANT_CONFIGURATION_CMD = 0x98,
+	MEASURE_ABORT_NOTIFICATION = 0x99,	/* not used */
+
+	/* Bluetooth device coexistence config command */
+	REPLY_BT_CONFIG = 0x9b,
+
+	/* Statistics */
+	REPLY_STATISTICS_CMD = 0x9c,
+	STATISTICS_NOTIFICATION = 0x9d,
+
+	/* RF-KILL commands and notifications */
+	REPLY_CARD_STATE_CMD = 0xa0,
+	CARD_STATE_NOTIFICATION = 0xa1,
+
+	/* Missed beacons notification */
+	MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+	REPLY_CT_KILL_CONFIG_CMD = 0xa4,
+	SENSITIVITY_CMD = 0xa8,
+	REPLY_PHY_CALIBRATION_CMD = 0xb0,
+	REPLY_RX_PHY_CMD = 0xc0,
+	REPLY_RX_MPDU_CMD = 0xc1,
+	REPLY_RX = 0xc3,
+	REPLY_COMPRESSED_BA = 0xc5,
+
+	/* BT Coex */
+	
REPLY_BT_COEX_PRIO_TABLE = 0xcc, + REPLY_BT_COEX_PROT_ENV = 0xcd, + REPLY_BT_COEX_PROFILE_NOTIF = 0xce, + + /* PAN commands */ + REPLY_WIPAN_PARAMS = 0xb2, + REPLY_WIPAN_RXON = 0xb3, /* use REPLY_RXON structure */ + REPLY_WIPAN_RXON_TIMING = 0xb4, /* use REPLY_RXON_TIMING structure */ + REPLY_WIPAN_RXON_ASSOC = 0xb6, /* use REPLY_RXON_ASSOC structure */ + REPLY_WIPAN_QOS_PARAM = 0xb7, /* use REPLY_QOS_PARAM structure */ + REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */ + REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9, + REPLY_WIPAN_NOA_NOTIFICATION = 0xbc, + REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd, + + REPLY_WOWLAN_PATTERNS = 0xe0, + REPLY_WOWLAN_WAKEUP_FILTER = 0xe1, + REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2, + REPLY_WOWLAN_TKIP_PARAMS = 0xe3, + REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4, + REPLY_WOWLAN_GET_STATUS = 0xe5, + REPLY_D3_CONFIG = 0xd3, + + REPLY_MAX = 0xff +}; + +/* + * Minimum number of queues. MAX_NUM is defined in hw specific files. + * Set the minimum to accommodate + * - 4 standard TX queues + * - the command queue + * - 4 PAN TX queues + * - the PAN multicast queue, and + * - the AUX (TX during scan dwell) queue. + */ +#define IWL_MIN_NUM_QUEUES 11 + +/* + * Command queue depends on iPAN support. + */ +#define IWL_DEFAULT_CMD_QUEUE_NUM 4 +#define IWL_IPAN_CMD_QUEUE_NUM 9 + +#define IWL_TX_FIFO_BK 0 /* shared */ +#define IWL_TX_FIFO_BE 1 +#define IWL_TX_FIFO_VI 2 /* shared */ +#define IWL_TX_FIFO_VO 3 +#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK +#define IWL_TX_FIFO_BE_IPAN 4 +#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI +#define IWL_TX_FIFO_VO_IPAN 5 +/* re-uses the VO FIFO, uCode will properly flush/schedule */ +#define IWL_TX_FIFO_AUX 5 +#define IWL_TX_FIFO_UNUSED 255 + +#define IWLAGN_CMD_FIFO_NUM 7 + +/* + * This queue number is required for proper operation + * because the ucode will stop/start the scheduler as + * required. 
+ */ +#define IWL_IPAN_MCAST_QUEUE 8 + +/****************************************************************************** + * (0) + * Commonly used structures and definitions: + * Command header, rate_n_flags, txpower + * + *****************************************************************************/ + +/** + * iwlagn rate_n_flags bit fields + * + * rate_n_flags format is used in following iwlagn commands: + * REPLY_RX (response only) + * REPLY_RX_MPDU (response only) + * REPLY_TX (both command and response) + * REPLY_TX_LINK_QUALITY_CMD + * + * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): + * 2-0: 0) 6 Mbps + * 1) 12 Mbps + * 2) 18 Mbps + * 3) 24 Mbps + * 4) 36 Mbps + * 5) 48 Mbps + * 6) 54 Mbps + * 7) 60 Mbps + * + * 4-3: 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + * + * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data + * + * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"): + * 3-0: 0xD) 6 Mbps + * 0xF) 9 Mbps + * 0x5) 12 Mbps + * 0x7) 18 Mbps + * 0x9) 24 Mbps + * 0xB) 36 Mbps + * 0x1) 48 Mbps + * 0x3) 54 Mbps + * + * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"): + * 6-0: 10) 1 Mbps + * 20) 2 Mbps + * 55) 5.5 Mbps + * 110) 11 Mbps + */ +#define RATE_MCS_CODE_MSK 0x7 +#define RATE_MCS_SPATIAL_POS 3 +#define RATE_MCS_SPATIAL_MSK 0x18 +#define RATE_MCS_HT_DUP_POS 5 +#define RATE_MCS_HT_DUP_MSK 0x20 +/* Both legacy and HT use bits 7:0 as the CCK/OFDM rate or HT MCS */ +#define RATE_MCS_RATE_MSK 0xff + +/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */ +#define RATE_MCS_FLAGS_POS 8 +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK 0x100 + +/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK 0x200 + +/* Bit 10: (1) Use Green Field preamble */ +#define RATE_MCS_GF_POS 10 +#define RATE_MCS_GF_MSK 0x400 + +/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */ +#define RATE_MCS_HT40_POS 11 +#define RATE_MCS_HT40_MSK 0x800 + +/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */ +#define RATE_MCS_DUP_POS 12 +#define RATE_MCS_DUP_MSK 0x1000 + +/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK 0x2000 + +/** + * rate_n_flags Tx antenna masks + * 4965 has 2 transmitters + * 5100 has 1 transmitter B + * 5150 has 1 transmitter A + * 5300 has 3 transmitters + * 5350 has 3 transmitters + * bit14:16 + */ +#define RATE_MCS_ANT_POS 14 +#define RATE_MCS_ANT_A_MSK 0x04000 +#define RATE_MCS_ANT_B_MSK 0x08000 +#define RATE_MCS_ANT_C_MSK 0x10000 +#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK) +#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) +#define RATE_ANT_NUM 3 + +#define POWER_TABLE_NUM_ENTRIES 33 +#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 +#define POWER_TABLE_CCK_ENTRY 32 + +#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 +#define IWL_PWR_CCK_ENTRIES 2 + +/** + * struct tx_power_dual_stream + * + * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * + * Same format as iwl_tx_power_dual_stream, but __le32 + */ +struct tx_power_dual_stream { + __le32 dw; +} __packed; + +/** + * Command REPLY_TX_POWER_DBM_CMD = 0x98 + * struct iwlagn_tx_power_dbm_cmd + */ +#define IWLAGN_TX_POWER_AUTO 0x7f +#define IWLAGN_TX_POWER_NO_CLOSED (0x1 << 6) + +struct iwlagn_tx_power_dbm_cmd { + s8 global_lmt; /*in half-dBm (e.g. 
30 = 15 dBm) */ + u8 flags; + s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ + u8 reserved; +} __packed; + +/** + * Command TX_ANT_CONFIGURATION_CMD = 0x98 + * This command is used to configure valid Tx antenna. + * By default uCode concludes the valid antenna according to the radio flavor. + * This command enables the driver to override/modify this conclusion. + */ +struct iwl_tx_ant_config_cmd { + __le32 valid; +} __packed; + +/****************************************************************************** + * (0a) + * Alive and Error Commands & Responses: + * + *****************************************************************************/ + +#define UCODE_VALID_OK cpu_to_le32(0x1) + +/** + * REPLY_ALIVE = 0x1 (response only, not a command) + * + * uCode issues this "alive" notification once the runtime image is ready + * to receive commands from the driver. This is the *second* "alive" + * notification that the driver will receive after rebooting uCode; + * this "alive" is indicated by subtype field != 9. + * + * See comments documenting "BSM" (bootstrap state machine). + * + * This response includes two pointers to structures within the device's + * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging: + * + * 1) log_event_table_ptr indicates base of the event log. This traces + * a 256-entry history of uCode execution within a circular buffer. + * Its header format is: + * + * __le32 log_size; log capacity (in number of entries) + * __le32 type; (1) timestamp with each entry, (0) no timestamp + * __le32 wraps; # times uCode has wrapped to top of circular buffer + * __le32 write_index; next circular buffer entry that uCode would fill + * + * The header is followed by the circular buffer of log entries. Entries + * with timestamps have the following format: + * + * __le32 event_id; range 0 - 1500 + * __le32 timestamp; low 32 bits of TSF (of network, if associated) + * __le32 data; event_id-specific data value + * + * Entries without timestamps contain only event_id and data. + * + * + * 2) error_event_table_ptr indicates base of the error log. This contains + * information about any uCode error that occurs. For agn, the format + * of the error log is defined by struct iwl_error_event_table. + * + * The Linux driver can print both logs to the system log when a uCode error + * occurs. + */ + +/* + * Note: This structure is read from the device with IO accesses, + * and the reading already does the endian conversion. As it is + * read with u32-sized accesses, any members with a different size + * need to be ordered correctly though! 
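+ *
+ * (Editor's note, illustrative only: a sub-u32 member would have to be
+ * paired with a neighbour so the two together still fill one 32-bit word,
+ * e.g. declaring hypothetical fields
+ *
+ *	u16 small_a;
+ *	u16 small_b;
+ *
+ * back to back; a lone u16 would shift every later member within the
+ * stream of u32 reads.)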
+ */ +struct iwl_error_event_table { + u32 valid; /* (nonzero) valid, (0) log is empty */ + u32 error_id; /* type of error */ + u32 pc; /* program counter */ + u32 blink1; /* branch link */ + u32 blink2; /* branch link */ + u32 ilink1; /* interrupt link */ + u32 ilink2; /* interrupt link */ + u32 data1; /* error-specific data */ + u32 data2; /* error-specific data */ + u32 line; /* source code line of error */ + u32 bcon_time; /* beacon timer */ + u32 tsf_low; /* network timestamp function timer */ + u32 tsf_hi; /* network timestamp function timer */ + u32 gp1; /* GP1 timer register */ + u32 gp2; /* GP2 timer register */ + u32 gp3; /* GP3 timer register */ + u32 ucode_ver; /* uCode version */ + u32 hw_ver; /* HW Silicon version */ + u32 brd_ver; /* HW board version */ + u32 log_pc; /* log program counter */ + u32 frame_ptr; /* frame pointer */ + u32 stack_ptr; /* stack pointer */ + u32 hcmd; /* last host command header */ + u32 isr0; /* isr status register LMPM_NIC_ISR0: + * rxtx_flag */ + u32 isr1; /* isr status register LMPM_NIC_ISR1: + * host_flag */ + u32 isr2; /* isr status register LMPM_NIC_ISR2: + * enc_flag */ + u32 isr3; /* isr status register LMPM_NIC_ISR3: + * time_flag */ + u32 isr4; /* isr status register LMPM_NIC_ISR4: + * wico interrupt */ + u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ + u32 wait_event; /* wait event() caller address */ + u32 l2p_control; /* L2pControlField */ + u32 l2p_duration; /* L2pDurationField */ + u32 l2p_mhvalid; /* L2pMhValidBits */ + u32 l2p_addr_match; /* L2pAddrMatchStat */ + u32 lmpm_pmg_sel; /* indicate which clocks are turned on + * (LMPM_PMG_SEL) */ + u32 u_timestamp; /* indicate when the date and time of the + * compilation */ + u32 flow_handler; /* FH read/write pointers, RX credit */ +} __packed; + +struct iwl_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; /* not "9" for runtime alive */ + __le16 reserved2; + __le32 log_event_table_ptr; /* SRAM address for event log */ + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 timestamp; + __le32 is_valid; +} __packed; + +/* + * REPLY_ERROR = 0x2 (response only, not a command) + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_info; + __le64 timestamp; +} __packed; + +/****************************************************************************** + * (1) + * RXON Commands & Responses: + * + *****************************************************************************/ + +/* + * Rx config defines & structure + */ +/* rx_config device types */ +enum { + RXON_DEV_TYPE_AP = 1, + RXON_DEV_TYPE_ESS = 3, + RXON_DEV_TYPE_IBSS = 4, + RXON_DEV_TYPE_SNIFFER = 6, + RXON_DEV_TYPE_CP = 7, + RXON_DEV_TYPE_2STA = 8, + RXON_DEV_TYPE_P2P = 9, +}; + + +#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) +#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) +#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) +#define RXON_RX_CHAIN_VALID_POS (1) +#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4) +#define RXON_RX_CHAIN_FORCE_SEL_POS (4) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10) +#define RXON_RX_CHAIN_CNT_POS (10) +#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12) +#define RXON_RX_CHAIN_MIMO_CNT_POS (12) +#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14) +#define RXON_RX_CHAIN_MIMO_FORCE_POS (14) + 
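+/*
+ * Editor's illustration, not part of the original file (values are
+ * hypothetical): a staging rx_chain word is composed from the fields
+ * above, e.g.
+ *
+ *	rx_chain = cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
+ *			       (0x7 << RXON_RX_CHAIN_FORCE_SEL_POS) |
+ *			       (2 << RXON_RX_CHAIN_CNT_POS) |
+ *			       (1 << RXON_RX_CHAIN_MIMO_CNT_POS));
+ *
+ * marks chains A-C as valid and selected and fills in the two receive
+ * chain count fields; the driver derives the real values at run time from
+ * the antennas that are actually populated.
+ */
+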
+/* rx_config flags */ +/* band & modulation selection */ +#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0) +#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1) +/* auto detection enable */ +#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2) +/* TGg protection when tx */ +#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3) +/* cck short slot & preamble */ +#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4) +#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5) +/* antenna selection */ +#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7) +#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00) +#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8) +#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9) +/* radar detection enable */ +#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12) +#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13) +/* rx response to host with 8-byte TSF +* (according to ON_AIR deassertion) */ +#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) + + +/* HT flags */ +#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) +#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) + +#define RXON_FLG_HT_OPERATING_MODE_POS (23) + +#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23) +#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23) + +#define RXON_FLG_CHANNEL_MODE_POS (25) +#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25) + +/* channel mode */ +enum { + CHANNEL_MODE_LEGACY = 0, + CHANNEL_MODE_PURE_40 = 1, + CHANNEL_MODE_MIXED = 2, + CHANNEL_MODE_RESERVED = 3, +}; +#define RXON_FLG_CHANNEL_MODE_LEGACY cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS) +#define RXON_FLG_CHANNEL_MODE_PURE_40 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS) +#define RXON_FLG_CHANNEL_MODE_MIXED cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS) + +/* CTS to self (if spec allows) flag */ +#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30) + +/* rx_config filter flags */ +/* accept all data frames */ +#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0) +/* pass control & management to host */ +#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1) +/* accept multi-cast */ +#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2) +/* don't decrypt uni-cast frames */ +#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3) +/* don't decrypt multi-cast frames */ +#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4) +/* STA is associated */ +#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5) +/* transfer to host non bssid beacons in associated state */ +#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) + +/** + * REPLY_RXON = 0x10 (command, has simple generic response) + * + * RXON tunes the radio tuner to a service channel, and sets up a number + * of parameters that are used primarily for Rx, but also for Tx operations. + * + * NOTE: When tuning to a new channel, driver must set the + * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent + * info within the device, including the station tables, tx retry + * rate tables, and txpower tables. Driver must build a new station + * table and txpower table before transmitting anything on the RXON + * channel. + * + * NOTE: All RXONs wipe clean the internal txpower table. Driver must + * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), + * regardless of whether RXON_FILTER_ASSOC_MSK is set. 
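+ *
+ * (Editor's sketch of the flow these notes imply, not normative: send
+ * REPLY_RXON with RXON_FILTER_ASSOC_MSK cleared to tune, re-send the
+ * txpower table with REPLY_TX_PWR_TABLE_CMD, rebuild the station table
+ * with REPLY_ADD_STA, and only then send REPLY_RXON again with
+ * RXON_FILTER_ASSOC_MSK set.)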
+ */ + +struct iwl_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 rx_chain; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + u8 ofdm_ht_triple_stream_basic_rates; + u8 reserved5; + __le16 acquisition_data; + __le16 reserved6; +} __packed; + +/* + * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) + */ +struct iwl_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 reserved1; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + u8 ofdm_ht_triple_stream_basic_rates; + u8 reserved2; + __le16 rx_chain_select_flags; + __le16 acquisition_data; + __le32 reserved3; +} __packed; + +#define IWL_CONN_MAX_LISTEN_INTERVAL 10 +#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ + +/* + * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) + */ +struct iwl_rxon_time_cmd { + __le64 timestamp; + __le16 beacon_interval; + __le16 atim_window; + __le32 beacon_init_val; + __le16 listen_interval; + u8 dtim_period; + u8 delta_cp_bss_tbtts; +} __packed; + +/* + * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) + */ +/** + * struct iwl5000_channel_switch_cmd + * @band: 0- 5.2GHz, 1- 2.4GHz + * @expect_beacon: 0- resume transmits after channel switch + * 1- wait for beacon to resume transmits + * @channel: new channel number + * @rxon_flags: Rx on flags + * @rxon_filter_flags: filtering parameters + * @switch_time: switch time in extended beacon format + * @reserved: reserved bytes + */ +struct iwl5000_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; +} __packed; + +/** + * struct iwl6000_channel_switch_cmd + * @band: 0- 5.2GHz, 1- 2.4GHz + * @expect_beacon: 0- resume transmits after channel switch + * 1- wait for beacon to resume transmits + * @channel: new channel number + * @rxon_flags: Rx on flags + * @rxon_filter_flags: filtering parameters + * @switch_time: switch time in extended beacon format + * @reserved: reserved bytes + */ +struct iwl6000_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; +} __packed; + +/* + * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) + */ +struct iwl_csa_notification { + __le16 band; + __le16 channel; + __le32 status; /* 0 - OK, 1 - fail */ +} __packed; + +/****************************************************************************** + * (2) + * Quality-of-Service (QOS) Commands & Responses: + * + *****************************************************************************/ + +/** + * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM + * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd + * + * @cw_min: Contention window, start value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x0f. + * @cw_max: Contention window, max value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x3f. 
+ * @aifsn: Number of slots in Arbitration Interframe Space (before + * performing random backoff timing prior to Tx). Device default 1. + * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. + * + * Device will automatically increase contention window by (2*CW) + 1 for each + * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW + * value, to cap the CW value. + */ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 reserved1; + __le16 edca_txop; +} __packed; + +/* QoS flags defines */ +#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) +#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02) +#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10) + +/* Number of Access Categories (AC) (EDCA), queues 0..3 */ +#define AC_NUM 4 + +/* + * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) + * + * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs + * 0: Background, 1: Best Effort, 2: Video, 3: Voice. + */ +struct iwl_qosparam_cmd { + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM]; +} __packed; + +/****************************************************************************** + * (3) + * Add/Modify Stations Commands & Responses: + * + *****************************************************************************/ +/* + * Multi station support + */ + +/* Special, dedicated locations within device's station table */ +#define IWL_AP_ID 0 +#define IWL_AP_ID_PAN 1 +#define IWL_STA_ID 2 +#define IWLAGN_PAN_BCAST_ID 14 +#define IWLAGN_BROADCAST_ID 15 +#define IWLAGN_STATION_COUNT 16 + +#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT + +#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) +#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) +#define STA_FLG_PAN_STATION cpu_to_le32(1 << 13) +#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) +#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) +#define STA_FLG_MAX_AGG_SIZE_POS (19) +#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19) +#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21) +#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22) +#define STA_FLG_AGG_MPDU_DENSITY_POS (23) +#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23) + +/* Use in mode field. 1: modify existing entry, 0: add new station entry */ +#define STA_CONTROL_MODIFY_MSK 0x01 + +/* key flags __le16*/ +#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007) +#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000) +#define STA_KEY_FLG_WEP cpu_to_le16(0x0001) +#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002) +#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003) + +#define STA_KEY_FLG_KEYID_POS 8 +#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800) +/* wep key is either from global key (0) or from station info array (1) */ +#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008) + +/* wep key in STA: 5-bytes (0) or 13-bytes (1) */ +#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000) +#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000) +#define STA_KEY_MAX_NUM 8 +#define STA_KEY_MAX_NUM_PAN 16 +/* must not match WEP_INVALID_OFFSET */ +#define IWLAGN_HW_KEY_DEFAULT 0xfe + +/* Flags indicate whether to modify vs. 
don't change various station params */ +#define STA_MODIFY_KEY_MASK 0x01 +#define STA_MODIFY_TID_DISABLE_TX 0x02 +#define STA_MODIFY_TX_RATE_MSK 0x04 +#define STA_MODIFY_ADDBA_TID_MSK 0x08 +#define STA_MODIFY_DELBA_TID_MSK 0x10 +#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 + +/* agn */ +struct iwl_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ + u8 reserved1; + __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ + u8 key_offset; + u8 reserved2; + u8 key[16]; /* 16-byte unicast decryption key */ + __le64 tx_secur_seq_cnt; + __le64 hw_tkip_mic_rx_key; + __le64 hw_tkip_mic_tx_key; +} __packed; + +/** + * struct sta_id_modify + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change + * + * Driver selects unused table index when adding new station, + * or the index to a pre-existing station entry when modifying that station. + * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). + * + * modify_mask flags select which parameters to modify vs. leave alone. + */ +struct sta_id_modify { + u8 addr[ETH_ALEN]; + __le16 reserved1; + u8 sta_id; + u8 modify_mask; + __le16 reserved2; +} __packed; + +/* + * REPLY_ADD_STA = 0x18 (command) + * + * The device contains an internal table of per-station information, + * with info on security keys, aggregation parameters, and Tx rates for + * initial Tx attempt and any retries (agn devices uses + * REPLY_TX_LINK_QUALITY_CMD, + * + * REPLY_ADD_STA sets up the table entry for one station, either creating + * a new entry, or modifying a pre-existing one. + * + * NOTE: RXON command (without "associated" bit set) wipes the station table + * clean. Moving into RF_KILL state does this also. Driver must set up + * new station table before transmitting anything on the RXON channel + * (except active scans or active measurements; those commands carry + * their own txpower/rate setup data). + * + * When getting started on a new channel, driver must set up the + * IWL_BROADCAST_ID entry (last entry in the table). For a client + * station in a BSS, once an AP is selected, driver sets up the AP STA + * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP + * are all that are needed for a BSS client station. If the device is + * used as AP, or in an IBSS network, driver must set up station table + * entries for all STAs in network, starting with index IWL_STA_ID. + */ + +struct iwl_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + __le16 legacy_reserved; + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. 
*/ + __le16 add_immediate_ba_ssn; + + /* + * Number of packets OK to transmit to station even though + * it is asleep -- used to synchronise PS-poll and u-APSD + * responses while ucode keeps track of STA sleep state. + */ + __le16 sleep_tx_count; + + __le16 reserved2; +} __packed; + + +#define ADD_STA_SUCCESS_MSK 0x1 +#define ADD_STA_NO_ROOM_IN_TABLE 0x2 +#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 +#define ADD_STA_MODIFY_NON_EXIST_STA 0x8 +/* + * REPLY_ADD_STA = 0x18 (response) + */ +struct iwl_add_sta_resp { + u8 status; /* ADD_STA_* */ +} __packed; + +#define REM_STA_SUCCESS_MSK 0x1 +/* + * REPLY_REM_STA = 0x19 (response) + */ +struct iwl_rem_sta_resp { + u8 status; +} __packed; + +/* + * REPLY_REM_STA = 0x19 (command) + */ +struct iwl_rem_sta_cmd { + u8 num_sta; /* number of removed stations */ + u8 reserved[3]; + u8 addr[ETH_ALEN]; /* MAC addr of the first station */ + u8 reserved2[2]; +} __packed; + + +/* WiFi queues mask */ +#define IWL_SCD_BK_MSK BIT(0) +#define IWL_SCD_BE_MSK BIT(1) +#define IWL_SCD_VI_MSK BIT(2) +#define IWL_SCD_VO_MSK BIT(3) +#define IWL_SCD_MGMT_MSK BIT(3) + +/* PAN queues mask */ +#define IWL_PAN_SCD_BK_MSK BIT(4) +#define IWL_PAN_SCD_BE_MSK BIT(5) +#define IWL_PAN_SCD_VI_MSK BIT(6) +#define IWL_PAN_SCD_VO_MSK BIT(7) +#define IWL_PAN_SCD_MGMT_MSK BIT(7) +#define IWL_PAN_SCD_MULTICAST_MSK BIT(8) + +#define IWL_AGG_TX_QUEUE_MSK 0xffc00 + +#define IWL_DROP_ALL BIT(1) + +/* + * REPLY_TXFIFO_FLUSH = 0x1e(command and response) + * + * When using full FIFO flush this command checks the scheduler HW block WR/RD + * pointers to check if all the frames were transferred by DMA into the + * relevant TX FIFO queue. Only when the DMA is finished and the queue is + * empty the command can finish. + * This command is used to flush the TXFIFO from transmit commands, it may + * operate on single or multiple queues, the command queue can't be flushed by + * this command. The command response is returned when all the queue flush + * operations are done. Each TX command flushed return response with the FLUSH + * status set in the TX response status. When FIFO flush operation is used, + * the flush operation ends when both the scheduler DMA done and TXFIFO empty + * are set. + * + * @queue_control: bit mask for which queues to flush + * @flush_control: flush controls + * 0: Dump single MSDU + * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable. 
+ * 2: Dump all FIFO + */ +struct iwl_txfifo_flush_cmd_v3 { + __le32 queue_control; + __le16 flush_control; + __le16 reserved; +} __packed; + +struct iwl_txfifo_flush_cmd_v2 { + __le16 queue_control; + __le16 flush_control; +} __packed; + +/* + * REPLY_WEP_KEY = 0x20 + */ +struct iwl_wep_key { + u8 key_index; + u8 key_offset; + u8 reserved1[2]; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +} __packed; + +struct iwl_wep_cmd { + u8 num_keys; + u8 global_key_type; + u8 flags; + u8 reserved; + struct iwl_wep_key key[0]; +} __packed; + +#define WEP_KEY_WEP_TYPE 1 +#define WEP_KEYS_MAX 4 +#define WEP_INVALID_OFFSET 0xff +#define WEP_KEY_LEN_64 5 +#define WEP_KEY_LEN_128 13 + +/****************************************************************************** + * (4) + * Rx Responses: + * + *****************************************************************************/ + +#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0) +#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1) + +#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0) +#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) +#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) +#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) +#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70 +#define RX_RES_PHY_FLAGS_ANTENNA_POS 4 +#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7) + +#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) +#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) +#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) +#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) +#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) +#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8) + +#define RX_RES_STATUS_STATION_FOUND (1<<6) +#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7) + +#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) +#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) +#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) +#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) +#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11) + +#define RX_MPDU_RES_STATUS_ICV_OK (0x20) +#define RX_MPDU_RES_STATUS_MIC_OK (0x40) +#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) +#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) + + +#define IWLAGN_RX_RES_PHY_CNT 8 +#define IWLAGN_RX_RES_AGC_IDX 1 +#define IWLAGN_RX_RES_RSSI_AB_IDX 2 +#define IWLAGN_RX_RES_RSSI_C_IDX 3 +#define IWLAGN_OFDM_AGC_MSK 0xfe00 +#define IWLAGN_OFDM_AGC_BIT_POS 9 +#define IWLAGN_OFDM_RSSI_INBAND_A_BITMSK 0x00ff +#define IWLAGN_OFDM_RSSI_ALLBAND_A_BITMSK 0xff00 +#define IWLAGN_OFDM_RSSI_A_BIT_POS 0 +#define IWLAGN_OFDM_RSSI_INBAND_B_BITMSK 0xff0000 +#define IWLAGN_OFDM_RSSI_ALLBAND_B_BITMSK 0xff000000 +#define IWLAGN_OFDM_RSSI_B_BIT_POS 16 +#define IWLAGN_OFDM_RSSI_INBAND_C_BITMSK 0x00ff +#define IWLAGN_OFDM_RSSI_ALLBAND_C_BITMSK 0xff00 +#define IWLAGN_OFDM_RSSI_C_BIT_POS 0 + +struct iwlagn_non_cfg_phy { + __le32 non_cfg_phy[IWLAGN_RX_RES_PHY_CNT]; /* up to 8 phy entries */ +} __packed; + + +/* + * REPLY_RX = 0xc3 (response only, not a command) + * Used only for legacy (non 11n) frames. + */ +struct iwl_rx_phy_res { + u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ + u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ + u8 stat_id; /* configurable DSP phy data set ID */ + u8 reserved1; + __le64 timestamp; /* TSF at on air rise */ + __le32 beacon_time_stamp; /* beacon at on-air rise */ + __le16 phy_flags; /* general phy flags: band, modulation, ... 
*/ + __le16 channel; /* channel number */ + u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ + __le32 rate_n_flags; /* RATE_MCS_* */ + __le16 byte_count; /* frame's byte-count */ + __le16 frame_time; /* frame's time on the air */ +} __packed; + +struct iwl_rx_mpdu_res_start { + __le16 byte_count; + __le16 reserved; +} __packed; + + +/****************************************************************************** + * (5) + * Tx Commands & Responses: + * + * Driver must place each REPLY_TX command into one of the prioritized Tx + * queues in host DRAM, shared between driver and device (see comments for + * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode + * are preparing to transmit, the device pulls the Tx command over the PCI + * bus via one of the device's Tx DMA channels, to fill an internal FIFO + * from which data will be transmitted. + * + * uCode handles all timing and protocol related to control frames + * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler + * handle reception of block-acks; uCode updates the host driver via + * REPLY_COMPRESSED_BA. + * + * uCode handles retrying Tx when an ACK is expected but not received. + * This includes trying lower data rates than the one requested in the Tx + * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn). + * + * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. + * This command must be executed after every RXON command, before Tx can occur. + *****************************************************************************/ + +/* REPLY_TX Tx flags field */ + +/* + * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it + * before this frame. if CTS-to-self required check + * RXON_FLG_SELF_CTS_EN status. + */ +#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0) + +/* 1: Expect ACK from receiving station + * 0: Don't expect ACK (MAC header's duration field s/b 0) + * Set this for unicast frames, but not broadcast/multicast. */ +#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) + +/* For agn devices: + * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). + * Tx command's initial_rate_index indicates first rate to try; + * uCode walks through table for additional Tx attempts. + * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. + * This rate will be used for all Tx attempts; it will not be scaled. */ +#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4) + +/* 1: Expect immediate block-ack. + * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ +#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) + +/* Tx antenna selection field; reserved (0) for agn devices. */ +#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) + +/* 1: Ignore Bluetooth priority for this frame. + * 0: Delay Tx until Bluetooth device is done (normal usage). */ +#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12) + +/* 1: uCode overrides sequence control field in MAC header. + * 0: Driver provides sequence control field in MAC header. + * Set this for management frames, non-QOS data frames, non-unicast frames, + * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ +#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) + +/* 1: This frame is non-last MPDU; more fragments are coming. + * 0: Last fragment, or not using fragmentation. */ +#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14) + +/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame. + * 0: No TSF required in outgoing frame. 
+ * Set this for transmitting beacons and probe responses. */ +#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16) + +/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword + * alignment of frame's payload data field. + * 0: No pad + * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4 + * field (but not both). Driver must align frame data (i.e. data following + * MAC header) to DWORD boundary. */ +#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20) + +/* accelerate aggregation support + * 0 - no CCMP encryption; 1 - CCMP encryption */ +#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22) + +/* HCCA-AP - disable duration overwriting. */ +#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) + + +/* + * TX command security control + */ +#define TX_CMD_SEC_WEP 0x01 +#define TX_CMD_SEC_CCM 0x02 +#define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_MSK 0x03 +#define TX_CMD_SEC_SHIFT 6 +#define TX_CMD_SEC_KEY128 0x08 + +/* + * REPLY_TX = 0x1c (command) + */ + +/* + * 4965 uCode updates these Tx attempt count values in host DRAM. + * Used for managing Tx retries when expecting block-acks. + * Driver should set these fields to 0. + */ +struct iwl_dram_scratch { + u8 try_cnt; /* Tx attempts */ + u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ + __le16 reserved; +} __packed; + +struct iwl_tx_cmd { + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. + * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + /* uCode may modify this field of the Tx command (in host DRAM!). + * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ + struct iwl_dram_scratch scratch; + + /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* Index of destination station in uCode's station table */ + u8 sta_id; + + /* Type of security encryption: CCM or TKIP */ + u8 sec_ctl; /* TX_CMD_SEC_* */ + + /* + * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial + * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for + * data frames, this field may be used to selectively reduce initial + * rate (via non-0 value) for special frames (e.g. management), while + * still supporting rate scaling for all frames. + */ + u8 initial_rate_index; + u8 reserved; + u8 key[16]; + __le16 next_frame_flags; + __le16 reserved2; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + + /* Host DRAM physical address pointer to "scratch" in this command. + * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */ + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + u8 tid_tspec; + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). 
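+	 *
+	 * (Editorial example: a 1504 usec TXOP granted by the AP would be
+	 *  written here as 1504 / 32 = 47.)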
+ */ + __le16 driver_txop; + + /* + * MAC header goes here, followed by 2 bytes padding if MAC header + * length is 26 or 30 bytes, followed by payload data + */ + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __packed; + +/* + * TX command response is sent after *agn* transmission attempts. + * + * both postpone and abort status are expected behavior from uCode. there is + * no special operation required from driver; except for RFKILL_FLUSH, + * which required tx flush host command to flush all the tx frames in queues + */ +enum { + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + /* postpone TX */ + TX_STATUS_POSTPONE_DELAY = 0x40, + TX_STATUS_POSTPONE_FEW_BYTES = 0x41, + TX_STATUS_POSTPONE_BT_PRIO = 0x42, + TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, + TX_STATUS_POSTPONE_CALC_TTAK = 0x44, + /* abort TX */ + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, + TX_STATUS_FAIL_DRAIN_FLOW = 0x85, + TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_HOST_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, + TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90, + TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, +}; + +#define TX_PACKET_MODE_REGULAR 0x0000 +#define TX_PACKET_MODE_BURST_SEQ 0x0100 +#define TX_PACKET_MODE_BURST_FIRST 0x0200 + +enum { + TX_POWER_PA_NOT_ACTIVE = 0x0, +}; + +enum { + TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ + TX_STATUS_DELAY_MSK = 0x00000040, + TX_STATUS_ABORT_MSK = 0x00000080, + TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ + TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ + TX_RESERVED = 0x00780000, /* bits 19:22 */ + TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ + TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ +}; + +/* ******************************* + * TX aggregation status + ******************************* */ + +enum { + AGG_TX_STATE_TRANSMITTED = 0x00, + AGG_TX_STATE_UNDERRUN_MSK = 0x01, + AGG_TX_STATE_BT_PRIO_MSK = 0x02, + AGG_TX_STATE_FEW_BYTES_MSK = 0x04, + AGG_TX_STATE_ABORT_MSK = 0x08, + AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10, + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20, + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40, + AGG_TX_STATE_SCD_QUERY_MSK = 0x80, + AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100, + AGG_TX_STATE_RESPONSE_MSK = 0x1ff, + AGG_TX_STATE_DUMP_TX_MSK = 0x200, + AGG_TX_STATE_DELAY_TX_MSK = 0x400 +}; + +#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */ +#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */ +#define AGG_TX_TRY_POS 12 + +#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \ + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK) + +/* # tx attempts for first frame in aggregation */ +#define AGG_TX_STATE_TRY_CNT_POS 12 +#define AGG_TX_STATE_TRY_CNT_MSK 0xf000 + +/* Command ID and sequence number of Tx command for this frame */ +#define AGG_TX_STATE_SEQ_NUM_POS 16 +#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 + +/* + * REPLY_TX = 0x1c (response) + * + * This response may be in one of two slightly different formats, indicated + * by the frame_count field: + * + * 1) No aggregation (frame_count == 1). This reports Tx results for + * a single frame. Multiple attempts, at various bit rates, may have + * been made for this frame. 
+ * + * 2) Aggregation (frame_count > 1). This reports Tx results for + * 2 or more frames that used block-acknowledge. All frames were + * transmitted at same rate. Rate scaling may have been used if first + * frame in this new agg block failed in previous agg block(s). + * + * Note that, for aggregation, ACK (block-ack) status is not delivered here; + * block-ack has not been received by the time the agn device records + * this status. + * This status relates to reasons the tx might have been blocked or aborted + * within the sending station (this agn device), rather than whether it was + * received successfully by the destination station. + */ +struct agg_tx_status { + __le16 status; + __le16 sequence; +} __packed; + +/* + * definitions for initial rate index field + * bits [3:0] initial rate index + * bits [6:4] rate table color, used for the initial rate + * bit-7 invalid rate indication + * i.e. rate was not chosen from rate table + * or rate table color was changed during frame retries + * refer tlc rate info + */ + +#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0 +#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f +#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4 +#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70 +#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80 + +/* refer to ra_tid */ +#define IWLAGN_TX_RES_TID_POS 0 +#define IWLAGN_TX_RES_TID_MSK 0x0f +#define IWLAGN_TX_RES_RA_POS 4 +#define IWLAGN_TX_RES_RA_MSK 0xf0 + +struct iwlagn_tx_resp { + u8 frame_count; /* 1 no aggregation, >1 aggregation */ + u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ + u8 failure_rts; /* # failures due to unsuccessful RTS */ + u8 failure_frame; /* # failures due to no ACK (unused for agg) */ + + /* For non-agg: Rate at which frame was successful. + * For agg: Rate at which all frames were transmitted. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* For non-agg: RTS + CTS + frame tx attempts time + ACK. + * For agg: RTS + CTS + aggregation tx time + block-ack time. */ + __le16 wireless_media_time; /* uSecs */ + + u8 pa_status; /* RF power amplifier measurement (not used) */ + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_C[3]; + + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; /* tid (0:3), sta_id (4:7) */ + __le16 frame_ctrl; + /* + * For non-agg: frame status TX_STATUS_* + * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status + * fields follow this one, up to frame_count. + * Bit fields: + * 11- 0: AGG_TX_STATE_* status code + * 15-12: Retry count for 1st frame in aggregation (retries + * occur if tx failed for this frame when it was a + * member of a previous aggregation block). If rate + * scaling is used, retry count indicates the rate + * table entry used for all frames in the new agg. + * 31-16: Sequence # for this frame's Tx cmd (not SSN!) 
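+	 *
+	 *  (Editorial example with a hypothetical value: a first-frame status
+	 *  word of 0x012b3004 decodes as AGG_TX_STATE_FEW_BYTES_MSK (0x004)
+	 *  in bits 11-0, a retry count of 3 in bits 15-12, and Tx command
+	 *  sequence number 0x012b in bits 31-16.)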
+ */ + struct agg_tx_status status; /* TX status (in aggregation - + * status of 1st frame) */ +} __packed; +/* + * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) + * + * Reports Block-Acknowledge from recipient station + */ +struct iwl_compressed_ba_resp { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + + /* Index of recipient (BA-sending) station in uCode's station table */ + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; + u8 txed; /* number of frames sent */ + u8 txed_2_done; /* number of frames acked */ + __le16 reserved1; +} __packed; + +/* + * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) + * + */ + +/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ +#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) + +/* # of EDCA prioritized tx fifos */ +#define LINK_QUAL_AC_NUM AC_NUM + +/* # entries in rate scale table to support Tx retries */ +#define LINK_QUAL_MAX_RETRY_NUM 16 + +/* Tx antenna selection values */ +#define LINK_QUAL_ANT_A_MSK (1 << 0) +#define LINK_QUAL_ANT_B_MSK (1 << 1) +#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) + + +/** + * struct iwl_link_qual_general_params + * + * Used in REPLY_TX_LINK_QUALITY_CMD + */ +struct iwl_link_qual_general_params { + u8 flags; + + /* No entries at or above this (driver chosen) index contain MIMO */ + u8 mimo_delimiter; + + /* Best single antenna to use for single stream (legacy, SISO). */ + u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ + + /* Best antennas to use for MIMO (unused for 4965, assumes both). */ + u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ + + /* + * If driver needs to use different initial rates for different + * EDCA QOS access categories (as implemented by tx fifos 0-3), + * this table will set that up, by indicating the indexes in the + * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. + * Otherwise, driver should set all entries to 0. + * + * Entry usage: + * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice + * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. + */ + u8 start_rate_index[LINK_QUAL_AC_NUM]; +} __packed; + +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ +#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) +#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) + +#define LINK_QUAL_AGG_DISABLE_START_DEF (3) +#define LINK_QUAL_AGG_DISABLE_START_MAX (255) +#define LINK_QUAL_AGG_DISABLE_START_MIN (0) + +#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) + +/** + * struct iwl_link_qual_agg_params + * + * Used in REPLY_TX_LINK_QUALITY_CMD + */ +struct iwl_link_qual_agg_params { + + /* + *Maximum number of uSec in aggregation. + * default set to 4000 (4 milliseconds) if not configured in .cfg + */ + __le16 agg_time_limit; + + /* + * Number of Tx retries allowed for a frame, before that frame will + * no longer be considered for the start of an aggregation sequence + * (scheduler will then try to tx it as single frame). + * Driver should set this to 3. + */ + u8 agg_dis_start_th; + + /* + * Maximum number of frames in aggregation. + * 0 = no limit (default). 1 = no aggregation. + * Other values = max # frames in aggregation. 
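+ *
+ * As a hedged example (not taken from the driver; the local variable is
+ * hypothetical), a configuration that simply uses the defaults defined
+ * above would be:
+ *
+ *    struct iwl_link_qual_agg_params agg = {
+ *        .agg_time_limit      = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF),
+ *        .agg_dis_start_th    = LINK_QUAL_AGG_DISABLE_START_DEF,
+ *        .agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF,
+ *    };
+ *
+ * i.e. a 4 millisecond aggregation window, 3 retries before a frame is
+ * sent unaggregated, and a cap of 63 frames per aggregate.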
+ */ + u8 agg_frame_cnt_limit; + + __le32 reserved; +} __packed; + +/* + * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) + * + * For agn devices + * + * Each station in the agn device's internal station table has its own table + * of 16 + * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when + * an ACK is not received. This command replaces the entire table for + * one station. + * + * NOTE: Station must already be in agn device's station table. + * Use REPLY_ADD_STA. + * + * The rate scaling procedures described below work well. Of course, other + * procedures are possible, and may work better for particular environments. + * + * + * FILLING THE RATE TABLE + * + * Given a particular initial rate and mode, as determined by the rate + * scaling algorithm described below, the Linux driver uses the following + * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the + * Link Quality command: + * + * + * 1) If using High-throughput (HT) (SISO or MIMO) initial rate: + * a) Use this same initial rate for first 3 entries. + * b) Find next lower available rate using same mode (SISO or MIMO), + * use for next 3 entries. If no lower rate available, switch to + * legacy mode (no HT40 channel, no MIMO, no short guard interval). + * c) If using MIMO, set command's mimo_delimiter to number of entries + * using MIMO (3 or 6). + * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel, + * no MIMO, no short guard interval), at the next lower bit rate + * (e.g. if second HT bit rate was 54, try 48 legacy), and follow + * legacy procedure for remaining table entries. + * + * 2) If using legacy initial rate: + * a) Use the initial rate for only one entry. + * b) For each following entry, reduce the rate to next lower available + * rate, until reaching the lowest available rate. + * c) When reducing rate, also switch antenna selection. + * d) Once lowest available rate is reached, repeat this rate until + * rate table is filled (16 entries), switching antenna each entry. + * + * + * ACCUMULATING HISTORY + * + * The rate scaling algorithm for agn devices, as implemented in Linux driver, + * uses two sets of frame Tx success history: One for the current/active + * modulation mode, and one for a speculative/search mode that is being + * attempted. If the speculative mode turns out to be more effective (i.e. + * actual transfer rate is better), then the driver continues to use the + * speculative mode as the new current active mode. + * + * Each history set contains, separately for each possible rate, data for a + * sliding window of the 62 most recent tx attempts at that rate. The data + * includes a shifting bitmap of success(1)/failure(0), and sums of successful + * and attempted frames, from which the driver can additionally calculate a + * success ratio (success / attempted) and number of failures + * (attempted - success), and control the size of the window (attempted). + * The driver uses the bit map to remove successes from the success sum, as + * the oldest tx attempts fall out of the window. + * + * When the agn device makes multiple tx attempts for a given frame, each + * attempt might be at a different rate, and have different modulation + * characteristics (e.g. antenna, fat channel, short guard interval), as set + * up in the rate scaling table in the Link Quality command. 
The driver must + * determine which rate table entry was used for each tx attempt, to determine + * which rate-specific history to update, and record only those attempts that + * match the modulation characteristics of the history set. + * + * When using block-ack (aggregation), all frames are transmitted at the same + * rate, since there is no per-attempt acknowledgment from the destination + * station. The Tx response struct iwl_tx_resp indicates the Tx rate in + * rate_n_flags field. After receiving a block-ack, the driver can update + * history for the entire block all at once. + * + * + * FINDING BEST STARTING RATE: + * + * When working with a selected initial modulation mode (see below), the + * driver attempts to find a best initial rate. The initial rate is the + * first entry in the Link Quality command's rate table. + * + * 1) Calculate actual throughput (success ratio * expected throughput, see + * table below) for current initial rate. Do this only if enough frames + * have been attempted to make the value meaningful: at least 6 failed + * tx attempts, or at least 8 successes. If not enough, don't try rate + * scaling yet. + * + * 2) Find available rates adjacent to current initial rate. Available means: + * a) supported by hardware && + * b) supported by association && + * c) within any constraints selected by user + * + * 3) Gather measured throughputs for adjacent rates. These might not have + * enough history to calculate a throughput. That's okay, we might try + * using one of them anyway! + * + * 4) Try decreasing rate if, for current rate: + * a) success ratio is < 15% || + * b) lower adjacent rate has better measured throughput || + * c) higher adjacent rate has worse throughput, and lower is unmeasured + * + * As a sanity check, if decrease was determined above, leave rate + * unchanged if: + * a) lower rate unavailable + * b) success ratio at current rate > 85% (very good) + * c) current measured throughput is better than expected throughput + * of lower rate (under perfect 100% tx conditions, see table below) + * + * 5) Try increasing rate if, for current rate: + * a) success ratio is < 15% || + * b) both adjacent rates' throughputs are unmeasured (try it!) || + * b) higher adjacent rate has better measured throughput || + * c) lower adjacent rate has worse throughput, and higher is unmeasured + * + * As a sanity check, if increase was determined above, leave rate + * unchanged if: + * a) success ratio at current rate < 70%. This is not particularly + * good performance; higher rate is sure to have poorer success. + * + * 6) Re-evaluate the rate after each tx frame. If working with block- + * acknowledge, history and statistics may be calculated for the entire + * block (including prior history that fits within the history windows), + * before re-evaluation. + * + * FINDING BEST STARTING MODULATION MODE: + * + * After working with a modulation mode for a "while" (and doing rate scaling), + * the driver searches for a new initial mode in an attempt to improve + * throughput. 
The "while" is measured by numbers of attempted frames: + * + * For legacy mode, search for new mode after: + * 480 successful frames, or 160 failed frames + * For high-throughput modes (SISO or MIMO), search for new mode after: + * 4500 successful frames, or 400 failed frames + * + * Mode switch possibilities are (3 for each mode): + * + * For legacy: + * Change antenna, try SISO (if HT association), try MIMO (if HT association) + * For SISO: + * Change antenna, try MIMO, try shortened guard interval (SGI) + * For MIMO: + * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI) + * + * When trying a new mode, use the same bit rate as the old/current mode when + * trying antenna switches and shortened guard interval. When switching to + * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate + * for which the expected throughput (under perfect conditions) is about the + * same or slightly better than the actual measured throughput delivered by + * the old/current mode. + * + * Actual throughput can be estimated by multiplying the expected throughput + * by the success ratio (successful / attempted tx frames). Frame size is + * not considered in this calculation; it assumes that frame size will average + * out to be fairly consistent over several samples. The following are + * metric values for expected throughput assuming 100% success ratio. + * Only G band has support for CCK rates: + * + * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60 + * + * G: 7 13 35 58 40 57 72 98 121 154 177 186 186 + * A: 0 0 0 0 40 57 72 98 121 154 177 186 186 + * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202 + * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211 + * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251 + * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257 + * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257 + * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264 + * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289 + * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293 + * + * After the new mode has been tried for a short while (minimum of 6 failed + * frames or 8 successful frames), compare success ratio and actual throughput + * estimate of the new mode with the old. If either is better with the new + * mode, continue to use the new mode. + * + * Continue comparing modes until all 3 possibilities have been tried. + * If moving from legacy to HT, try all 3 possibilities from the new HT + * mode. After trying all 3, a best mode is found. Continue to use this mode + * for the longer "while" described above (e.g. 480 successful frames for + * legacy), and then repeat the search process. + * + */ +struct iwl_link_quality_cmd { + + /* Index of destination/recipient station in uCode's station table */ + u8 sta_id; + u8 reserved1; + __le16 control; /* not used */ + struct iwl_link_qual_general_params general_params; + struct iwl_link_qual_agg_params agg_params; + + /* + * Rate info; when using rate-scaling, Tx command's initial_rate_index + * specifies 1st Tx rate attempted, via index into this table. + * agn devices works its way through table when retrying Tx. 
+ */ + struct { + __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ + } rs_table[LINK_QUAL_MAX_RETRY_NUM]; + __le32 reserved2; +} __packed; + +/* + * BT configuration enable flags: + * bit 0 - 1: BT channel announcement enabled + * 0: disable + * bit 1 - 1: priority of BT device enabled + * 0: disable + * bit 2 - 1: BT 2 wire support enabled + * 0: disable + */ +#define BT_COEX_DISABLE (0x0) +#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0) +#define BT_ENABLE_PRIORITY BIT(1) +#define BT_ENABLE_2_WIRE BIT(2) + +#define BT_COEX_DISABLE (0x0) +#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY) + +#define BT_LEAD_TIME_MIN (0x0) +#define BT_LEAD_TIME_DEF (0x1E) +#define BT_LEAD_TIME_MAX (0xFF) + +#define BT_MAX_KILL_MIN (0x1) +#define BT_MAX_KILL_DEF (0x5) +#define BT_MAX_KILL_MAX (0xFF) + +#define BT_DURATION_LIMIT_DEF 625 +#define BT_DURATION_LIMIT_MAX 1250 +#define BT_DURATION_LIMIT_MIN 625 + +#define BT_ON_THRESHOLD_DEF 4 +#define BT_ON_THRESHOLD_MAX 1000 +#define BT_ON_THRESHOLD_MIN 1 + +#define BT_FRAG_THRESHOLD_DEF 0 +#define BT_FRAG_THRESHOLD_MAX 0 +#define BT_FRAG_THRESHOLD_MIN 0 + +#define BT_AGG_THRESHOLD_DEF 1200 +#define BT_AGG_THRESHOLD_MAX 8000 +#define BT_AGG_THRESHOLD_MIN 400 + +/* + * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) + * + * agn devices support hardware handshake with Bluetooth device on + * same platform. Bluetooth device alerts wireless device when it will Tx; + * wireless device can delay or kill its own Tx to accommodate. + */ +struct iwl_bt_cmd { + u8 flags; + u8 lead_time; + u8 max_kill; + u8 reserved; + __le32 kill_ack_mask; + __le32 kill_cts_mask; +} __packed; + +#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION BIT(0) + +#define IWLAGN_BT_FLAG_COEX_MODE_MASK (BIT(3)|BIT(4)|BIT(5)) +#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT 3 +#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED 0 +#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W 1 +#define IWLAGN_BT_FLAG_COEX_MODE_3W 2 +#define IWLAGN_BT_FLAG_COEX_MODE_4W 3 + +#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6) +/* Disable Sync PSPoll on SCO/eSCO */ +#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7) + +#define IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD -75 /* dBm */ +#define IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD -65 /* dBm */ + +#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF +#define IWLAGN_BT_PRIO_BOOST_MIN 0x00 +#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0 +#define IWLAGN_BT_PRIO_BOOST_DEFAULT32 0xF0F0F0F0 + +#define IWLAGN_BT_MAX_KILL_DEFAULT 5 + +#define IWLAGN_BT3_T7_DEFAULT 1 + +enum iwl_bt_kill_idx { + IWL_BT_KILL_DEFAULT = 0, + IWL_BT_KILL_OVERRIDE = 1, + IWL_BT_KILL_REDUCE = 2, +}; + +#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000) +#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000) +#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff) +#define IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE cpu_to_le32(0) + +#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2 + +#define IWLAGN_BT3_T2_DEFAULT 0xc + +#define IWLAGN_BT_VALID_ENABLE_FLAGS cpu_to_le16(BIT(0)) +#define IWLAGN_BT_VALID_BOOST cpu_to_le16(BIT(1)) +#define IWLAGN_BT_VALID_MAX_KILL cpu_to_le16(BIT(2)) +#define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3)) +#define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4)) +#define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5)) +#define IWLAGN_BT_VALID_REDUCED_TX_PWR cpu_to_le16(BIT(6)) +#define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7)) + +#define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \ + IWLAGN_BT_VALID_BOOST | \ + IWLAGN_BT_VALID_MAX_KILL | \ + IWLAGN_BT_VALID_3W_TIMERS | \ + 
IWLAGN_BT_VALID_KILL_ACK_MASK | \ + IWLAGN_BT_VALID_KILL_CTS_MASK | \ + IWLAGN_BT_VALID_REDUCED_TX_PWR | \ + IWLAGN_BT_VALID_3W_LUT) + +#define IWLAGN_BT_REDUCED_TX_PWR BIT(0) + +#define IWLAGN_BT_DECISION_LUT_SIZE 12 + +struct iwl_basic_bt_cmd { + u8 flags; + u8 ledtime; /* unused */ + u8 max_kill; + u8 bt3_timer_t7_value; + __le32 kill_ack_mask; + __le32 kill_cts_mask; + u8 bt3_prio_sample_time; + u8 bt3_timer_t2_value; + __le16 bt4_reaction_time; /* unused */ + __le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE]; + /* + * bit 0: use reduced tx power for control frame + * bit 1 - 7: reserved + */ + u8 reduce_txpower; + u8 reserved; + __le16 valid; +}; + +struct iwl_bt_cmd_v1 { + struct iwl_basic_bt_cmd basic; + u8 prio_boost; + /* + * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask + * if configure the following patterns + */ + u8 tx_prio_boost; /* SW boost of WiFi tx priority */ + __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ +}; + +struct iwl_bt_cmd_v2 { + struct iwl_basic_bt_cmd basic; + __le32 prio_boost; + /* + * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask + * if configure the following patterns + */ + u8 reserved; + u8 tx_prio_boost; /* SW boost of WiFi tx priority */ + __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ +}; + +#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0)) + +struct iwlagn_bt_sco_cmd { + __le32 flags; +}; + +/****************************************************************************** + * (6) + * Spectrum Management (802.11h) Commands, Responses, Notifications: + * + *****************************************************************************/ + +/* + * Spectrum Management + */ +#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \ + RXON_FILTER_CTL2HOST_MSK | \ + RXON_FILTER_ACCEPT_GRP_MSK | \ + RXON_FILTER_DIS_DECRYPT_MSK | \ + RXON_FILTER_DIS_GRP_DECRYPT_MSK | \ + RXON_FILTER_ASSOC_MSK | \ + RXON_FILTER_BCON_AWARE_MSK) + +struct iwl_measure_channel { + __le32 duration; /* measurement duration in extended beacon + * format */ + u8 channel; /* channel to measure */ + u8 type; /* see enum iwl_measure_type */ + __le16 reserved; +} __packed; + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) + */ +struct iwl_spectrum_cmd { + __le16 len; /* number of bytes starting from token */ + u8 token; /* token id */ + u8 id; /* measurement id -- 0 or 1 */ + u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */ + u8 periodic; /* 1 = periodic */ + __le16 path_loss_timeout; + __le32 start_time; /* start time in extended beacon format */ + __le32 reserved2; + __le32 flags; /* rxon flags */ + __le32 filter_flags; /* rxon filter flags */ + __le16 channel_count; /* minimum 1, maximum 10 */ + __le16 reserved3; + struct iwl_measure_channel channels[10]; +} __packed; + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) + */ +struct iwl_spectrum_resp { + u8 token; + u8 id; /* id of the prior command replaced, or 0xff */ + __le16 status; /* 0 - command will be handled + * 1 - cannot handle (conflicts with another + * measurement) */ +} __packed; + +enum iwl_measurement_state { + IWL_MEASUREMENT_START = 0, + IWL_MEASUREMENT_STOP = 1, +}; + +enum iwl_measurement_status { + IWL_MEASUREMENT_OK = 0, + IWL_MEASUREMENT_CONCURRENT = 1, + IWL_MEASUREMENT_CSA_CONFLICT = 2, + IWL_MEASUREMENT_TGH_CONFLICT = 3, + /* 4-5 reserved */ + IWL_MEASUREMENT_STOPPED = 6, + IWL_MEASUREMENT_TIMEOUT = 7, + IWL_MEASUREMENT_PERIODIC_FAILED = 8, +}; + +#define NUM_ELEMENTS_IN_HISTOGRAM 8 + +struct iwl_measurement_histogram { + __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 
0.8usec counts */
+ __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
+} __packed;
+
+/* clear channel availability counters */
+struct iwl_measurement_cca_counters {
+ __le32 ofdm;
+ __le32 cck;
+} __packed;
+
+enum iwl_measure_type {
+ IWL_MEASURE_BASIC = (1 << 0),
+ IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
+ IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+ IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+ IWL_MEASURE_FRAME = (1 << 4),
+ /* bits 5:6 are reserved */
+ IWL_MEASURE_IDLE = (1 << 7),
+};
+
+/*
+ * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ */
+struct iwl_spectrum_notification {
+ u8 id; /* measurement id -- 0 or 1 */
+ u8 token;
+ u8 channel_index; /* index in measurement channel list */
+ u8 state; /* 0 - start, 1 - stop */
+ __le32 start_time; /* lower 32-bits of TSF */
+ u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
+ u8 channel;
+ u8 type; /* see enum iwl_measurement_type */
+ u8 reserved1;
+ /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
+ * valid if applicable for measurement type requested. */
+ __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
+ __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
+ __le32 cca_time; /* channel load time in usecs */
+ u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
+ * unidentified */
+ u8 reserved2[3];
+ struct iwl_measurement_histogram histogram;
+ __le32 stop_time; /* lower 32-bits of TSF */
+ __le32 status; /* see iwl_measurement_status */
+} __packed;
+
+/******************************************************************************
+ * (7)
+ * Power Management Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * @flags: See below:
+ *
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * PM allow:
+ *   bit 0 - '0' Driver does not allow power management
+ *           '1' Driver allows PM (use rest of parameters)
+ *
+ * uCode sends sleep notifications:
+ *   bit 1 - '0' Don't send sleep notification
+ *           '1' send sleep notification (SEND_PM_NOTIFICATION)
+ *
+ * Sleep over DTIM
+ *   bit 2 - '0' PM has to wake up every DTIM
+ *           '1' PM may sleep over DTIM until the listen interval.
+ *
+ * PCI power managed
+ *   bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
+ *           '1' !(PCI_CFG_LINK_CTRL & 0x1)
+ *
+ * Fast PD
+ *   bit 4 - '1' Put radio to sleep when receiving frame for others
+ *
+ * Force sleep Modes
+ *   bit 31/30 - '00' use both mac/xtal sleeps
+ *               '01' force Mac sleep
+ *               '10' force xtal sleep
+ *               '11' Illegal set
+ *
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * uCode assumes sleep over DTIM is allowed and we don't need to wake up
+ * for every DTIM.
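+ *
+ * For example (an illustrative sketch, not code from this driver; the
+ * keep-alive and timeout values are arbitrary), a power-save setting that
+ * allows sleeping over DTIM could be expressed as:
+ *
+ *    struct iwl_powertable_cmd cmd = {
+ *        .flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
+ *                 IWL_POWER_SLEEP_OVER_DTIM_MSK |
+ *                 IWL_POWER_PCI_PM_MSK,
+ *        .keep_alive_seconds = 25,
+ *        .rx_data_timeout = cpu_to_le32(25 * 1024),
+ *        .tx_data_timeout = cpu_to_le32(25 * 1024),
+ *    };
+ *
+ * using the IWL_POWER_* flag masks and struct iwl_powertable_cmd defined
+ * just below.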
+ */ +#define IWL_POWER_VEC_SIZE 5 + +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) +#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0)) +#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1)) +#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2)) +#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) +#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4)) +#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5)) +#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6)) +#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7)) +#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8)) +#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9)) + +struct iwl_powertable_cmd { + __le16 flags; + u8 keep_alive_seconds; + u8 debug_flags; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 keep_alive_beacons; +} __packed; + +/* + * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) + * all devices identical. + */ +struct iwl_sleep_notification { + u8 pm_sleep_mode; + u8 pm_wakeup_src; + __le16 reserved; + __le32 sleep_time; + __le32 tsf_low; + __le32 bcon_timer; +} __packed; + +/* Sleep states. all devices identical. */ +enum { + IWL_PM_NO_SLEEP = 0, + IWL_PM_SLP_MAC = 1, + IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, + IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, + IWL_PM_SLP_PHY = 4, + IWL_PM_SLP_REPENT = 5, + IWL_PM_WAKEUP_BY_TIMER = 6, + IWL_PM_WAKEUP_BY_DRIVER = 7, + IWL_PM_WAKEUP_BY_RFKILL = 8, + /* 3 reserved */ + IWL_PM_NUM_OF_MODES = 12, +}; + +/* + * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response) + */ +#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */ +#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */ +#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ +struct iwl_card_state_cmd { + __le32 status; /* CARD_STATE_CMD_* request new power state */ +} __packed; + +/* + * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) + */ +struct iwl_card_state_notif { + __le32 flags; +} __packed; + +#define HW_CARD_DISABLED 0x01 +#define SW_CARD_DISABLED 0x02 +#define CT_CARD_DISABLED 0x04 +#define RXON_CARD_DISABLED 0x10 + +struct iwl_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +} __packed; + +/* 1000, and 6x00 */ +struct iwl_ct_kill_throttling_config { + __le32 critical_temperature_exit; + __le32 reserved; + __le32 critical_temperature_enter; +} __packed; + +/****************************************************************************** + * (8) + * Scan Commands, Responses, Notifications: + * + *****************************************************************************/ + +#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0) +#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) + +/** + * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table + * + * One for each channel in the scan list. + * Each channel can independently select: + * 1) SSID for directed active scans + * 2) Txpower setting (for rate specified within Tx command) + * 3) How long to stay on-channel (behavior may be modified by quiet_time, + * quiet_plcp_th, good_CRC_th) + * + * To avoid uCode errors, make sure the following are true (see comments + * under struct iwl_scan_cmd about max_out_time and quiet_time): + * 1) If using passive_dwell (i.e. passive_dwell != 0): + * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) + * 2) quiet_time <= active_dwell + * 3) If restricting off-channel time (i.e. 
max_out_time !=0): + * passive_dwell < max_out_time + * active_dwell < max_out_time + */ + +struct iwl_scan_channel { + /* + * type is defined as: + * 0:0 1 = active, 0 = passive + * 1:20 SSID direct bit map; if a bit is set, then corresponding + * SSID IE is transmitted in probe request. + * 21:31 reserved + */ + __le32 type; + __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ + __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ + __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ +} __packed; + +/* set number of direct probes __le32 type */ +#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) + +/** + * struct iwl_ssid_ie - directed scan network information element + * + * Up to 20 of these may appear in REPLY_SCAN_CMD, + * selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 entries. + * SSID IEs get transmitted in reverse order of entry. + */ +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[32]; +} __packed; + +#define PROBE_OPTION_MAX 20 +#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) +#define IWL_GOOD_CRC_TH_DISABLED 0 +#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) +#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) +#define IWL_MAX_CMD_SIZE 4096 + +/* + * REPLY_SCAN_CMD = 0x80 (command) + * + * The hardware scan command is very powerful; the driver can set it up to + * maintain (relatively) normal network traffic while doing a scan in the + * background. The max_out_time and suspend_time control the ratio of how + * long the device stays on an associated network channel ("service channel") + * vs. how long it's away from the service channel, i.e. tuned to other channels + * for scanning. + * + * max_out_time is the max time off-channel (in usec), and suspend_time + * is how long (in "extended beacon" format) that the scan is "suspended" + * after returning to the service channel. That is, suspend_time is the + * time that we stay on the service channel, doing normal work, between + * scan segments. The driver may set these parameters differently to support + * scanning when associated vs. not associated, and light vs. heavy traffic + * loads when associated. + * + * After receiving this command, the device's scan engine does the following; + * + * 1) Sends SCAN_START notification to driver + * 2) Checks to see if it has time to do scan for one channel + * 3) Sends NULL packet, with power-save (PS) bit set to 1, + * to tell AP that we're going off-channel + * 4) Tunes to first channel in scan list, does active or passive scan + * 5) Sends SCAN_RESULT notification to driver + * 6) Checks to see if it has time to do scan on *next* channel in list + * 7) Repeats 4-6 until it no longer has time to scan the next channel + * before max_out_time expires + * 8) Returns to service channel + * 9) Sends NULL packet with PS=0 to tell AP that we're back + * 10) Stays on service channel until suspend_time expires + * 11) Repeats entire process 2-10 until list is complete + * 12) Sends SCAN_COMPLETE notification + * + * For fast, efficient scans, the scan command also has support for staying on + * a channel for just a short time, if doing active scanning and getting no + * responses to the transmitted probe request. 
This time is controlled by + * quiet_time, and the number of received packets below which a channel is + * considered "quiet" is controlled by quiet_plcp_threshold. + * + * For active scanning on channels that have regulatory restrictions against + * blindly transmitting, the scan can listen before transmitting, to make sure + * that there is already legitimate activity on the channel. If enough + * packets are cleanly received on the channel (controlled by good_CRC_th, + * typical value 1), the scan engine starts transmitting probe requests. + * + * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. + * + * To avoid uCode errors, see timing restrictions described under + * struct iwl_scan_channel. + */ + +enum iwl_scan_flags { + /* BIT(0) currently unused */ + IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1), + /* bits 2-7 reserved */ +}; + +struct iwl_scan_cmd { + __le16 len; + u8 scan_flags; /* scan flags: see enum iwl_scan_flags */ + u8 channel_count; /* # channels in channel list */ + __le16 quiet_time; /* dwell only this # millisecs on quiet channel + * (only for active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ + __le16 rx_chain; /* RXON_RX_CHAIN_* */ + __le32 max_out_time; /* max usec to be away from associated (service) + * channel */ + __le32 suspend_time; /* pause scan this long (in "extended beacon + * format") when returning to service chnl: + */ + __le32 flags; /* RXON_FLG_* */ + __le32 filter_flags; /* RXON_FILTER_* */ + + /* For active scans (set to all-0s for passive scans). + * Does not include payload. Must specify Tx rate; no rate scaling. */ + struct iwl_tx_cmd tx_cmd; + + /* For directed active scans (set to all-0s otherwise) */ + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + + /* + * Probe request frame, followed by channel list. + * + * Size of probe request frame is specified by byte count in tx_cmd. + * Channel list follows immediately after probe request frame. + * Number of channels in list is specified by channel_count. + * Each channel in list is of type: + * + * struct iwl_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * before requesting another scan. + */ + u8 data[0]; +} __packed; + +/* Can abort will notify by complete notification with abort status. 
*/ +#define CAN_ABORT_STATUS cpu_to_le32(0x1) +/* complete notification statuses */ +#define ABORT_STATUS 0x2 + +/* + * REPLY_SCAN_CMD = 0x80 (response) + */ +struct iwl_scanreq_notification { + __le32 status; /* 1: okay, 2: cannot fulfill request */ +} __packed; + +/* + * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) + */ +struct iwl_scanstart_notification { + __le32 tsf_low; + __le32 tsf_high; + __le32 beacon_timer; + u8 channel; + u8 band; + u8 reserved[2]; + __le32 status; +} __packed; + +#define SCAN_OWNER_STATUS 0x1 +#define MEASURE_OWNER_STATUS 0x2 + +#define IWL_PROBE_STATUS_OK 0 +#define IWL_PROBE_STATUS_TX_FAILED BIT(0) +/* error statuses combined with TX_FAILED */ +#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) +#define IWL_PROBE_STATUS_FAIL_BT BIT(2) + +#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ +/* + * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) + */ +struct iwl_scanresults_notification { + u8 channel; + u8 band; + u8 probe_status; + u8 num_probe_not_sent; /* not enough time to send */ + __le32 tsf_low; + __le32 tsf_high; + __le32 statistics[NUMBER_OF_STATISTICS]; +} __packed; + +/* + * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) + */ +struct iwl_scancomplete_notification { + u8 scanned_channels; + u8 status; + u8 bt_status; /* BT On/Off status */ + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; +} __packed; + + +/****************************************************************************** + * (9) + * IBSS/AP Commands and Notifications: + * + *****************************************************************************/ + +enum iwl_ibss_manager { + IWL_NOT_IBSS_MANAGER = 0, + IWL_IBSS_MANAGER = 1, +}; + +/* + * BEACON_NOTIFICATION = 0x90 (notification only, not a command) + */ + +struct iwlagn_beacon_notif { + struct iwlagn_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __packed; + +/* + * REPLY_TX_BEACON = 0x91 (command, has simple generic response) + */ + +struct iwl_tx_beacon_cmd { + struct iwl_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __packed; + +/****************************************************************************** + * (10) + * Statistics Commands and Notifications: + * + *****************************************************************************/ + +#define IWL_TEMP_CONVERT 260 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/* Used for passing to driver number of successes and failures per rate */ +struct rate_histogram { + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } success; + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } failed; +} __packed; + +/* statistics command response */ + +struct statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + __le32 reserved[3]; +} __packed; + +struct statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 
sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved3; +} __packed; + +struct statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __packed; + +#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) + +struct statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. */ + __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; +} __packed; + +struct statistics_rx_non_phy_bt { + struct statistics_rx_non_phy common; + /* additional stats for bt */ + __le32 num_bt_kills; + __le32 reserved[2]; +} __packed; + +struct statistics_rx { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy general; + struct statistics_rx_ht_phy ofdm_ht; +} __packed; + +struct statistics_rx_bt { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy_bt general; + struct statistics_rx_ht_phy ofdm_ht; +} __packed; + +/** + * struct statistics_tx_power - current tx power + * + * @ant_a: current tx power on chain a in 1/2 dB step + * @ant_b: current tx power on chain b in 1/2 dB step + * @ant_c: current tx power on chain c in 1/2 dB step + */ +struct statistics_tx_power { + u8 ant_a; + u8 ant_b; + u8 ant_c; + u8 reserved; +} __packed; + +struct statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; +} __packed; + +struct statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct statistics_tx_non_phy_agg agg; + /* + * "tx_power" are 
optional parameters provided by uCode, + * 6000 series is the only device provide the information, + * Those are reserved fields for all the other devices + */ + struct statistics_tx_power tx_power; + __le32 reserved1; +} __packed; + + +struct statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 reserved1; + __le32 reserved2; +} __packed; + +struct statistics_general_common { + __le32 temperature; /* radio temperature */ + __le32 temperature_m; /* radio voltage */ + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct statistics_div div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; +} __packed; + +struct statistics_bt_activity { + /* Tx statistics */ + __le32 hi_priority_tx_req_cnt; + __le32 hi_priority_tx_denied_cnt; + __le32 lo_priority_tx_req_cnt; + __le32 lo_priority_tx_denied_cnt; + /* Rx statistics */ + __le32 hi_priority_rx_req_cnt; + __le32 hi_priority_rx_denied_cnt; + __le32 lo_priority_rx_req_cnt; + __le32 lo_priority_rx_denied_cnt; +} __packed; + +struct statistics_general { + struct statistics_general_common common; + __le32 reserved2; + __le32 reserved3; +} __packed; + +struct statistics_general_bt { + struct statistics_general_common common; + struct statistics_bt_activity activity; + __le32 reserved2; + __le32 reserved3; +} __packed; + +#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) +#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) +#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) + +/* + * REPLY_STATISTICS_CMD = 0x9c, + * all devices identical. + * + * This command triggers an immediate response containing uCode statistics. + * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. + * + * If the CLEAR_STATS configuration flag is set, uCode will clear its + * internal copy of the statistics (counters) after issuing the response. + * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). + * + * If the DISABLE_NOTIF configuration flag is set, uCode will not issue + * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag + * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. + */ +#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ +#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ +struct iwl_statistics_cmd { + __le32 configuration_flags; /* IWL_STATS_CONF_* */ +} __packed; + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * REPLY_STATISTICS_CMD 0x9c, above. + * + * Statistics counters continue to increment beacon after beacon, but are + * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD + * 0x9c with CLEAR_STATS bit set (see above). + * + * uCode also issues this notification during scans. uCode clears statistics + * appropriately so that each notification contains statistics for only the + * one channel that has just been scanned. 
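+ *
+ * As an illustration (not taken from the driver), a statistics request
+ * that also clears the uCode counters is simply:
+ *
+ *    struct iwl_statistics_cmd cmd = {
+ *        .configuration_flags = IWL_STATS_CONF_CLEAR_STATS,
+ *    };
+ *
+ * and, because the counters otherwise accumulate, per-beacon figures are
+ * obtained by subtracting the previously reported value, e.g.
+ *
+ *    delta = le32_to_cpu(cur->rx.ofdm.plcp_err) -
+ *            le32_to_cpu(prev->rx.ofdm.plcp_err);
+ *
+ * where "cur" and "prev" are assumed pointers to two consecutive
+ * struct iwl_notif_statistics notifications.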
+ */
+#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
+#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
+
+struct iwl_notif_statistics {
+ __le32 flag;
+ struct statistics_rx rx;
+ struct statistics_tx tx;
+ struct statistics_general general;
+} __packed;
+
+struct iwl_bt_notif_statistics {
+ __le32 flag;
+ struct statistics_rx_bt rx;
+ struct statistics_tx tx;
+ struct statistics_general_bt general;
+} __packed;
+
+/*
+ * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
+ * missed beacons, regardless of how many were missed. When the driver
+ * receives the notification it finds, inside the command, all of the beacon
+ * information: the total number of missed beacons, the number of consecutive
+ * missed beacons, the number of beacons received, and the number of beacons
+ * expected.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio to
+ * bring the radio/PHY back to a working state; this has no relation to when
+ * the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when to
+ * perform sensitivity calibration, based on the number of consecutive missed
+ * beacons, in order to improve overall performance, especially in noisy
+ * environments.
+ *
+ */
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
+
+struct iwl_missed_beacon_notif {
+ __le32 consecutive_missed_beacons;
+ __le32 total_missed_becons;
+ __le32 num_expected_beacons;
+ __le32 num_recvd_beacons;
+} __packed;
+
+
+/******************************************************************************
+ * (11)
+ * Rx Calibration Commands:
+ *
+ * With the uCode used for open source drivers, most Tx calibration (except
+ * for Tx Power) and most Rx calibration is done by uCode during the
+ * "initialize" phase of uCode boot. Driver must calibrate only:
+ *
+ * 1) Tx power (depends on temperature), described elsewhere
+ * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
+ * 3) Receiver sensitivity (to optimize signal detection)
+ *
+ *****************************************************************************/
+
+/**
+ * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ *
+ * This command sets up the Rx signal detector for a sensitivity level that
+ * is high enough to lock onto all signals within the associated network,
+ * but low enough to ignore signals that are below a certain threshold, so as
+ * not to have too many "false alarms". False alarms are signals that the
+ * Rx DSP tries to lock onto, but then discards after determining that they
+ * are noise.
+ *
+ * The optimum number of false alarms is between 5 and 50 per 200 TUs
+ * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
+ * time listening, not transmitting). Driver must adjust sensitivity so that
+ * the ratio of actual false alarms to actual Rx time falls within this range.
+ *
+ * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * received beacon. These provide information to the driver to analyze the
+ * sensitivity. Don't analyze statistics that come in from scanning, or any
+ * other non-associated-network source.
Pertinent statistics include: + * + * From "general" statistics (struct statistics_rx_non_phy): + * + * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) + * Measure of energy of desired signal. Used for establishing a level + * below which the device does not detect signals. + * + * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB) + * Measure of background noise in silent period after beacon. + * + * channel_load + * uSecs of actual Rx time during beacon period (varies according to + * how much time was spent transmitting). + * + * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: + * + * false_alarm_cnt + * Signal locks abandoned early (before phy-level header). + * + * plcp_err + * Signal locks abandoned late (during phy-level header). + * + * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from + * beacon to beacon, i.e. each value is an accumulation of all errors + * before and including the latest beacon. Values will wrap around to 0 + * after counting up to 2^32 - 1. Driver must differentiate vs. + * previous beacon's values to determine # false alarms in the current + * beacon period. + * + * Total number of false alarms = false_alarms + plcp_errs + * + * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd + * (notice that the start points for OFDM are at or close to settings for + * maximum sensitivity): + * + * START / MIN / MAX + * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 + * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 + * + * If actual rate of OFDM false alarms (+ plcp_errors) is too high + * (greater than 50 for each 204.8 msecs listening), reduce sensitivity + * by *adding* 1 to all 4 of the table entries above, up to the max for + * each entry. Conversely, if false alarm rate is too low (less than 5 + * for each 204.8 msecs listening), *subtract* 1 from each entry to + * increase sensitivity. + * + * For CCK sensitivity, keep track of the following: + * + * 1). 20-beacon history of maximum background noise, indicated by + * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the + * 3 receivers. For any given beacon, the "silence reference" is + * the maximum of last 60 samples (20 beacons * 3 receivers). + * + * 2). 10-beacon history of strongest signal level, as indicated + * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers, + * i.e. the strength of the signal through the best receiver at the + * moment. These measurements are "upside down", with lower values + * for stronger signals, so max energy will be *minimum* value. + * + * Then for any given beacon, the driver must determine the *weakest* + * of the strongest signals; this is the minimum level that needs to be + * successfully detected, when using the best receiver at the moment. + * "Max cck energy" is the maximum (higher value means lower energy!) + * of the last 10 minima. Once this is determined, driver must add + * a little margin by adding "6" to it. + * + * 3). Number of consecutive beacon periods with too few false alarms. + * Reset this to 0 at the first beacon period that falls within the + * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). 
+ * + * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd + * (notice that the start points for CCK are at maximum sensitivity): + * + * START / MIN / MAX + * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 + * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 + * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 + * + * If actual rate of CCK false alarms (+ plcp_errors) is too high + * (greater than 50 for each 204.8 msecs listening), method for reducing + * sensitivity is: + * + * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * up to max 400. + * + * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, + * sensitivity has been reduced a significant amount; bring it up to + * a moderate 161. Otherwise, *add* 3, up to max 200. + * + * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, + * sensitivity has been reduced only a moderate or small amount; + * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, + * down to min 0. Otherwise (if gain has been significantly reduced), + * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. + * + * b) Save a snapshot of the "silence reference". + * + * If actual rate of CCK false alarms (+ plcp_errors) is too low + * (less than 5 for each 204.8 msecs listening), method for increasing + * sensitivity is used only if: + * + * 1a) Previous beacon did not have too many false alarms + * 1b) AND difference between previous "silence reference" and current + * "silence reference" (prev - current) is 2 or more, + * OR 2) 100 or more consecutive beacon periods have had rate of + * less than 5 false alarms per 204.8 milliseconds rx time. + * + * Method for increasing sensitivity: + * + * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, + * down to min 125. + * + * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * down to min 200. + * + * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. + * + * If actual rate of CCK false alarms (+ plcp_errors) is within good range + * (between 5 and 50 for each 204.8 msecs listening): + * + * 1) Save a snapshot of the silence reference. + * + * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), + * give some extra margin to energy threshold by *subtracting* 8 + * from value in HD_MIN_ENERGY_CCK_DET_INDEX. + * + * For all cases (too few, too many, good range), make sure that the CCK + * detection threshold (energy) is below the energy level for robust + * detection over the past 10 beacon periods, the "Max cck energy". + * Lower values mean higher energy; this means making sure that the value + * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 
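+ *
+ * A condensed sketch of the OFDM adjustment described above; "false_alarms",
+ * "plcp_errs", "rx_time_usec" and adjust_ofdm_table() are placeholders,
+ * not symbols from this driver:
+ *
+ *    u32 norm_fa = (false_alarms + plcp_errs) * (200 * 1024) / rx_time_usec;
+ *
+ *    if (norm_fa > 50)
+ *        adjust_ofdm_table(+1);
+ *    else if (norm_fa < 5)
+ *        adjust_ofdm_table(-1);
+ *
+ * i.e. the false-alarm count is normalized to the 200 TU listening interval
+ * before being compared against the 5..50 window; +1 adds 1 to each OFDM
+ * entry (clamped to its MAX) to reduce sensitivity, and -1 subtracts 1
+ * (clamped to its MIN) to increase it. The CCK rules above follow the same
+ * pattern, but with the larger steps and extra conditions listed.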
+ * + */ + +/* + * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) + */ +#define HD_TABLE_SIZE (11) /* number of entries */ +#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ +#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) +#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) +#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) +#define HD_OFDM_ENERGY_TH_IN_INDEX (10) + +/* + * Additional table entries in enhance SENSITIVITY_CMD + */ +#define HD_INA_NON_SQUARE_DET_OFDM_INDEX (11) +#define HD_INA_NON_SQUARE_DET_CCK_INDEX (12) +#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX (13) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX (14) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (15) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX (16) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX (17) +#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX (18) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (19) +#define HD_CCK_NON_SQUARE_DET_SLOPE_INDEX (20) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX (21) +#define HD_RESERVED (22) + +/* number of entries for enhanced tbl */ +#define ENHANCE_HD_TABLE_SIZE (23) + +/* number of additional entries for enhanced tbl */ +#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE) + +#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V1 cpu_to_le16(0) +#define HD_INA_NON_SQUARE_DET_CCK_DATA_V1 cpu_to_le16(0) +#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1 cpu_to_le16(0) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(668) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(486) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(37) +#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(853) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4) +#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(476) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(99) + +#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V2 cpu_to_le16(1) +#define HD_INA_NON_SQUARE_DET_CCK_DATA_V2 cpu_to_le16(1) +#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2 cpu_to_le16(1) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(600) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(40) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(486) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(45) +#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(853) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(60) +#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(476) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(99) + + +/* Control field in struct iwl_sensitivity_cmd */ +#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) +#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) + +/** + * struct iwl_sensitivity_cmd + * @control: (1) updates working table, (0) updates default table + * @table: energy threshold values, use HD_* as index into table + * + * Always use "1" in "control" to update uCode's working table and DSP. 
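+ *
+ * For example (a sketch only, not taken from the driver), an update of the
+ * working table using the OFDM and CCK start values from the tables above
+ * would look like:
+ *
+ *    struct iwl_sensitivity_cmd cmd = {
+ *        .control = SENSITIVITY_CMD_CONTROL_WORK_TABLE,
+ *    };
+ *
+ *    cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = cpu_to_le16(90);
+ *    cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX]        = cpu_to_le16(100);
+ *
+ * with the remaining HD_* entries filled in the same way before the
+ * SENSITIVITY_CMD host command is queued.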
+ */ +struct iwl_sensitivity_cmd { + __le16 control; /* always use "1" */ + __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ +} __packed; + +/* + * + */ +struct iwl_enhance_sensitivity_cmd { + __le16 control; /* always use "1" */ + __le16 enhance_table[ENHANCE_HD_TABLE_SIZE]; /* use HD_* as index */ +} __packed; + + +/** + * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) + * + * This command sets the relative gains of agn device's 3 radio receiver chains. + * + * After the first association, driver should accumulate signal and noise + * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 + * beacons from the associated network (don't collect statistics that come + * in from scanning, or any other non-network source). + * + * DISCONNECTED ANTENNA: + * + * Driver should determine which antennas are actually connected, by comparing + * average beacon signal levels for the 3 Rx chains. Accumulate (add) the + * following values over 20 beacons, one accumulator for each of the chains + * a/b/c, from struct statistics_rx_non_phy: + * + * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) + * + * Find the strongest signal from among a/b/c. Compare the other two to the + * strongest. If any signal is more than 15 dB (times 20, unless you + * divide the accumulated values by 20) below the strongest, the driver + * considers that antenna to be disconnected, and should not try to use that + * antenna/chain for Rx or Tx. If both A and B seem to be disconnected, + * driver should declare the stronger one as connected, and attempt to use it + * (A and B are the only 2 Tx chains!). + * + * + * RX BALANCE: + * + * Driver should balance the 3 receivers (but just the ones that are connected + * to antennas, see above) for gain, by comparing the average signal levels + * detected during the silence after each beacon (background noise). + * Accumulate (add) the following values over 20 beacons, one accumulator for + * each of the chains a/b/c, from struct statistics_rx_non_phy: + * + * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) + * + * Find the weakest background noise level from among a/b/c. This Rx chain + * will be the reference, with 0 gain adjustment. Attenuate other channels by + * finding noise difference: + * + * (accum_noise[i] - accum_noise[reference]) / 30 + * + * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. + * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the + * driver should limit the difference results to a range of 0-3 (0-4.5 dB), + * and set bit 2 to indicate "reduce gain". The value for the reference + * (weakest) chain should be "0". + * + * diff_gain_[abc] bit fields: + * 2: (1) reduce gain, (0) increase gain + * 1-0: amount of gain, units of 1.5 dB + */ + +/* Phy calibration command for series */ +enum { + IWL_PHY_CALIBRATE_DC_CMD = 8, + IWL_PHY_CALIBRATE_LO_CMD = 9, + IWL_PHY_CALIBRATE_TX_IQ_CMD = 11, + IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, + IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16, + IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17, + IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD = 18, +}; + +/* This enum defines the bitmap of various calibrations to enable in both + * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
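+ *
+ * One plausible use (a sketch, not code from this file) is to enable the
+ * whole init-time set in a single command, using the masks and structures
+ * defined below:
+ *
+ *    struct iwl_calib_cfg_cmd cmd = {
+ *        .ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL,
+ *        .ucd_calib_cfg.once.start     = IWL_CALIB_INIT_CFG_ALL,
+ *        .ucd_calib_cfg.once.send_res  = IWL_CALIB_INIT_CFG_ALL,
+ *        .ucd_calib_cfg.flags = IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK,
+ *    };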
+ */ +enum iwl_ucode_calib_cfg { + IWL_CALIB_CFG_RX_BB_IDX = BIT(0), + IWL_CALIB_CFG_DC_IDX = BIT(1), + IWL_CALIB_CFG_LO_IDX = BIT(2), + IWL_CALIB_CFG_TX_IQ_IDX = BIT(3), + IWL_CALIB_CFG_RX_IQ_IDX = BIT(4), + IWL_CALIB_CFG_NOISE_IDX = BIT(5), + IWL_CALIB_CFG_CRYSTAL_IDX = BIT(6), + IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(7), + IWL_CALIB_CFG_PAPD_IDX = BIT(8), + IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(9), + IWL_CALIB_CFG_TX_PWR_IDX = BIT(10), +}; + +#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \ + IWL_CALIB_CFG_DC_IDX | \ + IWL_CALIB_CFG_LO_IDX | \ + IWL_CALIB_CFG_TX_IQ_IDX | \ + IWL_CALIB_CFG_RX_IQ_IDX | \ + IWL_CALIB_CFG_CRYSTAL_IDX) + +#define IWL_CALIB_RT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \ + IWL_CALIB_CFG_DC_IDX | \ + IWL_CALIB_CFG_LO_IDX | \ + IWL_CALIB_CFG_TX_IQ_IDX | \ + IWL_CALIB_CFG_RX_IQ_IDX | \ + IWL_CALIB_CFG_TEMPERATURE_IDX | \ + IWL_CALIB_CFG_PAPD_IDX | \ + IWL_CALIB_CFG_TX_PWR_IDX | \ + IWL_CALIB_CFG_CRYSTAL_IDX) + +#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0)) + +struct iwl_calib_cfg_elmnt_s { + __le32 is_enable; + __le32 start; + __le32 send_res; + __le32 apply_res; + __le32 reserved; +} __packed; + +struct iwl_calib_cfg_status_s { + struct iwl_calib_cfg_elmnt_s once; + struct iwl_calib_cfg_elmnt_s perd; + __le32 flags; +} __packed; + +struct iwl_calib_cfg_cmd { + struct iwl_calib_cfg_status_s ucd_calib_cfg; + struct iwl_calib_cfg_status_s drv_calib_cfg; + __le32 reserved1; +} __packed; + +struct iwl_calib_hdr { + u8 op_code; + u8 first_group; + u8 groups_num; + u8 data_valid; +} __packed; + +struct iwl_calib_cmd { + struct iwl_calib_hdr hdr; + u8 data[0]; +} __packed; + +struct iwl_calib_xtal_freq_cmd { + struct iwl_calib_hdr hdr; + u8 cap_pin1; + u8 cap_pin2; + u8 pad[2]; +} __packed; + +#define DEFAULT_RADIO_SENSOR_OFFSET cpu_to_le16(2700) +struct iwl_calib_temperature_offset_cmd { + struct iwl_calib_hdr hdr; + __le16 radio_sensor_offset; + __le16 reserved; +} __packed; + +struct iwl_calib_temperature_offset_v2_cmd { + struct iwl_calib_hdr hdr; + __le16 radio_sensor_offset_high; + __le16 radio_sensor_offset_low; + __le16 burntVoltageRef; + __le16 reserved; +} __packed; + +/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ +struct iwl_calib_chain_noise_reset_cmd { + struct iwl_calib_hdr hdr; + u8 data[0]; +}; + +/* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */ +struct iwl_calib_chain_noise_gain_cmd { + struct iwl_calib_hdr hdr; + u8 delta_gain_1; + u8 delta_gain_2; + u8 pad[2]; +} __packed; + +/****************************************************************************** + * (12) + * Miscellaneous Commands: + * + *****************************************************************************/ + +/* + * LEDs Command & Response + * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) + * + * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), + * this command turns it on or off, or sets up a periodic blinking cycle. 
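+ *
+ * Example with illustrative values only (a 50% duty-cycle blink on the
+ * Link LED, using the struct iwl_led_cmd defined below):
+ *
+ *	struct iwl_led_cmd led_cmd = {
+ *		.interval = cpu_to_le32(1000),	/* length of one interval, uSec */
+ *		.id = 2,			/* Link LED */
+ *		.on = 10,			/* 10 intervals on ... */
+ *		.off = 10,			/* ... then 10 intervals off */
+ *	};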
+ */ +struct iwl_led_cmd { + __le32 interval; /* "interval" in uSec */ + u8 id; /* 1: Activity, 2: Link, 3: Tech */ + u8 off; /* # intervals off while blinking; + * "0", with >0 "on" value, turns LED on */ + u8 on; /* # intervals on while blinking; + * "0", regardless of "off", turns LED off */ + u8 reserved; +} __packed; + +/* + * station priority table entries + * also used as potential "events" value for both + * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD + */ + +/* + * COEX events entry flag masks + * RP - Requested Priority + * WP - Win Medium Priority: priority assigned when the contention has been won + */ +#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG (0x1) +#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG (0x2) +#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG (0x4) + +#define COEX_CU_UNASSOC_IDLE_RP 4 +#define COEX_CU_UNASSOC_MANUAL_SCAN_RP 4 +#define COEX_CU_UNASSOC_AUTO_SCAN_RP 4 +#define COEX_CU_CALIBRATION_RP 4 +#define COEX_CU_PERIODIC_CALIBRATION_RP 4 +#define COEX_CU_CONNECTION_ESTAB_RP 4 +#define COEX_CU_ASSOCIATED_IDLE_RP 4 +#define COEX_CU_ASSOC_MANUAL_SCAN_RP 4 +#define COEX_CU_ASSOC_AUTO_SCAN_RP 4 +#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP 4 +#define COEX_CU_RF_ON_RP 6 +#define COEX_CU_RF_OFF_RP 4 +#define COEX_CU_STAND_ALONE_DEBUG_RP 6 +#define COEX_CU_IPAN_ASSOC_LEVEL_RP 4 +#define COEX_CU_RSRVD1_RP 4 +#define COEX_CU_RSRVD2_RP 4 + +#define COEX_CU_UNASSOC_IDLE_WP 3 +#define COEX_CU_UNASSOC_MANUAL_SCAN_WP 3 +#define COEX_CU_UNASSOC_AUTO_SCAN_WP 3 +#define COEX_CU_CALIBRATION_WP 3 +#define COEX_CU_PERIODIC_CALIBRATION_WP 3 +#define COEX_CU_CONNECTION_ESTAB_WP 3 +#define COEX_CU_ASSOCIATED_IDLE_WP 3 +#define COEX_CU_ASSOC_MANUAL_SCAN_WP 3 +#define COEX_CU_ASSOC_AUTO_SCAN_WP 3 +#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP 3 +#define COEX_CU_RF_ON_WP 3 +#define COEX_CU_RF_OFF_WP 3 +#define COEX_CU_STAND_ALONE_DEBUG_WP 6 +#define COEX_CU_IPAN_ASSOC_LEVEL_WP 3 +#define COEX_CU_RSRVD1_WP 3 +#define COEX_CU_RSRVD2_WP 3 + +#define COEX_UNASSOC_IDLE_FLAGS 0 +#define COEX_UNASSOC_MANUAL_SCAN_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_UNASSOC_AUTO_SCAN_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_CALIBRATION_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_PERIODIC_CALIBRATION_FLAGS 0 +/* + * COEX_CONNECTION_ESTAB: + * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. + */ +#define COEX_CONNECTION_ESTAB_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) +#define COEX_ASSOCIATED_IDLE_FLAGS 0 +#define COEX_ASSOC_MANUAL_SCAN_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_ASSOC_AUTO_SCAN_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0 +#define COEX_RF_ON_FLAGS 0 +#define COEX_RF_OFF_FLAGS 0 +#define COEX_STAND_ALONE_DEBUG_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_IPAN_ASSOC_LEVEL_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) +#define COEX_RSRVD1_FLAGS 0 +#define COEX_RSRVD2_FLAGS 0 +/* + * COEX_CU_RF_ON is the event wrapping all radio ownership. + * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. 
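+ *
+ * Illustrative sketch only: one entry of the priority table carried by the
+ * struct iwl_wimax_coex_cmd defined below could be filled for this event as
+ *
+ *	cmd.sta_prio[COEX_RF_ON].request_prio = COEX_CU_RF_ON_RP;
+ *	cmd.sta_prio[COEX_RF_ON].win_medium_prio = COEX_CU_RF_ON_WP;
+ *	cmd.sta_prio[COEX_RF_ON].flags = COEX_CU_RF_ON_FLAGS;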
+ */ +#define COEX_CU_RF_ON_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) + + +enum { + /* un-association part */ + COEX_UNASSOC_IDLE = 0, + COEX_UNASSOC_MANUAL_SCAN = 1, + COEX_UNASSOC_AUTO_SCAN = 2, + /* calibration */ + COEX_CALIBRATION = 3, + COEX_PERIODIC_CALIBRATION = 4, + /* connection */ + COEX_CONNECTION_ESTAB = 5, + /* association part */ + COEX_ASSOCIATED_IDLE = 6, + COEX_ASSOC_MANUAL_SCAN = 7, + COEX_ASSOC_AUTO_SCAN = 8, + COEX_ASSOC_ACTIVE_LEVEL = 9, + /* RF ON/OFF */ + COEX_RF_ON = 10, + COEX_RF_OFF = 11, + COEX_STAND_ALONE_DEBUG = 12, + /* IPAN */ + COEX_IPAN_ASSOC_LEVEL = 13, + /* reserved */ + COEX_RSRVD1 = 14, + COEX_RSRVD2 = 15, + COEX_NUM_OF_EVENTS = 16 +}; + +/* + * Coexistence WIFI/WIMAX Command + * COEX_PRIORITY_TABLE_CMD = 0x5a + * + */ +struct iwl_wimax_coex_event_entry { + u8 request_prio; + u8 win_medium_prio; + u8 reserved; + u8 flags; +} __packed; + +/* COEX flag masks */ + +/* Station table is valid */ +#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1) +/* UnMask wake up src at unassociated sleep */ +#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4) +/* UnMask wake up src at associated sleep */ +#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8) +/* Enable CoEx feature. */ +#define COEX_FLAGS_COEX_ENABLE_MSK (0x80) + +struct iwl_wimax_coex_cmd { + u8 flags; + u8 reserved[3]; + struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; +} __packed; + +/* + * Coexistence MEDIUM NOTIFICATION + * COEX_MEDIUM_NOTIFICATION = 0x5b + * + * notification from uCode to host to indicate medium changes + * + */ +/* + * status field + * bit 0 - 2: medium status + * bit 3: medium change indication + * bit 4 - 31: reserved + */ +/* status option values, (0 - 2 bits) */ +#define COEX_MEDIUM_BUSY (0x0) /* radio belongs to WiMAX */ +#define COEX_MEDIUM_ACTIVE (0x1) /* radio belongs to WiFi */ +#define COEX_MEDIUM_PRE_RELEASE (0x2) /* received radio release */ +#define COEX_MEDIUM_MSK (0x7) + +/* send notification status (1 bit) */ +#define COEX_MEDIUM_CHANGED (0x8) +#define COEX_MEDIUM_CHANGED_MSK (0x8) +#define COEX_MEDIUM_SHIFT (3) + +struct iwl_coex_medium_notification { + __le32 status; + __le32 events; +} __packed; + +/* + * Coexistence EVENT Command + * COEX_EVENT_CMD = 0x5c + * + * send from host to uCode for coex event request. + */ +/* flags options */ +#define COEX_EVENT_REQUEST_MSK (0x1) + +struct iwl_coex_event_cmd { + u8 flags; + u8 event; + __le16 reserved; +} __packed; + +struct iwl_coex_event_resp { + __le32 status; +} __packed; + + +/****************************************************************************** + * Bluetooth Coexistence commands + * + *****************************************************************************/ + +/* + * BT Status notification + * REPLY_BT_COEX_PROFILE_NOTIF = 0xce + */ +enum iwl_bt_coex_profile_traffic_load { + IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0, + IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1, + IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2, + IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3, +/* + * There are no more even though below is a u8, the + * indication from the BT device only has two bits. 
+ */ +}; + +#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1 +#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2 + +/* BT UART message - Share Part (BT -> WiFi) */ +#define BT_UART_MSG_FRAME1MSGTYPE_POS (0) +#define BT_UART_MSG_FRAME1MSGTYPE_MSK \ + (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS) +#define BT_UART_MSG_FRAME1SSN_POS (3) +#define BT_UART_MSG_FRAME1SSN_MSK \ + (0x3 << BT_UART_MSG_FRAME1SSN_POS) +#define BT_UART_MSG_FRAME1UPDATEREQ_POS (5) +#define BT_UART_MSG_FRAME1UPDATEREQ_MSK \ + (0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS) +#define BT_UART_MSG_FRAME1RESERVED_POS (6) +#define BT_UART_MSG_FRAME1RESERVED_MSK \ + (0x3 << BT_UART_MSG_FRAME1RESERVED_POS) + +#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS (0) +#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK \ + (0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS) +#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS (2) +#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK \ + (0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS) +#define BT_UART_MSG_FRAME2CHLSEQN_POS (4) +#define BT_UART_MSG_FRAME2CHLSEQN_MSK \ + (0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS) +#define BT_UART_MSG_FRAME2INBAND_POS (5) +#define BT_UART_MSG_FRAME2INBAND_MSK \ + (0x1 << BT_UART_MSG_FRAME2INBAND_POS) +#define BT_UART_MSG_FRAME2RESERVED_POS (6) +#define BT_UART_MSG_FRAME2RESERVED_MSK \ + (0x3 << BT_UART_MSG_FRAME2RESERVED_POS) + +#define BT_UART_MSG_FRAME3SCOESCO_POS (0) +#define BT_UART_MSG_FRAME3SCOESCO_MSK \ + (0x1 << BT_UART_MSG_FRAME3SCOESCO_POS) +#define BT_UART_MSG_FRAME3SNIFF_POS (1) +#define BT_UART_MSG_FRAME3SNIFF_MSK \ + (0x1 << BT_UART_MSG_FRAME3SNIFF_POS) +#define BT_UART_MSG_FRAME3A2DP_POS (2) +#define BT_UART_MSG_FRAME3A2DP_MSK \ + (0x1 << BT_UART_MSG_FRAME3A2DP_POS) +#define BT_UART_MSG_FRAME3ACL_POS (3) +#define BT_UART_MSG_FRAME3ACL_MSK \ + (0x1 << BT_UART_MSG_FRAME3ACL_POS) +#define BT_UART_MSG_FRAME3MASTER_POS (4) +#define BT_UART_MSG_FRAME3MASTER_MSK \ + (0x1 << BT_UART_MSG_FRAME3MASTER_POS) +#define BT_UART_MSG_FRAME3OBEX_POS (5) +#define BT_UART_MSG_FRAME3OBEX_MSK \ + (0x1 << BT_UART_MSG_FRAME3OBEX_POS) +#define BT_UART_MSG_FRAME3RESERVED_POS (6) +#define BT_UART_MSG_FRAME3RESERVED_MSK \ + (0x3 << BT_UART_MSG_FRAME3RESERVED_POS) + +#define BT_UART_MSG_FRAME4IDLEDURATION_POS (0) +#define BT_UART_MSG_FRAME4IDLEDURATION_MSK \ + (0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS) +#define BT_UART_MSG_FRAME4RESERVED_POS (6) +#define BT_UART_MSG_FRAME4RESERVED_MSK \ + (0x3 << BT_UART_MSG_FRAME4RESERVED_POS) + +#define BT_UART_MSG_FRAME5TXACTIVITY_POS (0) +#define BT_UART_MSG_FRAME5TXACTIVITY_MSK \ + (0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS) +#define BT_UART_MSG_FRAME5RXACTIVITY_POS (2) +#define BT_UART_MSG_FRAME5RXACTIVITY_MSK \ + (0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS) +#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS (4) +#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK \ + (0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS) +#define BT_UART_MSG_FRAME5RESERVED_POS (6) +#define BT_UART_MSG_FRAME5RESERVED_MSK \ + (0x3 << BT_UART_MSG_FRAME5RESERVED_POS) + +#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS (0) +#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK \ + (0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS) +#define BT_UART_MSG_FRAME6DISCOVERABLE_POS (5) +#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK \ + (0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS) +#define BT_UART_MSG_FRAME6RESERVED_POS (6) +#define BT_UART_MSG_FRAME6RESERVED_MSK \ + (0x3 << BT_UART_MSG_FRAME6RESERVED_POS) + +#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0) +#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \ + (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS) +#define 
BT_UART_MSG_FRAME7PAGE_POS (3) +#define BT_UART_MSG_FRAME7PAGE_MSK \ + (0x1 << BT_UART_MSG_FRAME7PAGE_POS) +#define BT_UART_MSG_FRAME7INQUIRY_POS (4) +#define BT_UART_MSG_FRAME7INQUIRY_MSK \ + (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS) +#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5) +#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \ + (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS) +#define BT_UART_MSG_FRAME7RESERVED_POS (6) +#define BT_UART_MSG_FRAME7RESERVED_MSK \ + (0x3 << BT_UART_MSG_FRAME7RESERVED_POS) + +/* BT Session Activity 2 UART message (BT -> WiFi) */ +#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5) +#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \ + (0x1< + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include "iwl-debug.h" +#include "iwl-io.h" +#include "dev.h" +#include "agn.h" + +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, priv, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_u32(#name, mode, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +static ssize_t iwl_dbgfs_sram_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + u32 val = 0; + char *buf; + ssize_t ret; + int i = 0; + bool device_format = false; + int offset = 0; + int len = 0; + int pos = 0; + int sram; + struct iwl_priv *priv = file->private_data; + const struct fw_img *img; + size_t bufsz; + + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + /* default is to dump the entire data segment */ + if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { + priv->dbgfs_sram_offset = 0x800000; + if (!priv->ucode_loaded) + return -EINVAL; + img = &priv->fw->img[priv->cur_ucode]; + priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; + } + len = priv->dbgfs_sram_len; + + if (len == -4) { + device_format = true; + len = 4; + } + + bufsz = 50 + len * 4; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", + len); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", + priv->dbgfs_sram_offset); + + /* 
adjust sram address since reads are only on even u32 boundaries */ + offset = priv->dbgfs_sram_offset & 0x3; + sram = priv->dbgfs_sram_offset & ~0x3; + + /* read the first u32 from sram */ + val = iwl_trans_read_mem32(priv->trans, sram); + + for (; len; len--) { + /* put the address at the start of every line */ + if (i == 0) + pos += scnprintf(buf + pos, bufsz - pos, + "%08X: ", sram + offset); + + if (device_format) + pos += scnprintf(buf + pos, bufsz - pos, + "%02x", (val >> (8 * (3 - offset))) & 0xff); + else + pos += scnprintf(buf + pos, bufsz - pos, + "%02x ", (val >> (8 * offset)) & 0xff); + + /* if all bytes processed, read the next u32 from sram */ + if (++offset == 4) { + sram += 4; + offset = 0; + val = iwl_trans_read_mem32(priv->trans, sram); + } + + /* put in extra spaces and split lines for human readability */ + if (++i == 16) { + i = 0; + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } else if (!(i & 7)) { + pos += scnprintf(buf + pos, bufsz - pos, " "); + } else if (!(i & 3)) { + pos += scnprintf(buf + pos, bufsz - pos, " "); + } + } + if (i) + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_sram_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[64]; + int buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = len; + } else if (sscanf(buf, "%x", &offset) == 1) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = -4; + } else { + priv->dbgfs_sram_offset = 0; + priv->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + const struct fw_img *img = &priv->fw->img[IWL_UCODE_WOWLAN]; + + if (!priv->wowlan_sram) + return -ENODATA; + + return simple_read_from_buffer(user_buf, count, ppos, + priv->wowlan_sram, + img->sec[IWL_UCODE_SECTION_DATA].len); +} +static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_station_entry *station; + struct iwl_tid_data *tid_data; + char *buf; + int i, j, pos = 0; + ssize_t ret; + /* Add 30 for initial string */ + const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", + priv->num_stations); + + for (i = 0; i < IWLAGN_STATION_COUNT; i++) { + station = &priv->stations[i]; + if (!station->used) + continue; + pos += scnprintf(buf + pos, bufsz - pos, + "station %d - addr: %pM, flags: %#x\n", + i, station->sta.sta.addr, + station->sta.station_flags_msk); + pos += scnprintf(buf + pos, bufsz - pos, + "TID seqno next_rclmd " + "rate_n_flags state txq\n"); + + for (j = 0; j < IWL_MAX_TID_COUNT; j++) { + tid_data = &priv->tid_data[i][j]; + pos += scnprintf(buf + pos, bufsz - pos, + "%d: 0x%.4x 0x%.4x 0x%.8x " + "%d %.2d", + j, tid_data->seq_number, + tid_data->next_reclaimed, + tid_data->agg.rate_n_flags, + tid_data->agg.state, + tid_data->agg.txq_id); + + if 
(tid_data->agg.wait_for_ba) + pos += scnprintf(buf + pos, bufsz - pos, + " - waitforba"); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_nvm_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + ssize_t ret; + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0, buf_size = 0; + const u8 *ptr; + char *buf; + u16 nvm_ver; + size_t eeprom_len = priv->eeprom_blob_size; + buf_size = 4 * eeprom_len + 256; + + if (eeprom_len % 16) + return -ENODATA; + + ptr = priv->eeprom_blob; + if (!ptr) + return -ENOMEM; + + /* 4 characters for byte 0xYY */ + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + nvm_ver = priv->nvm_data->nvm_version; + pos += scnprintf(buf + pos, buf_size - pos, + "NVM version: 0x%x\n", nvm_ver); + for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n", + ofs, ptr + ofs); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_supported_band *supp_band = NULL; + int pos = 0, i, bufsz = PAGE_SIZE; + char *buf; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 2.4GHz band 802.11bg):\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IR) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_NO_IR ? + "passive only" : "active/passive"); + } + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 5.2GHz band (802.11a)\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IR) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_NO_IR ? 
+ "passive only" : "active/passive"); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", + test_bit(STATUS_RF_KILL_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", + test_bit(STATUS_CT_KILL, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", + test_bit(STATUS_ALIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", + test_bit(STATUS_READY, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", + test_bit(STATUS_EXIT_PENDING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", + test_bit(STATUS_STATISTICS, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", + test_bit(STATUS_SCANNING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", + test_bit(STATUS_SCAN_ABORTING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", + test_bit(STATUS_SCAN_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", + test_bit(STATUS_POWER_PMI, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", + test_bit(STATUS_FW_ERROR, &priv->status)); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (cnt = 0; cnt < REPLY_MAX; cnt++) { + if (priv->rx_handlers_stats[cnt] > 0) + pos += scnprintf(buf + pos, bufsz - pos, + "\tRx handler[%36s]:\t\t %u\n", + iwl_dvm_get_cmd_string(cnt), + priv->rx_handlers_stats[cnt]); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + + char buf[8]; + int buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &reset_flag) != 1) + return -EFAULT; + if (reset_flag == 0) + memset(&priv->rx_handlers_stats[0], 0, + sizeof(priv->rx_handlers_stats)); + + return count; +} + +static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_rxon_context *ctx; + int pos = 0, i; + char buf[256 * NUM_IWL_RXON_CTX]; + const size_t bufsz = sizeof(buf); + + for_each_context(priv, ctx) { + pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", + ctx->ctxid); + for (i = 0; i < AC_NUM; i++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tcw_min\tcw_max\taifsn\ttxop\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "AC[%d]\t%u\t%u\t%u\t%u\n", i, + 
ctx->qos_data.def_qos_parm.ac[i].cw_min, + ctx->qos_data.def_qos_parm.ac[i].cw_max, + ctx->qos_data.def_qos_parm.ac[i].aifsn, + ctx->qos_data.def_qos_parm.ac[i].edca_txop); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "Thermal Throttling Mode: %s\n", + tt->advanced_tt ? "Advance" : "Legacy"); + pos += scnprintf(buf + pos, bufsz - pos, + "Thermal Throttling State: %d\n", + tt->state); + if (tt->advanced_tt) { + restriction = tt->restriction + tt->state; + pos += scnprintf(buf + pos, bufsz - pos, + "Tx mode: %d\n", + restriction->tx_stream); + pos += scnprintf(buf + pos, bufsz - pos, + "Rx mode: %d\n", + restriction->rx_stream); + pos += scnprintf(buf + pos, bufsz - pos, + "HT mode: %d\n", + restriction->is_ht); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &ht40) != 1) + return -EFAULT; + if (!iwl_is_any_associated(priv)) + priv->disable_ht40 = ht40 ? true : false; + else + return -EINVAL; + + return count; +} + +static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "11n 40MHz Mode: %s\n", + priv->disable_ht40 ? "Disabled" : "Enabled"); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_temperature_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", priv->temperature); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + + +static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int value; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%d", &value) != 1) + return -EINVAL; + + /* + * Our users expect 0 to be "CAM", but 0 isn't actually + * valid here. However, let's not confuse them and present + * IWL_POWER_INDEX_1 as "1", not "0". 
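+	 *
+	 * For example: writing "1" selects IWL_POWER_INDEX_1 (stored
+	 * internally as 0), writing "-1" clears the override, and
+	 * writing "0" is rejected with -EINVAL.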
+ */ + if (value == 0) + return -EINVAL; + else if (value > 0) + value -= 1; + + if (value != -1 && (value < 0 || value >= IWL_POWER_NUM)) + return -EINVAL; + + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + priv->power_data.debug_sleep_level_override = value; + + mutex_lock(&priv->mutex); + iwl_power_update_mode(priv, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[10]; + int pos, value; + const size_t bufsz = sizeof(buf); + + /* see the write function */ + value = priv->power_data.debug_sleep_level_override; + if (value >= 0) + value += 1; + + pos = scnprintf(buf, bufsz, "%d\n", value); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[200]; + int pos = 0, i; + const size_t bufsz = sizeof(buf); + struct iwl_powertable_cmd *cmd = &priv->power_data.sleep_cmd; + + pos += scnprintf(buf + pos, bufsz - pos, + "flags: %#.2x\n", le16_to_cpu(cmd->flags)); + pos += scnprintf(buf + pos, bufsz - pos, + "RX/TX timeout: %d/%d usec\n", + le32_to_cpu(cmd->rx_data_timeout), + le32_to_cpu(cmd->tx_data_timeout)); + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "sleep_interval[%d]: %d\n", i, + le32_to_cpu(cmd->sleep_interval[i])); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +DEBUGFS_READ_WRITE_FILE_OPS(sram); +DEBUGFS_READ_FILE_OPS(wowlan_sram); +DEBUGFS_READ_FILE_OPS(nvm); +DEBUGFS_READ_FILE_OPS(stations); +DEBUGFS_READ_FILE_OPS(channels); +DEBUGFS_READ_FILE_OPS(status); +DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers); +DEBUGFS_READ_FILE_OPS(qos); +DEBUGFS_READ_FILE_OPS(thermal_throttling); +DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); +DEBUGFS_READ_FILE_OPS(temperature); +DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); +DEBUGFS_READ_FILE_OPS(current_sleep_command); + +static const char *fmt_value = " %-30s %10u\n"; +static const char *fmt_hex = " %-30s 0x%02X\n"; +static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; +static const char *fmt_header = + "%-32s current cumulative delta max\n"; + +static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + u32 flag; + + lockdep_assert_held(&priv->statistics.lock); + + flag = le32_to_cpu(priv->statistics.flag); + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); + if (flag & UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (flag & UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (flag & UCODE_STATISTICS_NARROW_BAND_MSK) + ? 
"enabled" : "disabled"); + + return p; +} + +static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_rx_phy) * 40 + + sizeof(struct statistics_rx_non_phy) * 40 + + sizeof(struct statistics_rx_ht_phy) * 40 + 400; + ssize_t ret; + struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct statistics_rx_non_phy *general, *accum_general; + struct statistics_rx_non_phy *delta_general, *max_general; + struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* + * the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + spin_lock_bh(&priv->statistics.lock); + ofdm = &priv->statistics.rx_ofdm; + cck = &priv->statistics.rx_cck; + general = &priv->statistics.rx_non_phy; + ht = &priv->statistics.rx_ofdm_ht; + accum_ofdm = &priv->accum_stats.rx_ofdm; + accum_cck = &priv->accum_stats.rx_cck; + accum_general = &priv->accum_stats.rx_non_phy; + accum_ht = &priv->accum_stats.rx_ofdm_ht; + delta_ofdm = &priv->delta_stats.rx_ofdm; + delta_cck = &priv->delta_stats.rx_cck; + delta_general = &priv->delta_stats.rx_non_phy; + delta_ht = &priv->delta_stats.rx_ofdm_ht; + max_ofdm = &priv->max_delta_stats.rx_ofdm; + max_cck = &priv->max_delta_stats.rx_cck; + max_general = &priv->max_delta_stats.rx_non_phy; + max_ht = &priv->max_delta_stats.rx_ofdm_ht; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(ofdm->ina_cnt), + accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ofdm->overrun_err), + accum_ofdm->overrun_err, delta_ofdm->overrun_err, + max_ofdm->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ofdm->crc32_good), + accum_ofdm->crc32_good, delta_ofdm->crc32_good, + max_ofdm->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, + delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += 
scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), + accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, + max_ofdm->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), + accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, + max_ofdm->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, + delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), + accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, + max_ofdm->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), + accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, + max_ofdm->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(ofdm->sent_ba_rsp_cnt), + accum_ofdm->sent_ba_rsp_cnt, + delta_ofdm->sent_ba_rsp_cnt, + max_ofdm->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(ofdm->dsp_self_kill), + accum_ofdm->dsp_self_kill, + delta_ofdm->dsp_self_kill, + max_ofdm->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ofdm->mh_format_err), + accum_ofdm->mh_format_err, + delta_ofdm->mh_format_err, + max_ofdm->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(ofdm->re_acq_main_rssi_sum), + accum_ofdm->re_acq_main_rssi_sum, + delta_ofdm->re_acq_main_rssi_sum, + max_ofdm->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - CCK:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(cck->overrun_err), + accum_cck->overrun_err, delta_cck->overrun_err, + max_cck->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, + max_cck->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, max_cck->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, + delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); + pos += scnprintf(buf + pos, 
bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, + max_cck->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), + accum_cck->sfd_timeout, delta_cck->sfd_timeout, + max_cck->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(cck->fina_timeout), + accum_cck->fina_timeout, delta_cck->fina_timeout, + max_cck->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), + accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, + max_cck->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), + accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, + max_cck->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(cck->sent_ba_rsp_cnt), + accum_cck->sent_ba_rsp_cnt, + delta_cck->sent_ba_rsp_cnt, + max_cck->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(cck->dsp_self_kill), + accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, + max_cck->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(cck->mh_format_err), + accum_cck->mh_format_err, delta_cck->mh_format_err, + max_cck->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(cck->re_acq_main_rssi_sum), + accum_cck->re_acq_main_rssi_sum, + delta_cck->re_acq_main_rssi_sum, + max_cck->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - GENERAL:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_cts:", + le32_to_cpu(general->bogus_cts), + accum_general->bogus_cts, delta_general->bogus_cts, + max_general->bogus_cts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_ack:", + le32_to_cpu(general->bogus_ack), + accum_general->bogus_ack, delta_general->bogus_ack, + max_general->bogus_ack); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_beacons:", + le32_to_cpu(general->channel_beacons), + accum_general->channel_beacons, + delta_general->channel_beacons, + max_general->channel_beacons); + pos += scnprintf(buf + pos, 
bufsz - pos, + fmt_table, "num_missed_bcon:", + le32_to_cpu(general->num_missed_bcon), + accum_general->num_missed_bcon, + delta_general->num_missed_bcon, + max_general->num_missed_bcon); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "adc_rx_saturation_time:", + le32_to_cpu(general->adc_rx_saturation_time), + accum_general->adc_rx_saturation_time, + delta_general->adc_rx_saturation_time, + max_general->adc_rx_saturation_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_detect_search_tm:", + le32_to_cpu(general->ina_detection_search_time), + accum_general->ina_detection_search_time, + delta_general->ina_detection_search_time, + max_general->ina_detection_search_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_a:", + le32_to_cpu(general->beacon_silence_rssi_a), + accum_general->beacon_silence_rssi_a, + delta_general->beacon_silence_rssi_a, + max_general->beacon_silence_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_b:", + le32_to_cpu(general->beacon_silence_rssi_b), + accum_general->beacon_silence_rssi_b, + delta_general->beacon_silence_rssi_b, + max_general->beacon_silence_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_c:", + le32_to_cpu(general->beacon_silence_rssi_c), + accum_general->beacon_silence_rssi_c, + delta_general->beacon_silence_rssi_c, + max_general->beacon_silence_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "interference_data_flag:", + le32_to_cpu(general->interference_data_flag), + accum_general->interference_data_flag, + delta_general->interference_data_flag, + max_general->interference_data_flag); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_load:", + le32_to_cpu(general->channel_load), + accum_general->channel_load, + delta_general->channel_load, + max_general->channel_load); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_false_alarms:", + le32_to_cpu(general->dsp_false_alarms), + accum_general->dsp_false_alarms, + delta_general->dsp_false_alarms, + max_general->dsp_false_alarms); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_a:", + le32_to_cpu(general->beacon_rssi_a), + accum_general->beacon_rssi_a, + delta_general->beacon_rssi_a, + max_general->beacon_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_b:", + le32_to_cpu(general->beacon_rssi_b), + accum_general->beacon_rssi_b, + delta_general->beacon_rssi_b, + max_general->beacon_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_c:", + le32_to_cpu(general->beacon_rssi_c), + accum_general->beacon_rssi_c, + delta_general->beacon_rssi_c, + max_general->beacon_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_a:", + le32_to_cpu(general->beacon_energy_a), + accum_general->beacon_energy_a, + delta_general->beacon_energy_a, + max_general->beacon_energy_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_b:", + le32_to_cpu(general->beacon_energy_b), + accum_general->beacon_energy_b, + delta_general->beacon_energy_b, + max_general->beacon_energy_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_c:", + le32_to_cpu(general->beacon_energy_c), + accum_general->beacon_energy_c, + delta_general->beacon_energy_c, + max_general->beacon_energy_c); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM_HT:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, 
"plcp_err:", + le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, + delta_ht->plcp_err, max_ht->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, + delta_ht->overrun_err, max_ht->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ht->early_overrun_err), + accum_ht->early_overrun_err, + delta_ht->early_overrun_err, + max_ht->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, + delta_ht->crc32_good, max_ht->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, + delta_ht->crc32_err, max_ht->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ht->mh_format_err), + accum_ht->mh_format_err, + delta_ht->mh_format_err, max_ht->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_crc32_good:", + le32_to_cpu(ht->agg_crc32_good), + accum_ht->agg_crc32_good, + delta_ht->agg_crc32_good, max_ht->agg_crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_mpdu_cnt:", + le32_to_cpu(ht->agg_mpdu_cnt), + accum_ht->agg_mpdu_cnt, + delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_cnt:", + le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, + delta_ht->agg_cnt, max_ht->agg_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unsupport_mcs:", + le32_to_cpu(ht->unsupport_mcs), + accum_ht->unsupport_mcs, + delta_ht->unsupport_mcs, max_ht->unsupport_mcs); + + spin_unlock_bh(&priv->statistics.lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_tx) * 48) + 250; + ssize_t ret; + struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + spin_lock_bh(&priv->statistics.lock); + + tx = &priv->statistics.tx; + accum_tx = &priv->accum_stats.tx; + delta_tx = &priv->delta_stats.tx; + max_tx = &priv->max_delta_stats.tx; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Tx:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "preamble:", + le32_to_cpu(tx->preamble_cnt), + accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, + delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, + delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, + delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); 
+ pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), + accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_timeout:", + le32_to_cpu(tx->ack_timeout), + accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, + delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), + accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, + max_tx->actual_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dump_msdu_cnt:", + le32_to_cpu(tx->dump_msdu_cnt), + accum_tx->dump_msdu_cnt, + delta_tx->dump_msdu_cnt, + max_tx->dump_msdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_nxt_frame_mismatch:", + le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), + accum_tx->burst_abort_next_frame_mismatch_cnt, + delta_tx->burst_abort_next_frame_mismatch_cnt, + max_tx->burst_abort_next_frame_mismatch_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_missing_nxt_frame:", + le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), + accum_tx->burst_abort_missing_next_frame_cnt, + delta_tx->burst_abort_missing_next_frame_cnt, + max_tx->burst_abort_missing_next_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout_collision:", + le32_to_cpu(tx->cts_timeout_collision), + accum_tx->cts_timeout_collision, + delta_tx->cts_timeout_collision, + max_tx->cts_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_ba_timeout_collision:", + le32_to_cpu(tx->ack_or_ba_timeout_collision), + accum_tx->ack_or_ba_timeout_collision, + delta_tx->ack_or_ba_timeout_collision, + max_tx->ack_or_ba_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_timeout:", + le32_to_cpu(tx->agg.ba_timeout), + accum_tx->agg.ba_timeout, + delta_tx->agg.ba_timeout, + max_tx->agg.ba_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_resched_frames:", + le32_to_cpu(tx->agg.ba_reschedule_frames), + accum_tx->agg.ba_reschedule_frames, + delta_tx->agg.ba_reschedule_frames, + max_tx->agg.ba_reschedule_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg_frame:", + le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), + accum_tx->agg.scd_query_agg_frame_cnt, + delta_tx->agg.scd_query_agg_frame_cnt, + max_tx->agg.scd_query_agg_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_no_agg:", + le32_to_cpu(tx->agg.scd_query_no_agg), + accum_tx->agg.scd_query_no_agg, + delta_tx->agg.scd_query_no_agg, + max_tx->agg.scd_query_no_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg:", + le32_to_cpu(tx->agg.scd_query_agg), + accum_tx->agg.scd_query_agg, + delta_tx->agg.scd_query_agg, + max_tx->agg.scd_query_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_mismatch:", + le32_to_cpu(tx->agg.scd_query_mismatch), + accum_tx->agg.scd_query_mismatch, + delta_tx->agg.scd_query_mismatch, + max_tx->agg.scd_query_mismatch); + pos += scnprintf(buf + pos, 
bufsz - pos, + fmt_table, "agg frame_not_ready:", + le32_to_cpu(tx->agg.frame_not_ready), + accum_tx->agg.frame_not_ready, + delta_tx->agg.frame_not_ready, + max_tx->agg.frame_not_ready); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg underrun:", + le32_to_cpu(tx->agg.underrun), + accum_tx->agg.underrun, + delta_tx->agg.underrun, max_tx->agg.underrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg bt_prio_kill:", + le32_to_cpu(tx->agg.bt_prio_kill), + accum_tx->agg.bt_prio_kill, + delta_tx->agg.bt_prio_kill, + max_tx->agg.bt_prio_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg rx_ba_rsp_cnt:", + le32_to_cpu(tx->agg.rx_ba_rsp_cnt), + accum_tx->agg.rx_ba_rsp_cnt, + delta_tx->agg.rx_ba_rsp_cnt, + max_tx->agg.rx_ba_rsp_cnt); + + if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { + pos += scnprintf(buf + pos, bufsz - pos, + "tx power: (1/2 dB step)\n"); + if ((priv->nvm_data->valid_tx_ant & ANT_A) && + tx->tx_power.ant_a) + pos += scnprintf(buf + pos, bufsz - pos, + fmt_hex, "antenna A:", + tx->tx_power.ant_a); + if ((priv->nvm_data->valid_tx_ant & ANT_B) && + tx->tx_power.ant_b) + pos += scnprintf(buf + pos, bufsz - pos, + fmt_hex, "antenna B:", + tx->tx_power.ant_b); + if ((priv->nvm_data->valid_tx_ant & ANT_C) && + tx->tx_power.ant_c) + pos += scnprintf(buf + pos, bufsz - pos, + fmt_hex, "antenna C:", + tx->tx_power.ant_c); + } + + spin_unlock_bh(&priv->statistics.lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_general) * 10 + 300; + ssize_t ret; + struct statistics_general_common *general, *accum_general; + struct statistics_general_common *delta_general, *max_general; + struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct statistics_div *div, *accum_div, *delta_div, *max_div; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + + spin_lock_bh(&priv->statistics.lock); + + general = &priv->statistics.common; + dbg = &priv->statistics.common.dbg; + div = &priv->statistics.common.div; + accum_general = &priv->accum_stats.common; + accum_dbg = &priv->accum_stats.common.dbg; + accum_div = &priv->accum_stats.common.div; + delta_general = &priv->delta_stats.common; + max_general = &priv->max_delta_stats.common; + delta_dbg = &priv->delta_stats.common.dbg; + max_dbg = &priv->max_delta_stats.common.dbg; + delta_div = &priv->delta_stats.common.div; + max_div = &priv->max_delta_stats.common.div; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_General:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "temperature:", + le32_to_cpu(general->temperature)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "temperature_m:", + le32_to_cpu(general->temperature_m)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "ttl_timestamp:", + le32_to_cpu(general->ttl_timestamp)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_check:", + le32_to_cpu(dbg->burst_check), + accum_dbg->burst_check, + 
delta_dbg->burst_check, max_dbg->burst_check); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_count:", + le32_to_cpu(dbg->burst_count), + accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "wait_for_silence_timeout_count:", + le32_to_cpu(dbg->wait_for_silence_timeout_cnt), + accum_dbg->wait_for_silence_timeout_cnt, + delta_dbg->wait_for_silence_timeout_cnt, + max_dbg->wait_for_silence_timeout_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, + delta_general->sleep_time, max_general->sleep_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_out:", + le32_to_cpu(general->slots_out), + accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, + delta_general->slots_idle, max_general->slots_idle); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_enable_counter:", + le32_to_cpu(general->rx_enable_counter), + accum_general->rx_enable_counter, + delta_general->rx_enable_counter, + max_general->rx_enable_counter); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "num_of_sos_states:", + le32_to_cpu(general->num_of_sos_states), + accum_general->num_of_sos_states, + delta_general->num_of_sos_states, + max_general->num_of_sos_states); + + spin_unlock_bh(&priv->statistics.lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200; + ssize_t ret; + struct statistics_bt_activity *bt, *accum_bt; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + if (!priv->bt_enable_flag) + return -EINVAL; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + ret = iwl_send_statistics_request(priv, 0, false); + mutex_unlock(&priv->mutex); + + if (ret) + return -EAGAIN; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* + * the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + + spin_lock_bh(&priv->statistics.lock); + + bt = &priv->statistics.bt_activity; + accum_bt = &priv->accum_stats.bt_activity; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\t\t\tcurrent\t\t\taccumulative\n"); + pos += scnprintf(buf + 
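+	/*
+	 * Unlike the four-column fmt_table dumps above, the BT activity file
+	 * prints only the current value and the accumulated total, so a row
+	 * comes out as, e.g. (illustrative values):
+	 *
+	 *	hi_priority_tx_req_cnt:		123			4567
+	 */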
pos, bufsz - pos, + "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_tx_req_cnt), + accum_bt->hi_priority_tx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_tx_denied_cnt), + accum_bt->hi_priority_tx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_tx_req_cnt), + accum_bt->lo_priority_tx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_tx_denied_cnt), + accum_bt->lo_priority_tx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_rx_req_cnt), + accum_bt->hi_priority_rx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_rx_denied_cnt), + accum_bt->hi_priority_rx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_rx_req_cnt), + accum_bt->lo_priority_rx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_rx_denied_cnt), + accum_bt->lo_priority_rx_denied_cnt); + + pos += scnprintf(buf + pos, bufsz - pos, + "(rx)num_bt_kills:\t\t%u\t\t\t%u\n", + le32_to_cpu(priv->statistics.num_bt_kills), + priv->statistics.accum_num_bt_kills); + + spin_unlock_bh(&priv->statistics.lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) + + (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200; + ssize_t ret; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY), + priv->reply_tx_stats.pp_delay); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES), + priv->reply_tx_stats.pp_few_bytes); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO), + priv->reply_tx_stats.pp_bt_prio); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD), + priv->reply_tx_stats.pp_quiet_period); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK), + priv->reply_tx_stats.pp_calc_ttak); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_tx_fail_reason( + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY), + priv->reply_tx_stats.int_crossed_retry); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT), + priv->reply_tx_stats.short_limit); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT), + priv->reply_tx_stats.long_limit); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + 
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN), + priv->reply_tx_stats.fifo_underrun); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW), + priv->reply_tx_stats.drain_flow); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH), + priv->reply_tx_stats.rfkill_flush); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE), + priv->reply_tx_stats.life_expire); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS), + priv->reply_tx_stats.dest_ps); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED), + priv->reply_tx_stats.host_abort); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY), + priv->reply_tx_stats.pp_delay); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID), + priv->reply_tx_stats.sta_invalid); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED), + priv->reply_tx_stats.frag_drop); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE), + priv->reply_tx_stats.tid_disable); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED), + priv->reply_tx_stats.fifo_flush); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_tx_fail_reason( + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL), + priv->reply_tx_stats.insuff_cf_poll); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX), + priv->reply_tx_stats.fail_hw_drop); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_tx_fail_reason( + TX_STATUS_FAIL_NO_BEACON_ON_RADAR), + priv->reply_tx_stats.sta_color_mismatch); + pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n", + priv->reply_tx_stats.unknown); + + pos += scnprintf(buf + pos, bufsz - pos, + "\nStatistics_Agg_TX_Error:\n"); + + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK), + priv->reply_agg_tx_stats.underrun); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK), + priv->reply_agg_tx_stats.bt_prio); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK), + priv->reply_agg_tx_stats.few_bytes); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK), + priv->reply_agg_tx_stats.abort); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_agg_tx_fail_reason( + AGG_TX_STATE_LAST_SENT_TTL_MSK), + priv->reply_agg_tx_stats.last_sent_ttl); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_agg_tx_fail_reason( + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK), + priv->reply_agg_tx_stats.last_sent_try); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_agg_tx_fail_reason( + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK), + priv->reply_agg_tx_stats.last_sent_bt_kill); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK), + priv->reply_agg_tx_stats.scd_query); + pos += 
scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", + iwl_get_agg_tx_fail_reason( + AGG_TX_STATE_TEST_BAD_CRC32_MSK), + priv->reply_agg_tx_stats.bad_crc32); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK), + priv->reply_agg_tx_stats.response); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK), + priv->reply_agg_tx_stats.dump_tx); + pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", + iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK), + priv->reply_agg_tx_stats.delay_tx); + pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n", + priv->reply_agg_tx_stats.unknown); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_sensitivity_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100; + ssize_t ret; + struct iwl_sensitivity_data *data; + + data = &priv->sensitivity_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", + data->auto_corr_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc:\t\t %u\n", + data->auto_corr_ofdm_mrc); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", + data->auto_corr_ofdm_x1); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc_x1:\t\t %u\n", + data->auto_corr_ofdm_mrc_x1); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", + data->auto_corr_cck); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", + data->auto_corr_cck_mrc); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_ofdm:\t\t %u\n", + data->last_bad_plcp_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", + data->last_fa_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_cck:\t\t %u\n", + data->last_bad_plcp_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", + data->last_fa_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", + data->nrg_curr_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", + data->nrg_prev_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); + for (cnt = 0; cnt < 10; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_value[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); + for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_silence_rssi[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", + data->nrg_silence_ref); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", + data->nrg_energy_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", + data->nrg_silence_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", + data->nrg_th_cck); + pos += scnprintf(buf + pos, bufsz - pos, + "nrg_auto_corr_silence_diff:\t %u\n", + data->nrg_auto_corr_silence_diff); + pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", + 
data->num_in_cck_no_fa); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", + data->nrg_th_ofdm); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + + +static ssize_t iwl_dbgfs_chain_noise_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100; + ssize_t ret; + struct iwl_chain_noise_data *data; + + data = &priv->chain_noise_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", + data->active_chains); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", + data->chain_noise_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", + data->chain_noise_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", + data->chain_noise_c); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", + data->chain_signal_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", + data->chain_signal_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", + data->chain_signal_c); + pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", + data->beacon_count); + + pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->disconn_array[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->delta_gain_code[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", + data->radio_write); + pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", + data->state); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_power_save_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[60]; + int pos = 0; + const size_t bufsz = sizeof(buf); + u32 pwrsave_status; + + pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) & + CSR_GP_REG_POWER_SAVE_STATUS_MSK; + + pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); + pos += scnprintf(buf + pos, bufsz - pos, "%s\n", + (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : + (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : + (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? 
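+	/*
+	 * The chained conditional below decodes the masked CSR_GP_CNTRL
+	 * value; written as a switch it would read (equivalent sketch):
+	 *
+	 *	switch (pwrsave_status) {
+	 *	case CSR_GP_REG_NO_POWER_SAVE:	s = "none";	break;
+	 *	case CSR_GP_REG_MAC_POWER_SAVE:	s = "MAC";	break;
+	 *	case CSR_GP_REG_PHY_POWER_SAVE:	s = "PHY";	break;
+	 *	default:			s = "error";	break;
+	 *	}
+	 */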
"PHY" : + "error"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &clear) != 1) + return -EFAULT; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + iwl_send_statistics_request(priv, 0, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[128]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n", + priv->event_log.ucode_trace ? "On" : "Off"); + pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n", + priv->event_log.non_wraps_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n", + priv->event_log.wraps_once_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n", + priv->event_log.wraps_more_count); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &trace) != 1) + return -EFAULT; + + if (trace) { + priv->event_log.ucode_trace = true; + if (iwl_is_alive(priv)) { + /* start collecting data now */ + mod_timer(&priv->ucode_trace, jiffies); + } + } else { + priv->event_log.ucode_trace = false; + del_timer_sync(&priv->ucode_trace); + } + + return count; +} + +static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", + priv->missed_beacon_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int missed; + + memset(buf, 0, 
sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &missed) != 1) + return -EINVAL; + + if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN || + missed > IWL_MISSED_BEACON_THRESHOLD_MAX) + priv->missed_beacon_threshold = + IWL_MISSED_BEACON_THRESHOLD_DEF; + else + priv->missed_beacon_threshold = missed; + + return count; +} + +static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%u\n", + priv->plcp_delta_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &plcp) != 1) + return -EINVAL; + if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || + (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) + priv->plcp_delta_threshold = + IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; + else + priv->plcp_delta_threshold = plcp; + return count; +} + +static ssize_t iwl_dbgfs_rf_reset_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[300]; + const size_t bufsz = sizeof(buf); + struct iwl_rf_reset *rf_reset = &priv->rf_reset; + + pos += scnprintf(buf + pos, bufsz - pos, + "RF reset statistics\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request: %d\n", + rf_reset->reset_request_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request success: %d\n", + rf_reset->reset_success_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request reject: %d\n", + rf_reset->reset_reject_count); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_rf_reset_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int ret; + + ret = iwl_force_rf_reset(priv, true); + return ret ? 
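+	/*
+	 * The write handlers in this file share one pattern: copy at most
+	 * sizeof(buf) - 1 bytes into a zeroed stack buffer, parse a single
+	 * integer with sscanf(), apply it and return count. Stripped down
+	 * (sketch):
+	 *
+	 *	char tmp[8] = {};
+	 *	int val;
+	 *
+	 *	if (copy_from_user(tmp, user_buf, min(count, sizeof(tmp) - 1)))
+	 *		return -EFAULT;
+	 *	if (sscanf(tmp, "%d", &val) != 1)
+	 *		return -EINVAL;
+	 *	... apply val ...
+	 *	return count;
+	 *
+	 * Note that the two threshold writers above clamp rather than
+	 * reject: a missed_beacon value outside
+	 * [IWL_MISSED_BEACON_THRESHOLD_MIN, IWL_MISSED_BEACON_THRESHOLD_MAX]
+	 * falls back to IWL_MISSED_BEACON_THRESHOLD_DEF, and a plcp_delta
+	 * outside its MIN/MAX range disables the check via
+	 * IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE.
+	 */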
ret : count; +} + +static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int flush; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &flush) != 1) + return -EINVAL; + + if (iwl_is_rfkill(priv)) + return -EFAULT; + + iwlagn_dev_txfifo_flush(priv); + + return count; +} + +static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char buf[200]; + const size_t bufsz = sizeof(buf); + + if (!priv->bt_enable_flag) { + pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n"); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); + } + pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n", + priv->bt_enable_flag); + pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n", + priv->bt_full_concurrent ? "full concurrency" : "3-wire"); + pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, " + "last traffic notif: %d\n", + priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load); + pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, " + "kill_ack_mask: %x, kill_cts_mask: %x\n", + priv->bt_ch_announce, priv->kill_ack_mask, + priv->kill_cts_mask); + + pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: "); + switch (priv->bt_traffic_load) { + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n"); + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + pos += scnprintf(buf + pos, bufsz - pos, "High\n"); + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + pos += scnprintf(buf + pos, bufsz - pos, "Low\n"); + break; + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + default: + pos += scnprintf(buf + pos, bufsz - pos, "None\n"); + break; + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_protection_mode_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + + int pos = 0; + char buf[40]; + const size_t bufsz = sizeof(buf); + + if (priv->cfg->ht_params) + pos += scnprintf(buf + pos, bufsz - pos, + "use %s for aggregation\n", + (priv->hw_params.use_rts_for_aggregation) ? 
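+	/*
+	 * protection_mode is only offered on HT-capable devices
+	 * (priv->cfg->ht_params is set): writing a non-zero value selects
+	 * RTS/CTS protection for aggregated traffic
+	 * (hw_params.use_rts_for_aggregation = true), writing 0 selects
+	 * CTS-to-self, and on devices without ht_params the write handler
+	 * below returns -EINVAL.
+	 */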
+ "rts/cts" : "cts-to-self"); + else + pos += scnprintf(buf + pos, bufsz - pos, "N/A"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int rts; + + if (!priv->cfg->ht_params) + return -EINVAL; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &rts) != 1) + return -EINVAL; + if (rts) + priv->hw_params.use_rts_for_aggregation = true; + else + priv->hw_params.use_rts_for_aggregation = false; + return count; +} + +static int iwl_cmd_echo_test(struct iwl_priv *priv) +{ + int ret; + struct iwl_host_cmd cmd = { + .id = REPLY_ECHO, + .len = { 0 }, + }; + + ret = iwl_dvm_send_cmd(priv, &cmd); + if (ret) + IWL_ERR(priv, "echo testing fail: 0X%x\n", ret); + else + IWL_DEBUG_INFO(priv, "echo testing pass\n"); + return ret; +} + +static ssize_t iwl_dbgfs_echo_test_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + iwl_cmd_echo_test(priv); + return count; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static ssize_t iwl_dbgfs_log_event_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf = NULL; + ssize_t ret; + + ret = iwl_dump_nic_event_log(priv, true, &buf); + if (ret > 0) + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_log_event_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; + int buf_size; + + /* check that the interface is up */ + if (!iwl_is_ready(priv)) + return -EAGAIN; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &event_log_flag) != 1) + return -EFAULT; + if (event_log_flag == 1) + iwl_dump_nic_event_log(priv, true, NULL); + + return count; +} +#endif + +static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[120]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "Sensitivity calibrations %s\n", + (priv->calib_disabled & + IWL_SENSITIVITY_CALIB_DISABLED) ? + "DISABLED" : "ENABLED"); + pos += scnprintf(buf + pos, bufsz - pos, + "Chain noise calibrations %s\n", + (priv->calib_disabled & + IWL_CHAIN_NOISE_CALIB_DISABLED) ? + "DISABLED" : "ENABLED"); + pos += scnprintf(buf + pos, bufsz - pos, + "Tx power calibrations %s\n", + (priv->calib_disabled & + IWL_TX_POWER_CALIB_DISABLED) ? 
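+	/*
+	 * calib_disabled takes a hex bitmask (the write handler below
+	 * parses it with "%x"). For example, writing 3 sets
+	 * IWL_SENSITIVITY_CALIB_DISABLED | IWL_CHAIN_NOISE_CALIB_DISABLED,
+	 * so the first two lines read back as DISABLED while Tx power
+	 * calibration stays ENABLED.
+	 */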
+ "DISABLED" : "ENABLED"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + u32 calib_disabled; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &calib_disabled) != 1) + return -EFAULT; + + priv->calib_disabled = calib_disabled; + + return count; +} + +static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + bool restart_fw = iwlwifi_mod_params.restart_fw; + int ret; + + iwlwifi_mod_params.restart_fw = true; + + mutex_lock(&priv->mutex); + + /* take the return value to make compiler happy - it will fail anyway */ + ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL); + + mutex_unlock(&priv->mutex); + + iwlwifi_mod_params.restart_fw = restart_fw; + + return count; +} + +DEBUGFS_READ_FILE_OPS(ucode_rx_stats); +DEBUGFS_READ_FILE_OPS(ucode_tx_stats); +DEBUGFS_READ_FILE_OPS(ucode_general_stats); +DEBUGFS_READ_FILE_OPS(sensitivity); +DEBUGFS_READ_FILE_OPS(chain_noise); +DEBUGFS_READ_FILE_OPS(power_save_status); +DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); +DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); +DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); +DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); +DEBUGFS_READ_WRITE_FILE_OPS(rf_reset); +DEBUGFS_READ_FILE_OPS(rxon_flags); +DEBUGFS_READ_FILE_OPS(rxon_filter_flags); +DEBUGFS_WRITE_FILE_OPS(txfifo_flush); +DEBUGFS_READ_FILE_OPS(ucode_bt_stats); +DEBUGFS_READ_FILE_OPS(bt_traffic); +DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); +DEBUGFS_READ_FILE_OPS(reply_tx_error); +DEBUGFS_WRITE_FILE_OPS(echo_test); +DEBUGFS_WRITE_FILE_OPS(fw_restart); +#ifdef CONFIG_IWLWIFI_DEBUG +DEBUGFS_READ_WRITE_FILE_OPS(log_event); +#endif +DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled); + +/* + * Create the debugfs files and directories + * + */ +int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir) +{ + struct dentry *dir_data, *dir_rf, *dir_debug; + + priv->debugfs_dir = dbgfs_dir; + + dir_data = debugfs_create_dir("data", dbgfs_dir); + if (!dir_data) + goto err; + dir_rf = debugfs_create_dir("rf", dbgfs_dir); + if (!dir_rf) + goto err; + dir_debug = debugfs_create_dir("debug", dbgfs_dir); + if (!dir_debug) + goto err; + + DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR); + + DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | 
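+	/*
+	 * The DEBUGFS_*_FILE_OPS and DEBUGFS_ADD_FILE helpers used here are
+	 * defined earlier in this file (outside this hunk): the *_FILE_OPS
+	 * macros build an iwl_dbgfs_<name>_ops file_operations around the
+	 * read/write handlers above, and DEBUGFS_ADD_FILE conventionally
+	 * expands to something along these lines (sketch, not the literal
+	 * definition):
+	 *
+	 *	#define DEBUGFS_ADD_FILE(name, parent, mode) do {	\
+	 *		if (!debugfs_create_file(#name, mode, parent,	\
+	 *					 priv,			\
+	 *					 &iwl_dbgfs_##name##_ops)) \
+	 *			goto err;				\
+	 *	} while (0)
+	 */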
S_IRUSR); + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(fw_restart, dir_debug, S_IWUSR); +#ifdef CONFIG_IWLWIFI_DEBUG + DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR); +#endif + + if (iwl_advanced_bt_coexist(priv)) + DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); + + /* Calibrations disabled/enabled status*/ + DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR); + + /* + * Create a symlink with mac80211. This is not very robust, as it does + * not remove the symlink created. The implicit assumption is that + * when the opmode exits, mac80211 will also exit, and will remove + * this symlink as part of its cleanup. + */ + if (priv->mac80211_registered) { + char buf[100]; + struct dentry *mac80211_dir, *dev_dir, *root_dir; + + dev_dir = dbgfs_dir->d_parent; + root_dir = dev_dir->d_parent; + mac80211_dir = priv->hw->wiphy->debugfsdir; + + snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name, + dev_dir->d_name.name); + + if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf)) + goto err; + } + + return 0; + +err: + IWL_ERR(priv, "failed to create the dvm debugfs entries\n"); + return -ENOMEM; +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h new file mode 100644 index 000000000000..0ba3e56d6015 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -0,0 +1,949 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +/* + * Please use this file (dev.h) for driver implementation definitions. + * Please use commands.h for uCode API definitions. 
+ */ + +#ifndef __iwl_dev_h__ +#define __iwl_dev_h__ + +#include +#include +#include +#include +#include +#include + +#include "iwl-fw.h" +#include "iwl-eeprom-parse.h" +#include "iwl-csr.h" +#include "iwl-debug.h" +#include "iwl-agn-hw.h" +#include "iwl-op-mode.h" +#include "iwl-notif-wait.h" +#include "iwl-trans.h" + +#include "led.h" +#include "power.h" +#include "rs.h" +#include "tt.h" + +/* CT-KILL constants */ +#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ +#define CT_KILL_THRESHOLD 114 /* in Celsius */ +#define CT_KILL_EXIT_THRESHOLD 95 /* in Celsius */ + +/* Default noise level to report when noise measurement is not available. + * This may be because we're: + * 1) Not associated no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 200U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +#define IWL_NUM_SCAN_RATES (2) + + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +union iwl_ht_rate_supp { + u16 rates; + struct { + u8 siso_rate; + u8 mimo_rate; + }; +}; + +struct iwl_ht_config { + bool single_chain_sufficient; + enum ieee80211_smps_mode smps; /* current smps mode */ +}; + +/* QoS structures */ +struct iwl_qos_info { + int qos_active; + struct iwl_qosparam_cmd def_qos_parm; +}; + +/** + * enum iwl_agg_state + * + * The state machine of the BA agreement establishment / tear down. + * These states relate to a specific RA / TID. + * + * @IWL_AGG_OFF: aggregation is not used + * @IWL_AGG_STARTING: aggregation are starting (between start and oper) + * @IWL_AGG_ON: aggregation session is up + * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + */ +enum iwl_agg_state { + IWL_AGG_OFF = 0, + IWL_AGG_STARTING, + IWL_AGG_ON, + IWL_EMPTYING_HW_QUEUE_ADDBA, + IWL_EMPTYING_HW_QUEUE_DELBA, +}; + +/** + * struct iwl_ht_agg - aggregation state machine + + * This structs holds the states for the BA agreement establishment and tear + * down. It also holds the state during the BA session itself. 
This struct is + * duplicated for each RA / TID. + + * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the + * Tx response (REPLY_TX), and the block ack notification + * (REPLY_COMPRESSED_BA). + * @state: state of the BA agreement establishment / tear down. + * @txq_id: Tx queue used by the BA session + * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or + * the first packet to be sent in legacy HW queue in Tx AGG stop flow. + * Basically when next_reclaimed reaches ssn, we can tell mac80211 that + * we are ready to finish the Tx AGG stop / start flow. + * @wait_for_ba: Expect block-ack before next Tx reply + */ +struct iwl_ht_agg { + u32 rate_n_flags; + enum iwl_agg_state state; + u16 txq_id; + u16 ssn; + bool wait_for_ba; +}; + +/** + * struct iwl_tid_data - one for each RA / TID + + * This structs holds the states for each RA / TID. + + * @seq_number: the next WiFi sequence number to use + * @next_reclaimed: the WiFi sequence number of the next packet to be acked. + * This is basically (last acked packet++). + * @agg: aggregation state machine + */ +struct iwl_tid_data { + u16 seq_number; + u16 next_reclaimed; + struct iwl_ht_agg agg; +}; + +/* + * Structure should be accessed with sta_lock held. When station addition + * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only + * the commands (iwl_addsta_cmd and iwl_link_quality_cmd) without sta_lock + * held. + */ +struct iwl_station_entry { + struct iwl_addsta_cmd sta; + u8 used, ctxid; + struct iwl_link_quality_cmd *lq; +}; + +/* + * iwl_station_priv: Driver's private station information + * + * When mac80211 creates a station it reserves some space (hw->sta_data_size) + * in the structure for use by driver. This structure is places in that + * space. + */ +struct iwl_station_priv { + struct iwl_rxon_context *ctx; + struct iwl_lq_sta lq_sta; + atomic_t pending_frames; + bool client; + bool asleep; + u8 max_agg_bufsize; + u8 sta_id; +}; + +/** + * struct iwl_vif_priv - driver's private per-interface information + * + * When mac80211 allocates a virtual interface, it can allocate + * space for us to put data into. + */ +struct iwl_vif_priv { + struct iwl_rxon_context *ctx; + u8 ibss_bssid_sta_id; +}; + +struct iwl_sensitivity_ranges { + u16 min_nrg_cck; + + u16 nrg_th_cck; + u16 nrg_th_ofdm; + + u16 auto_corr_min_ofdm; + u16 auto_corr_min_ofdm_mrc; + u16 auto_corr_min_ofdm_x1; + u16 auto_corr_min_ofdm_mrc_x1; + + u16 auto_corr_max_ofdm; + u16 auto_corr_max_ofdm_mrc; + u16 auto_corr_max_ofdm_x1; + u16 auto_corr_max_ofdm_mrc_x1; + + u16 auto_corr_max_cck; + u16 auto_corr_max_cck_mrc; + u16 auto_corr_min_cck; + u16 auto_corr_min_cck_mrc; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + + +#define KELVIN_TO_CELSIUS(x) ((x)-273) +#define CELSIUS_TO_KELVIN(x) ((x)+273) + + +/****************************************************************************** + * + * Functions implemented in core module which are forward declared here + * for use by iwl-[4-5].c + * + * NOTE: The implementation of these functions are not hardware specific + * which is why they are in the core module files. 
+ * + * Naming convention -- + * iwl_ <-- Is part of iwlwifi + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * + ****************************************************************************/ +void iwl_update_chain_flags(struct iwl_priv *priv); +extern const u8 iwl_bcast_addr[ETH_ALEN]; + +#define IWL_OPERATION_MODE_AUTO 0 +#define IWL_OPERATION_MODE_HT_ONLY 1 +#define IWL_OPERATION_MODE_MIXED 2 +#define IWL_OPERATION_MODE_20MHZ 3 + +#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 + +/* Sensitivity and chain noise calibration */ +#define INITIALIZATION_VALUE 0xFFFF +#define IWL_CAL_NUM_BEACONS 16 +#define MAXIMUM_ALLOWED_PATHLOSS 15 + +#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 + +#define MAX_FA_OFDM 50 +#define MIN_FA_OFDM 5 +#define MAX_FA_CCK 50 +#define MIN_FA_CCK 5 + +#define AUTO_CORR_STEP_OFDM 1 + +#define AUTO_CORR_STEP_CCK 3 +#define AUTO_CORR_MAX_TH_CCK 160 + +#define NRG_DIFF 2 +#define NRG_STEP_CCK 2 +#define NRG_MARGIN 8 +#define MAX_NUMBER_CCK_NO_FA 100 + +#define AUTO_CORR_CCK_MIN_VAL_DEF (125) + +#define CHAIN_A 0 +#define CHAIN_B 1 +#define CHAIN_C 2 +#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 +#define ALL_BAND_FILTER 0xFF00 +#define IN_BAND_FILTER 0xFF +#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF + +#define NRG_NUM_PREV_STAT_L 20 +#define NUM_RX_CHAINS 3 + +enum iwlagn_false_alarm_state { + IWL_FA_TOO_MANY = 0, + IWL_FA_TOO_FEW = 1, + IWL_FA_GOOD_RANGE = 2, +}; + +enum iwlagn_chain_noise_state { + IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ + IWL_CHAIN_NOISE_ACCUMULATE, + IWL_CHAIN_NOISE_CALIBRATED, + IWL_CHAIN_NOISE_DONE, +}; + +/* Sensitivity calib data */ +struct iwl_sensitivity_data { + u32 auto_corr_ofdm; + u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 num_in_cck_no_fa; + u32 nrg_th_ofdm; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +/* Chain noise (differential Rx gain) calib data */ +struct iwl_chain_noise_data { + u32 active_chains; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u16 beacon_count; + u8 disconn_array[NUM_RX_CHAINS]; + u8 delta_gain_code[NUM_RX_CHAINS]; + u8 radio_write; + u8 state; +}; + +enum { + MEASUREMENT_READY = (1 << 0), + MEASUREMENT_ACTIVE = (1 << 1), +}; + +/* reply_tx_statistics (for _agn devices) */ +struct reply_tx_error_statistics { + u32 pp_delay; + u32 pp_few_bytes; + u32 pp_bt_prio; + u32 pp_quiet_period; + u32 pp_calc_ttak; + u32 int_crossed_retry; + u32 short_limit; + u32 long_limit; + u32 fifo_underrun; + u32 drain_flow; + u32 rfkill_flush; + u32 life_expire; + u32 dest_ps; + u32 host_abort; + u32 bt_retry; + u32 sta_invalid; + u32 frag_drop; + u32 tid_disable; + u32 fifo_flush; + u32 insuff_cf_poll; + u32 fail_hw_drop; + u32 sta_color_mismatch; + u32 unknown; +}; + +/* reply_agg_tx_statistics (for _agn devices) */ +struct reply_agg_tx_error_statistics { + u32 underrun; + u32 bt_prio; + u32 few_bytes; + u32 abort; + u32 last_sent_ttl; + u32 last_sent_try; + u32 last_sent_bt_kill; + u32 scd_query; + u32 bad_crc32; + u32 response; + u32 dump_tx; + u32 delay_tx; + u32 unknown; 
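+	/*
+	 * These counters (and struct reply_tx_error_statistics above) back
+	 * the reply_tx_error debugfs file; they are presumably bumped while
+	 * parsing (aggregated) TX responses, roughly (sketch only):
+	 *
+	 *	if (status & AGG_TX_STATE_UNDERRUN_MSK)
+	 *		priv->reply_agg_tx_stats.underrun++;
+	 */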
+}; + +/* + * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds + * to perform continuous uCode event logging operation if enabled + */ +#define UCODE_TRACE_PERIOD (10) + +/* + * iwl_event_log: current uCode event log position + * + * @ucode_trace: enable/disable ucode continuous trace timer + * @num_wraps: how many times the event buffer wraps + * @next_entry: the entry just before the next one that uCode would fill + * @non_wraps_count: counter for no wrap detected when dump ucode events + * @wraps_once_count: counter for wrap once detected when dump ucode events + * @wraps_more_count: counter for wrap more than once detected + * when dump ucode events + */ +struct iwl_event_log { + bool ucode_trace; + u32 num_wraps; + u32 next_entry; + int non_wraps_count; + int wraps_once_count; + int wraps_more_count; +}; + +#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) + +/* BT Antenna Coupling Threshold (dB) */ +#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) + +/* Firmware reload counter and Timestamp */ +#define IWL_MIN_RELOAD_DURATION 1000 /* 1000 ms */ +#define IWL_MAX_CONTINUE_RELOAD_CNT 4 + + +struct iwl_rf_reset { + int reset_request_count; + int reset_success_count; + int reset_reject_count; + unsigned long last_reset_jiffies; +}; + +enum iwl_rxon_context_id { + IWL_RXON_CTX_BSS, + IWL_RXON_CTX_PAN, + + NUM_IWL_RXON_CTX +}; + +/* extend beacon time format bit shifting */ +/* + * for _agn devices + * bits 31:22 - extended + * bits 21:0 - interval + */ +#define IWLAGN_EXT_BEACON_TIME_POS 22 + +struct iwl_rxon_context { + struct ieee80211_vif *vif; + + u8 mcast_queue; + u8 ac_to_queue[IEEE80211_NUM_ACS]; + u8 ac_to_fifo[IEEE80211_NUM_ACS]; + + /* + * We could use the vif to indicate active, but we + * also need it to be active during disabling when + * we already removed the vif for type setting. + */ + bool always_active, is_active; + + bool ht_need_multiple_chains; + + enum iwl_rxon_context_id ctxid; + + u32 interface_modes, exclusive_interface_modes; + u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype; + + /* + * We declare this const so it can only be + * changed via explicit cast within the + * routines that actually update the physical + * hardware. 
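+	 * In practice the staging copy below is what the driver edits; a
+	 * commit routine sends it to the device and then mirrors it into
+	 * active, conceptually (sketch):
+	 *
+	 *	memcpy((void *)&ctx->active, &ctx->staging,
+	 *	       sizeof(ctx->staging));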
+ */ + const struct iwl_rxon_cmd active; + struct iwl_rxon_cmd staging; + + struct iwl_rxon_time_cmd timing; + + struct iwl_qos_info qos_data; + + u8 bcast_sta_id, ap_sta_id; + + u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd; + u8 qos_cmd; + u8 wep_key_cmd; + + struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; + u8 key_mapping_keys; + + __le32 station_flags; + + int beacon_int; + + struct { + bool non_gf_sta_present; + u8 protection; + bool enabled, is_40mhz; + u8 extension_chan_offset; + } ht; +}; + +enum iwl_scan_type { + IWL_SCAN_NORMAL, + IWL_SCAN_RADIO_RESET, +}; + +/** + * struct iwl_hw_params + * + * Holds the module parameters + * + * @tx_chains_num: Number of TX chains + * @rx_chains_num: Number of RX chains + * @ct_kill_threshold: temperature threshold - in hw dependent unit + * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit + * relevant for 1000, 6000 and up + * @struct iwl_sensitivity_ranges: range of sensitivity values + * @use_rts_for_aggregation: use rts/cts protection for HT traffic + */ +struct iwl_hw_params { + u8 tx_chains_num; + u8 rx_chains_num; + bool use_rts_for_aggregation; + u32 ct_kill_threshold; + u32 ct_kill_exit_threshold; + + const struct iwl_sensitivity_ranges *sens; +}; + +/** + * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters + * @advanced_bt_coexist: support advanced bt coexist + * @bt_init_traffic_load: specify initial bt traffic load + * @bt_prio_boost: default bt priority boost value + * @agg_time_limit: maximum number of uSec in aggregation + * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode + */ +struct iwl_dvm_bt_params { + bool advanced_bt_coexist; + u8 bt_init_traffic_load; + u32 bt_prio_boost; + u16 agg_time_limit; + bool bt_sco_disable; + bool bt_session_2; +}; + +/** + * struct iwl_dvm_cfg - DVM firmware specific device configuration + * @set_hw_params: set hardware parameters + * @set_channel_switch: send channel switch command + * @nic_config: apply device specific configuration + * @temperature: read temperature + * @adv_thermal_throttle: support advance thermal throttle + * @support_ct_kill_exit: support ct kill exit condition + * @plcp_delta_threshold: plcp error rate threshold used to trigger + * radio tuning when there is a high receiving plcp error rate + * @chain_noise_scale: default chain noise scale used for gain computation + * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up + * @no_idle_support: do not support idle mode + * @bt_params: pointer to BT parameters + * @need_temp_offset_calib: need to perform temperature offset calibration + * @no_xtal_calib: some devices do not need crystal calibration data, + * don't send it to those + * @temp_offset_v2: support v2 of temperature offset calibration + * @adv_pm: advanced power management + */ +struct iwl_dvm_cfg { + void (*set_hw_params)(struct iwl_priv *priv); + int (*set_channel_switch)(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch); + void (*nic_config)(struct iwl_priv *priv); + void (*temperature)(struct iwl_priv *priv); + + const struct iwl_dvm_bt_params *bt_params; + s32 chain_noise_scale; + u8 plcp_delta_threshold; + bool adv_thermal_throttle; + bool support_ct_kill_exit; + bool hd_v2; + bool no_idle_support; + bool need_temp_offset_calib; + bool no_xtal_calib; + bool temp_offset_v2; + bool adv_pm; +}; + +struct iwl_wipan_noa_data { + struct rcu_head rcu_head; + u32 length; + u8 data[]; +}; + +/* Calibration disabling bit mask */ +enum { + IWL_CALIB_ENABLE_ALL = 0, + + 
IWL_SENSITIVITY_CALIB_DISABLED = BIT(0), + IWL_CHAIN_NOISE_CALIB_DISABLED = BIT(1), + IWL_TX_POWER_CALIB_DISABLED = BIT(2), + + IWL_CALIB_DISABLE_ALL = 0xFFFFFFFF, +}; + +#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \ + ((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific)) + +#define IWL_MAC80211_GET_DVM(_hw) \ + ((struct iwl_priv *) ((struct iwl_op_mode *) \ + (_hw)->priv)->op_mode_specific) + +struct iwl_priv { + + struct iwl_trans *trans; + struct device *dev; /* for debug prints only */ + const struct iwl_cfg *cfg; + const struct iwl_fw *fw; + const struct iwl_dvm_cfg *lib; + unsigned long status; + + spinlock_t sta_lock; + struct mutex mutex; + + unsigned long transport_queue_stop; + bool passive_no_rx; +#define IWL_INVALID_MAC80211_QUEUE 0xff + u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; + atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; + + unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + + /* ieee device used by generic ieee processing code */ + struct ieee80211_hw *hw; + + struct napi_struct *napi; + + struct list_head calib_results; + + struct workqueue_struct *workqueue; + + struct iwl_hw_params hw_params; + + enum ieee80211_band band; + u8 valid_contexts; + + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb); + + struct iwl_notif_wait_data notif_wait; + + /* spectrum measurement report caching */ + struct iwl_spectrum_notification measure_report; + u8 measurement_status; + + /* ucode beacon time */ + u32 ucode_beacon_time; + int missed_beacon_threshold; + + /* track IBSS manager (last beacon) status */ + u32 ibss_manager; + + /* jiffies when last recovery from statistics was performed */ + unsigned long rx_statistics_jiffies; + + /*counters */ + u32 rx_handlers_stats[REPLY_MAX]; + + /* rf reset */ + struct iwl_rf_reset rf_reset; + + /* firmware reload counter and timestamp */ + unsigned long reload_jiffies; + int reload_count; + bool ucode_loaded; + + u8 plcp_delta_threshold; + + /* thermal calibration */ + s32 temperature; /* Celsius */ + s32 last_temperature; + + struct iwl_wipan_noa_data __rcu *noa_data; + + /* Scan related variables */ + unsigned long scan_start; + unsigned long scan_start_tsf; + void *scan_cmd; + enum ieee80211_band scan_band; + struct cfg80211_scan_request *scan_request; + struct ieee80211_vif *scan_vif; + enum iwl_scan_type scan_type; + u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 mgmt_tx_ant; + + /* max number of station keys */ + u8 sta_key_max_num; + + bool new_scan_threshold_behaviour; + + bool wowlan; + + /* EEPROM MAC addresses */ + struct mac_address addresses[2]; + + struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; + + __le16 switch_channel; + + u8 start_calib; + struct iwl_sensitivity_data sensitivity_data; + struct iwl_chain_noise_data chain_noise_data; + __le16 sensitivity_tbl[HD_TABLE_SIZE]; + __le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES]; + + struct iwl_ht_config current_ht_config; + + /* Rate scaling data */ + u8 retry_rate; + + int activity_timer_active; + + struct iwl_power_mgr power_data; + struct iwl_tt_mgmt thermal_throttle; + + /* station table variables */ + int num_stations; + struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; + unsigned long ucode_key_table; + struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; + atomic_t num_aux_in_flight; + + u8 mac80211_registered; + + /* Indication if ieee80211_ops->open has been called */ + u8 is_open; + + enum nl80211_iftype iw_mode; + + /* Last Rx'd beacon timestamp */ + u64 timestamp; + + struct { + __le32 
flag; + struct statistics_general_common common; + struct statistics_rx_non_phy rx_non_phy; + struct statistics_rx_phy rx_ofdm; + struct statistics_rx_ht_phy rx_ofdm_ht; + struct statistics_rx_phy rx_cck; + struct statistics_tx tx; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct statistics_bt_activity bt_activity; + __le32 num_bt_kills, accum_num_bt_kills; +#endif + spinlock_t lock; + } statistics; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct { + struct statistics_general_common common; + struct statistics_rx_non_phy rx_non_phy; + struct statistics_rx_phy rx_ofdm; + struct statistics_rx_ht_phy rx_ofdm_ht; + struct statistics_rx_phy rx_cck; + struct statistics_tx tx; + struct statistics_bt_activity bt_activity; + } accum_stats, delta_stats, max_delta_stats; +#endif + + /* + * reporting the number of tids has AGG on. 0 means + * no AGGREGATION + */ + u8 agg_tids_count; + + struct iwl_rx_phy_res last_phy_res; + u32 ampdu_ref; + bool last_phy_res_valid; + + /* + * chain noise reset and gain commands are the + * two extra calibration commands follows the standard + * phy calibration commands + */ + u8 phy_calib_chain_noise_reset_cmd; + u8 phy_calib_chain_noise_gain_cmd; + + /* counts reply_tx error */ + struct reply_tx_error_statistics reply_tx_stats; + struct reply_agg_tx_error_statistics reply_agg_tx_stats; + + /* bt coex */ + u8 bt_enable_flag; + u8 bt_status; + u8 bt_traffic_load, last_bt_traffic_load; + bool bt_ch_announce; + bool bt_full_concurrent; + bool bt_ant_couple_ok; + __le32 kill_ack_mask; + __le32 kill_cts_mask; + __le16 bt_valid; + bool reduced_txpower; + u16 bt_on_thresh; + u16 bt_duration; + u16 dynamic_frag_thresh; + u8 bt_ci_compliance; + struct work_struct bt_traffic_change_work; + bool bt_enable_pspoll; + struct iwl_rxon_context *cur_rssi_ctx; + bool bt_is_sco; + + struct work_struct restart; + struct work_struct scan_completed; + struct work_struct abort_scan; + + struct work_struct beacon_update; + struct iwl_rxon_context *beacon_ctx; + struct sk_buff *beacon_skb; + void *beacon_cmd; + + struct work_struct tt_work; + struct work_struct ct_enter; + struct work_struct ct_exit; + struct work_struct start_internal_scan; + struct work_struct tx_flush; + struct work_struct bt_full_concurrency; + struct work_struct bt_runtime_config; + + struct delayed_work scan_check; + + /* TX Power settings */ + s8 tx_power_user_lmt; + s8 tx_power_next; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* debugfs */ + struct dentry *debugfs_dir; + u32 dbgfs_sram_offset, dbgfs_sram_len; + bool disable_ht40; + void *wowlan_sram; +#endif /* CONFIG_IWLWIFI_DEBUGFS */ + + struct iwl_nvm_data *nvm_data; + /* eeprom blob for debugfs */ + u8 *eeprom_blob; + size_t eeprom_blob_size; + + struct work_struct txpower_work; + u32 calib_disabled; + struct work_struct run_time_calib_work; + struct timer_list statistics_periodic; + struct timer_list ucode_trace; + + struct iwl_event_log event_log; + +#ifdef CONFIG_IWLWIFI_LEDS + struct led_classdev led; + unsigned long blink_on, blink_off; + bool led_registered; +#endif + + /* WoWLAN GTK rekey data */ + u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; + __le64 replay_ctr; + __le16 last_seq_ctl; + bool have_rekey_data; +#ifdef CONFIG_PM_SLEEP + struct wiphy_wowlan_support wowlan_support; +#endif + + /* device_pointers: pointers to ucode event tables */ + struct { + u32 error_event_table; + u32 log_event_table; + } device_pointers; + + /* indicator of loaded ucode image */ + enum iwl_ucode_type cur_ucode; +}; /*iwl_priv */ + +static inline struct iwl_rxon_context * 
+iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + return vif_priv->ctx; +} + +#define for_each_context(priv, ctx) \ + for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \ + ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ + if (priv->valid_contexts & BIT(ctx->ctxid)) + +static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx) +{ + return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +static inline int iwl_is_associated(struct iwl_priv *priv, + enum iwl_rxon_context_id ctxid) +{ + return iwl_is_associated_ctx(&priv->contexts[ctxid]); +} + +static inline int iwl_is_any_associated(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + for_each_context(priv, ctx) + if (iwl_is_associated_ctx(ctx)) + return true; + return false; +} + +#endif /* __iwl_dev_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c new file mode 100644 index 000000000000..34b41e5f7cfc --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -0,0 +1,690 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +/* + * DVM device-specific data & functions + */ +#include "iwl-io.h" +#include "iwl-prph.h" +#include "iwl-eeprom-parse.h" + +#include "agn.h" +#include "dev.h" +#include "commands.h" + + +/* + * 1000 series + * =========== + */ + +/* + * For 1000, use advance thermal throttling critical temperature threshold, + * but legacy thermal management implementation for now. + * This is for the reason of 1000 uCode using advance thermal throttling API + * but not implement ct_kill_exit based on ct_kill exit temperature + * so the thermal throttling will still based on legacy thermal throttling + * management. 
+ * The code here need to be modified once 1000 uCode has the advanced thermal + * throttling algorithm in place + */ +static void iwl1000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* NIC configuration for 1000 series */ +static void iwl1000_nic_config(struct iwl_priv *priv) +{ + /* Setting digital SVR for 1000 card to 1.32V */ + /* locking is acquired in iwl_set_bits_mask_prph() function */ + iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG, + APMG_SVR_DIGITAL_VOLTAGE_1_32, + ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK); +} + +/** + * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv, + u16 tsf_bits) +{ + return (1 << tsf_bits) - 1; +} + +/** + * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv, + u16 tsf_bits) +{ + return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; +} + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in extended:internal format + * the extended part is the beacon counts + * the internal part is the time in usec within one beacon interval + */ +static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, + u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * TIME_UNIT; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & + (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >> + IWLAGN_EXT_BEACON_TIME_POS); + rem = (usec % interval) & iwl_beacon_time_mask_low(priv, + IWLAGN_EXT_BEACON_TIME_POS); + + return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem; +} + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ +static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval) +{ + u32 base_low = base & iwl_beacon_time_mask_low(priv, + IWLAGN_EXT_BEACON_TIME_POS); + u32 addon_low = addon & iwl_beacon_time_mask_low(priv, + IWLAGN_EXT_BEACON_TIME_POS); + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & iwl_beacon_time_mask_high(priv, + IWLAGN_EXT_BEACON_TIME_POS)) + + (addon & iwl_beacon_time_mask_high(priv, + IWLAGN_EXT_BEACON_TIME_POS)); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << IWLAGN_EXT_BEACON_TIME_POS); + } else + res += (1 << IWLAGN_EXT_BEACON_TIME_POS); + + return cpu_to_le32(res); +} + +static const struct iwl_sensitivity_ranges iwl1000_sensitivity = { + .min_nrg_cck = 95, + .auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 120, + .auto_corr_min_ofdm_mrc_x1 = 240, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 155, + .auto_corr_max_ofdm_mrc_x1 = 290, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 170, + .auto_corr_max_cck_mrc = 400, + .nrg_th_cck = 95, + .nrg_th_ofdm = 95, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static void 
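iwl_usecs_to_beacons() and iwl_add_beacon_time() above pack an absolute time into the extended format: the beacon count sits in the high bits and the residual microseconds within the current beacon interval in the low bits. A minimal standalone sketch of the same split, assuming the shift position is 22 and one time unit is 1024 usec (both values are assumptions for illustration, not quoted from this patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed values, for illustration only */
	#define EXT_BEACON_TIME_POS	22	/* bit where the beacon count starts */
	#define TIME_UNIT		1024	/* usec per time unit (TU) */

	static uint32_t usecs_to_ext_beacon(uint32_t usec, uint32_t beacon_interval_tu)
	{
		uint32_t interval = beacon_interval_tu * TIME_UNIT;
		uint32_t low_mask = (1u << EXT_BEACON_TIME_POS) - 1;
		uint32_t quot, rem;

		if (!interval || !usec)
			return 0;

		quot = (usec / interval) & (0xffffffffu >> EXT_BEACON_TIME_POS);
		rem  = (usec % interval) & low_mask;

		return (quot << EXT_BEACON_TIME_POS) + rem;
	}

	int main(void)
	{
		/* 100 TU interval = 102400 usec; 350000 usec elapsed ->
		 * 3 full beacon intervals plus 42800 usec */
		uint32_t v = usecs_to_ext_beacon(350000, 100);

		printf("beacons=%u rem=%u usec\n",
		       v >> EXT_BEACON_TIME_POS,
		       v & ((1u << EXT_BEACON_TIME_POS) - 1));
		return 0;
	}

With a 100 TU interval, 350000 usec becomes 3 beacons plus 42800 usec, which is the form iwl_add_beacon_time() later operates on when it sums the count and the residual separately.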
iwl1000_hw_set_hw_params(struct iwl_priv *priv) +{ + iwl1000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl1000_sensitivity; +} + +const struct iwl_dvm_cfg iwl_dvm_1000_cfg = { + .set_hw_params = iwl1000_hw_set_hw_params, + .nic_config = iwl1000_nic_config, + .temperature = iwlagn_temperature, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + + +/* + * 2000 series + * =========== + */ + +static void iwl2000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* NIC configuration for 2000 series */ +static void iwl2000_nic_config(struct iwl_priv *priv) +{ + iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); +} + +static const struct iwl_sensitivity_ranges iwl2000_sensitivity = { + .min_nrg_cck = 97, + .auto_corr_min_ofdm = 80, + .auto_corr_min_ofdm_mrc = 128, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 192, + + .auto_corr_max_ofdm = 145, + .auto_corr_max_ofdm_mrc = 232, + .auto_corr_max_ofdm_x1 = 110, + .auto_corr_max_ofdm_mrc_x1 = 232, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 175, + .auto_corr_min_cck_mrc = 160, + .auto_corr_max_cck_mrc = 310, + .nrg_th_cck = 97, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static void iwl2000_hw_set_hw_params(struct iwl_priv *priv) +{ + iwl2000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl2000_sensitivity; +} + +const struct iwl_dvm_cfg iwl_dvm_2000_cfg = { + .set_hw_params = iwl2000_hw_set_hw_params, + .nic_config = iwl2000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_105_cfg = { + .set_hw_params = iwl2000_hw_set_hw_params, + .nic_config = iwl2000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, + .adv_pm = true, +}; + +static const struct iwl_dvm_bt_params iwl2030_bt_params = { + /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ + .advanced_bt_coexist = true, + .agg_time_limit = BT_AGG_THRESHOLD_DEF, + .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, + .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32, + .bt_sco_disable = true, + .bt_session_2 = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_2030_cfg = { + .set_hw_params = iwl2000_hw_set_hw_params, + .nic_config = iwl2000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .bt_params = &iwl2030_bt_params, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, + .adv_pm = true, +}; + +/* + * 5000 series + * =========== + */ + +/* NIC configuration for 5000 series */ +static const struct iwl_sensitivity_ranges iwl5000_sensitivity = { + .min_nrg_cck = 100, + 
.auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 120, + .auto_corr_max_ofdm_mrc_x1 = 240, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 200, + .auto_corr_max_cck_mrc = 400, + .nrg_th_cck = 100, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static const struct iwl_sensitivity_ranges iwl5150_sensitivity = { + .min_nrg_cck = 95, + .auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + /* max = min for performance bug in 5150 DSP */ + .auto_corr_max_ofdm_x1 = 105, + .auto_corr_max_ofdm_mrc_x1 = 220, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 170, + .auto_corr_max_cck_mrc = 400, + .nrg_th_cck = 95, + .nrg_th_ofdm = 95, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) + +static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) +{ + u16 temperature, voltage; + + temperature = le16_to_cpu(priv->nvm_data->kelvin_temperature); + voltage = le16_to_cpu(priv->nvm_data->kelvin_voltage); + + /* offset = temp - volt / coeff */ + return (s32)(temperature - + voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); +} + +static void iwl5150_set_ct_threshold(struct iwl_priv *priv) +{ + const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; + s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - + iwl_temp_calib_to_offset(priv); + + priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef; +} + +static void iwl5000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; +} + +static void iwl5000_hw_set_hw_params(struct iwl_priv *priv) +{ + iwl5000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl5000_sensitivity; +} + +static void iwl5150_hw_set_hw_params(struct iwl_priv *priv) +{ + iwl5150_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl5150_sensitivity; +} + +static void iwl5150_temperature(struct iwl_priv *priv) +{ + u32 vt = 0; + s32 offset = iwl_temp_calib_to_offset(priv); + + vt = le32_to_cpu(priv->statistics.common.temperature); + vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; + /* now vt hold the temperature in Kelvin */ + priv->temperature = KELVIN_TO_CELSIUS(vt); + iwl_tt_handler(priv); +} + +static int iwl5000_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) +{ + /* + * MULTI-FIXME + * See iwlagn_mac_channel_switch. 
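For the 5150, temperature is derived from a voltage reading: the EEPROM pair kelvin_temperature/kelvin_voltage fixes a calibration offset, and later statistics readings are converted with the same -5 voltage-to-temperature coefficient. A short worked sketch with made-up EEPROM numbers (illustrative only):

	#include <stdio.h>

	#define VOLT_TO_TEMP_COEFF	(-5)	/* as in IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF */
	#define KELVIN_OFFSET		273

	int main(void)
	{
		/* Made-up EEPROM calibration pair, not real device data */
		int eeprom_temp_kelvin = 298;
		int eeprom_voltage = 100;

		/* offset = temp - volt / coeff, as in iwl_temp_calib_to_offset() */
		int offset = eeprom_temp_kelvin - eeprom_voltage / VOLT_TO_TEMP_COEFF;

		/* a later statistics notification reports this raw voltage */
		int raw_voltage = 120;
		int kelvin = raw_voltage / VOLT_TO_TEMP_COEFF + offset;

		printf("offset=%d, reading %d -> %d K (%d C)\n",
		       offset, raw_voltage, kelvin, kelvin - KELVIN_OFFSET);
		return 0;
	}

With these numbers the offset works out to 318, and a raw reading of 120 maps to 294 K, about 21 C.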
+ */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl5000_channel_switch_cmd cmd; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); + struct ieee80211_vif *vif = ctx->vif; + struct iwl_host_cmd hcmd = { + .id = REPLY_CHANNEL_SWITCH, + .len = { sizeof(cmd), }, + .data = { &cmd, }, + }; + + cmd.band = priv->band == IEEE80211_BAND_2GHZ; + ch = ch_switch->chandef.chan->hw_value; + IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", + ctx->active.channel, ch); + cmd.channel = cpu_to_le16(ch); + cmd.rxon_flags = ctx->staging.flags; + cmd.rxon_filter_flags = ctx->staging.filter_flags; + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + cmd.expect_beacon = + ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR; + + return iwl_dvm_send_cmd(priv, &hcmd); +} + +const struct iwl_dvm_cfg iwl_dvm_5000_cfg = { + .set_hw_params = iwl5000_hw_set_hw_params, + .set_channel_switch = iwl5000_hw_channel_switch, + .temperature = iwlagn_temperature, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .no_idle_support = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_5150_cfg = { + .set_hw_params = iwl5150_hw_set_hw_params, + .set_channel_switch = iwl5000_hw_channel_switch, + .temperature = iwl5150_temperature, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .no_idle_support = true, + .no_xtal_calib = true, +}; + + + +/* + * 6000 series + * =========== + */ + +static void iwl6000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* NIC configuration for 6000 series */ +static void iwl6000_nic_config(struct iwl_priv *priv) +{ + switch (priv->cfg->device_family) { + case IWL_DEVICE_FAMILY_6005: + case IWL_DEVICE_FAMILY_6030: + case IWL_DEVICE_FAMILY_6000: + break; + case IWL_DEVICE_FAMILY_6000i: + /* 2x2 IPA phy type */ + iwl_write32(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); + break; + case IWL_DEVICE_FAMILY_6050: + /* Indicate calibration version to uCode. */ + if (priv->nvm_data->calib_version >= 6) + iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); + break; + case IWL_DEVICE_FAMILY_6150: + /* Indicate calibration version to uCode. 
*/ + if (priv->nvm_data->calib_version >= 6) + iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); + iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_6050_1x2); + break; + default: + WARN_ON(1); + } +} + +static const struct iwl_sensitivity_ranges iwl6000_sensitivity = { + .min_nrg_cck = 110, + .auto_corr_min_ofdm = 80, + .auto_corr_min_ofdm_mrc = 128, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 192, + + .auto_corr_max_ofdm = 145, + .auto_corr_max_ofdm_mrc = 232, + .auto_corr_max_ofdm_x1 = 110, + .auto_corr_max_ofdm_mrc_x1 = 232, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 175, + .auto_corr_min_cck_mrc = 160, + .auto_corr_max_cck_mrc = 310, + .nrg_th_cck = 110, + .nrg_th_ofdm = 110, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 336, + .nrg_th_cca = 62, +}; + +static void iwl6000_hw_set_hw_params(struct iwl_priv *priv) +{ + iwl6000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl6000_sensitivity; + +} + +static int iwl6000_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) +{ + /* + * MULTI-FIXME + * See iwlagn_mac_channel_switch. + */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl6000_channel_switch_cmd *cmd; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); + struct ieee80211_vif *vif = ctx->vif; + struct iwl_host_cmd hcmd = { + .id = REPLY_CHANNEL_SWITCH, + .len = { sizeof(*cmd), }, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + int err; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + hcmd.data[0] = cmd; + + cmd->band = priv->band == IEEE80211_BAND_2GHZ; + ch = ch_switch->chandef.chan->hw_value; + IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", + ctx->active.channel, ch); + cmd->channel = cpu_to_le16(ch); + cmd->rxon_flags = ctx->staging.flags; + cmd->rxon_filter_flags = ctx->staging.filter_flags; + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd->switch_time = iwl_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd->switch_time); + cmd->expect_beacon = + ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR; + + err = iwl_dvm_send_cmd(priv, &hcmd); + kfree(cmd); + return err; +} + +const struct iwl_dvm_cfg iwl_dvm_6000_cfg = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +const struct iwl_dvm_cfg 
iwl_dvm_6005_cfg = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .need_temp_offset_calib = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_6050_cfg = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1500, +}; + +static const struct iwl_dvm_bt_params iwl6000_bt_params = { + /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ + .advanced_bt_coexist = true, + .agg_time_limit = BT_AGG_THRESHOLD_DEF, + .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, + .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, + .bt_sco_disable = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_6030_cfg = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .bt_params = &iwl6000_bt_params, + .need_temp_offset_calib = true, + .adv_pm = true, +}; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c new file mode 100644 index 000000000000..ca4d6692cc4e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -0,0 +1,223 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iwl-io.h" +#include "iwl-trans.h" +#include "iwl-modparams.h" +#include "dev.h" +#include "agn.h" + +/* Throughput OFF time(ms) ON time (ms) + * >300 25 25 + * >200 to 300 40 40 + * >100 to 200 55 55 + * >70 to 100 65 65 + * >50 to 70 75 75 + * >20 to 50 85 85 + * >10 to 20 95 95 + * >5 to 10 110 110 + * >1 to 5 130 130 + * >0 to 1 167 167 + * <=0 SOLID ON + */ +static const struct ieee80211_tpt_blink iwl_blink[] = { + { .throughput = 0, .blink_time = 334 }, + { .throughput = 1 * 1024 - 1, .blink_time = 260 }, + { .throughput = 5 * 1024 - 1, .blink_time = 220 }, + { .throughput = 10 * 1024 - 1, .blink_time = 190 }, + { .throughput = 20 * 1024 - 1, .blink_time = 170 }, + { .throughput = 50 * 1024 - 1, .blink_time = 150 }, + { .throughput = 70 * 1024 - 1, .blink_time = 130 }, + { .throughput = 100 * 1024 - 1, .blink_time = 110 }, + { .throughput = 200 * 1024 - 1, .blink_time = 80 }, + { .throughput = 300 * 1024 - 1, .blink_time = 50 }, +}; + +/* Set led register off */ +void iwlagn_led_enable(struct iwl_priv *priv) +{ + iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); +} + +/* + * Adjust led blink rate to compensate on a MAC Clock difference on every HW + * Led blink rate analysis showed an average deviation of 20% on 5000 series + * and up. + * Need to compensate on the led on/off time per HW according to the deviation + * to achieve the desired led frequency + * The calculation is: (100-averageDeviation)/100 * blinkTime + * For code efficiency the calculation will be: + * compensation = (100 - averageDeviation) * 64 / 100 + * NewBlinkTime = (compensation * BlinkTime) / 64 + */ +static inline u8 iwl_blink_compensation(struct iwl_priv *priv, + u8 time, u16 compensation) +{ + if (!compensation) { + IWL_ERR(priv, "undefined blink compensation: " + "use pre-defined blinking time\n"); + return time; + } + + return (u8)((time * compensation) >> 6); +} + +static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_LEDS_CMD, + .len = { sizeof(struct iwl_led_cmd), }, + .data = { led_cmd, }, + .flags = CMD_ASYNC, + }; + u32 reg; + + reg = iwl_read32(priv->trans, CSR_LED_REG); + if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) + iwl_write32(priv->trans, CSR_LED_REG, + reg & CSR_LED_BSM_CTRL_MSK); + + return iwl_dvm_send_cmd(priv, &cmd); +} + +/* Set led pattern command */ +static int iwl_led_cmd(struct iwl_priv *priv, + unsigned long on, + unsigned long off) +{ + struct iwl_led_cmd led_cmd = { + .id = IWL_LED_LINK, + .interval = IWL_DEF_LED_INTRVL + }; + int ret; + + if (!test_bit(STATUS_READY, &priv->status)) + return -EBUSY; + + if (priv->blink_on == on && priv->blink_off == off) + return 0; + + if (off == 0) { + /* led is SOLID_ON */ + on = IWL_LED_SOLID; + } + + IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", + priv->cfg->base_params->led_compensation); + led_cmd.on = iwl_blink_compensation(priv, on, + priv->cfg->base_params->led_compensation); + led_cmd.off = iwl_blink_compensation(priv, off, + priv->cfg->base_params->led_compensation); + + ret = iwl_send_led_cmd(priv, &led_cmd); + if (!ret) { + priv->blink_on = on; + priv->blink_off = off; + } + return ret; +} + +static void iwl_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct iwl_priv *priv = 
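The compensation note above reduces to scaling the nominal blink period by compensation/64, with compensation derived as (100 - averageDeviation) * 64 / 100. A small sketch of that arithmetic, assuming the 20% average deviation mentioned in the comment; the names and constant are illustrative, the real per-device value comes from cfg->base_params->led_compensation:

	#include <stdio.h>

	/* Assumed 20% average MAC clock deviation, per the comment above */
	#define ASSUMED_AVG_DEVIATION	20

	static unsigned int blink_compensation(unsigned int time_ms,
					       unsigned int compensation)
	{
		if (!compensation)		/* undefined: keep the nominal time */
			return time_ms;
		return (time_ms * compensation) >> 6;
	}

	int main(void)
	{
		unsigned int comp = (100 - ASSUMED_AVG_DEVIATION) * 64 / 100;	/* 51 */

		printf("compensation=%u, 110 ms nominal -> %u ms\n",
		       comp, blink_compensation(110, comp));
		return 0;
	}

A nominal 110 ms on/off time therefore shrinks to roughly 87 ms once the clock deviation is compensated.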
container_of(led_cdev, struct iwl_priv, led); + unsigned long on = 0; + + if (brightness > 0) + on = IWL_LED_SOLID; + + iwl_led_cmd(priv, on, 0); +} + +static int iwl_led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + + return iwl_led_cmd(priv, *delay_on, *delay_off); +} + +void iwl_leds_init(struct iwl_priv *priv) +{ + int mode = iwlwifi_mod_params.led_mode; + int ret; + + if (mode == IWL_LED_DISABLE) { + IWL_INFO(priv, "Led disabled\n"); + return; + } + if (mode == IWL_LED_DEFAULT) + mode = priv->cfg->led_mode; + + priv->led.name = kasprintf(GFP_KERNEL, "%s-led", + wiphy_name(priv->hw->wiphy)); + priv->led.brightness_set = iwl_led_brightness_set; + priv->led.blink_set = iwl_led_blink_set; + priv->led.max_brightness = 1; + + switch (mode) { + case IWL_LED_DEFAULT: + WARN_ON(1); + break; + case IWL_LED_BLINK: + priv->led.default_trigger = + ieee80211_create_tpt_led_trigger(priv->hw, + IEEE80211_TPT_LEDTRIG_FL_CONNECTED, + iwl_blink, ARRAY_SIZE(iwl_blink)); + break; + case IWL_LED_RF_STATE: + priv->led.default_trigger = + ieee80211_get_radio_led_name(priv->hw); + break; + } + + ret = led_classdev_register(priv->trans->dev, &priv->led); + if (ret) { + kfree(priv->led.name); + return; + } + + priv->led_registered = true; +} + +void iwl_leds_exit(struct iwl_priv *priv) +{ + if (!priv->led_registered) + return; + + led_classdev_unregister(&priv->led); + kfree(priv->led.name); +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.h b/drivers/net/wireless/intel/iwlwifi/dvm/led.h new file mode 100644 index 000000000000..1c6b2252d0f2 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.h @@ -0,0 +1,55 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_leds_h__ +#define __iwl_leds_h__ + + +struct iwl_priv; + +#define IWL_LED_SOLID 11 +#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) + +#define IWL_LED_ACTIVITY (0<<1) +#define IWL_LED_LINK (1<<1) + +#ifdef CONFIG_IWLWIFI_LEDS +void iwlagn_led_enable(struct iwl_priv *priv); +void iwl_leds_init(struct iwl_priv *priv); +void iwl_leds_exit(struct iwl_priv *priv); +#else +static inline void iwlagn_led_enable(struct iwl_priv *priv) +{ +} +static inline void iwl_leds_init(struct iwl_priv *priv) +{ +} +static inline void iwl_leds_exit(struct iwl_priv *priv) +{ +} +#endif + +#endif /* __iwl_leds_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c new file mode 100644 index 000000000000..e18629a16fb0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -0,0 +1,1300 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include +#include +#include + +#include "iwl-io.h" +#include "iwl-agn-hw.h" +#include "iwl-trans.h" +#include "iwl-modparams.h" + +#include "dev.h" +#include "agn.h" + +int iwlagn_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) && + (addr < IWLAGN_RTC_DATA_UPPER_BOUND); +} + +int iwlagn_send_tx_power(struct iwl_priv *priv) +{ + struct iwlagn_tx_power_dbm_cmd tx_power_cmd; + u8 tx_ant_cfg_cmd; + + if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), + "TX Power requested while scanning!\n")) + return -EAGAIN; + + /* half dBm need to multiply */ + tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); + + if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) { + /* + * For the newer devices which using enhanced/extend tx power + * table in EEPROM, the format is in half dBm. driver need to + * convert to dBm format before report to mac80211. + * By doing so, there is a possibility of 1/2 dBm resolution + * lost. driver will perform "round-up" operation before + * reporting, but it will cause 1/2 dBm tx power over the + * regulatory limit. 
Perform the checking here, if the + * "tx_power_user_lmt" is higher than EEPROM value (in + * half-dBm format), lower the tx power based on EEPROM + */ + tx_power_cmd.global_lmt = + priv->nvm_data->max_tx_pwr_half_dbm; + } + tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED; + tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO; + + if (IWL_UCODE_API(priv->fw->ucode_ver) == 1) + tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; + else + tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; + + return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0, + sizeof(tx_power_cmd), &tx_power_cmd); +} + +void iwlagn_temperature(struct iwl_priv *priv) +{ + lockdep_assert_held(&priv->statistics.lock); + + /* store temperature from correct statistics (in Celsius) */ + priv->temperature = le32_to_cpu(priv->statistics.common.temperature); + iwl_tt_handler(priv); +} + +int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +{ + int idx = 0; + int band_offset = 0; + + /* HT rate format: mac80211 wants an MCS number, which is just LSB */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + return idx; + /* Legacy rate format, search for match in table */ + } else { + if (band == IEEE80211_BAND_5GHZ) + band_offset = IWL_FIRST_OFDM_RATE; + for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) + if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx - band_offset; + } + + return -1; +} + +int iwlagn_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + if (add) + return iwlagn_add_bssid_station(priv, vif_priv->ctx, + vif->bss_conf.bssid, + &vif_priv->ibss_bssid_sta_id); + return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, + vif->bss_conf.bssid); +} + +/** + * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode + * + * pre-requirements: + * 1. acquire mutex before calling + * 2. 
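iwlagn_send_tx_power() above works in half-dBm units: the user limit in dBm is doubled for the command and then clamped against the EEPROM maximum, which is already stored in half-dBm. A tiny sketch of that conversion with made-up numbers (neither value is taken from real EEPROM data):

	#include <stdio.h>

	int main(void)
	{
		/* Made-up numbers: 16 dBm user limit vs. an EEPROM maximum of
		 * 31 half-dBm (15.5 dBm) from the enhanced TX power table. */
		int user_lmt_dbm = 16;
		int max_half_dbm = 31;
		int cmd_lmt = 2 * user_lmt_dbm;		/* the command wants half-dBm */

		if (cmd_lmt > max_half_dbm)
			cmd_lmt = max_half_dbm;		/* never exceed the EEPROM limit */

		printf("user %d dBm -> command limit %d half-dBm (%.1f dBm)\n",
		       user_lmt_dbm, cmd_lmt, cmd_lmt / 2.0);
		return 0;
	}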
make sure rf is on and not in exit state + */ +int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk) +{ + struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = { + .flush_control = cpu_to_le16(IWL_DROP_ALL), + }; + struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = { + .flush_control = cpu_to_le16(IWL_DROP_ALL), + }; + + u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | + IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK; + + if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))) + queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK | + IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK | + IWL_PAN_SCD_MGMT_MSK | + IWL_PAN_SCD_MULTICAST_MSK; + + if (priv->nvm_data->sku_cap_11n_enable) + queue_control |= IWL_AGG_TX_QUEUE_MSK; + + if (scd_q_msk) + queue_control = scd_q_msk; + + IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control); + flush_cmd_v3.queue_control = cpu_to_le32(queue_control); + flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control); + + if (IWL_UCODE_API(priv->fw->ucode_ver) > 2) + return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, + sizeof(flush_cmd_v3), + &flush_cmd_v3); + return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, + sizeof(flush_cmd_v2), &flush_cmd_v2); +} + +void iwlagn_dev_txfifo_flush(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + ieee80211_stop_queues(priv->hw); + if (iwlagn_txfifo_flush(priv, 0)) { + IWL_ERR(priv, "flush request fail\n"); + goto done; + } + IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); + iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); +done: + ieee80211_wake_queues(priv->hw); + mutex_unlock(&priv->mutex); +} + +/* + * BT coex + */ +/* Notmal TDM */ +static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaeaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xcc00ff28), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xcc00aaaa), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xc0004000), + cpu_to_le32(0x00004000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xf0005000), +}; + + +/* Loose Coex */ +static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaeaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xcc00ff28), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xcc00aaaa), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xf0005000), +}; + +/* Full concurrency */ +static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), +}; + +void iwlagn_send_advance_bt_config(struct iwl_priv *priv) +{ + struct iwl_basic_bt_cmd basic = { + .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT, + .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT, + .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT, + .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT, + }; + struct iwl_bt_cmd_v1 bt_cmd_v1; + struct iwl_bt_cmd_v2 bt_cmd_v2; + int ret; + + BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != + sizeof(basic.bt3_lookup_table)); + + if (priv->lib->bt_params) { + /* + * newer generation of devices (2000 series and newer) + * use the version 2 of the bt command + * we need to make sure sending 
the host command + * with correct data structure to avoid uCode assert + */ + if (priv->lib->bt_params->bt_session_2) { + bt_cmd_v2.prio_boost = cpu_to_le32( + priv->lib->bt_params->bt_prio_boost); + bt_cmd_v2.tx_prio_boost = 0; + bt_cmd_v2.rx_prio_boost = 0; + } else { + /* older version only has 8 bits */ + WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF); + bt_cmd_v1.prio_boost = + priv->lib->bt_params->bt_prio_boost; + bt_cmd_v1.tx_prio_boost = 0; + bt_cmd_v1.rx_prio_boost = 0; + } + } else { + IWL_ERR(priv, "failed to construct BT Coex Config\n"); + return; + } + + /* + * Possible situations when BT needs to take over for receive, + * at the same time where STA needs to response to AP's frame(s), + * reduce the tx power of the required response frames, by that, + * allow the concurrent BT receive & WiFi transmit + * (BT - ANT A, WiFi -ANT B), without interference to one another + * + * Reduced tx power apply to control frames only (ACK/Back/CTS) + * when indicated by the BT config command + */ + basic.kill_ack_mask = priv->kill_ack_mask; + basic.kill_cts_mask = priv->kill_cts_mask; + if (priv->reduced_txpower) + basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR; + basic.valid = priv->bt_valid; + + /* + * Configure BT coex mode to "no coexistence" when the + * user disabled BT coexistence, we have no interface + * (might be in monitor mode), or the interface is in + * IBSS mode (no proper uCode support for coex then). + */ + if (!iwlwifi_mod_params.bt_coex_active || + priv->iw_mode == NL80211_IFTYPE_ADHOC) { + basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED; + } else { + basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << + IWLAGN_BT_FLAG_COEX_MODE_SHIFT; + + if (!priv->bt_enable_pspoll) + basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; + else + basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; + + if (priv->bt_ch_announce) + basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; + IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags); + } + priv->bt_enable_flag = basic.flags; + if (priv->bt_full_concurrent) + memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup, + sizeof(iwlagn_concurrent_lookup)); + else + memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup, + sizeof(iwlagn_def_3w_lookup)); + + IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n", + basic.flags ? "active" : "disabled", + priv->bt_full_concurrent ? + "full concurrency" : "3-wire"); + + if (priv->lib->bt_params->bt_session_2) { + memcpy(&bt_cmd_v2.basic, &basic, + sizeof(basic)); + ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, + 0, sizeof(bt_cmd_v2), &bt_cmd_v2); + } else { + memcpy(&bt_cmd_v1.basic, &basic, + sizeof(basic)); + ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, + 0, sizeof(bt_cmd_v1), &bt_cmd_v1); + } + if (ret) + IWL_ERR(priv, "failed to send BT Coex Config\n"); + +} + +void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena) +{ + struct iwl_rxon_context *ctx, *found_ctx = NULL; + bool found_ap = false; + + lockdep_assert_held(&priv->mutex); + + /* Check whether AP or GO mode is active. */ + if (rssi_ena) { + for_each_context(priv, ctx) { + if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP && + iwl_is_associated_ctx(ctx)) { + found_ap = true; + break; + } + } + } + + /* + * If disable was received or If GO/AP mode, disable RSSI + * measurements. 
+ */ + if (!rssi_ena || found_ap) { + if (priv->cur_rssi_ctx) { + ctx = priv->cur_rssi_ctx; + ieee80211_disable_rssi_reports(ctx->vif); + priv->cur_rssi_ctx = NULL; + } + return; + } + + /* + * If rssi measurements need to be enabled, consider all cases now. + * Figure out how many contexts are active. + */ + for_each_context(priv, ctx) { + if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && + iwl_is_associated_ctx(ctx)) { + found_ctx = ctx; + break; + } + } + + /* + * rssi monitor already enabled for the correct interface...nothing + * to do. + */ + if (found_ctx == priv->cur_rssi_ctx) + return; + + /* + * Figure out if rssi monitor is currently enabled, and needs + * to be changed. If rssi monitor is already enabled, disable + * it first else just enable rssi measurements on the + * interface found above. + */ + if (priv->cur_rssi_ctx) { + ctx = priv->cur_rssi_ctx; + if (ctx->vif) + ieee80211_disable_rssi_reports(ctx->vif); + } + + priv->cur_rssi_ctx = found_ctx; + + if (!found_ctx) + return; + + ieee80211_enable_rssi_reports(found_ctx->vif, + IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD, + IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD); +} + +static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg) +{ + return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> + BT_UART_MSG_FRAME3SCOESCO_POS; +} + +static void iwlagn_bt_traffic_change_work(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, bt_traffic_change_work); + struct iwl_rxon_context *ctx; + int smps_request = -1; + + if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) { + /* bt coex disabled */ + return; + } + + /* + * Note: bt_traffic_load can be overridden by scan complete and + * coex profile notifications. Ignore that since only bad consequence + * can be not matching debug print with actual state. + */ + IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n", + priv->bt_traffic_load); + + switch (priv->bt_traffic_load) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + if (priv->bt_status) + smps_request = IEEE80211_SMPS_DYNAMIC; + else + smps_request = IEEE80211_SMPS_AUTOMATIC; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + smps_request = IEEE80211_SMPS_DYNAMIC; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + smps_request = IEEE80211_SMPS_STATIC; + break; + default: + IWL_ERR(priv, "Invalid BT traffic load: %d\n", + priv->bt_traffic_load); + break; + } + + mutex_lock(&priv->mutex); + + /* + * We can not send command to firmware while scanning. When the scan + * complete we will schedule this work again. We do check with mutex + * locked to prevent new scan request to arrive. We do not check + * STATUS_SCANNING to avoid race when queue_work two times from + * different notifications, but quit and not perform any work at all. + */ + if (test_bit(STATUS_SCAN_HW, &priv->status)) + goto out; + + iwl_update_chain_flags(priv); + + if (smps_request != -1) { + priv->current_ht_config.smps = smps_request; + for_each_context(priv, ctx) { + if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) + ieee80211_request_smps(ctx->vif, smps_request); + } + } + + /* + * Dynamic PS poll related functionality. Adjust RSSI measurements if + * necessary. + */ + iwlagn_bt_coex_rssi_monitor(priv); +out: + mutex_unlock(&priv->mutex); +} + +/* + * If BT sco traffic, and RSSI monitor is enabled, move measurements to the + * correct interface or disable it if this is the last interface to be + * removed. 
+ */ +void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv) +{ + if (priv->bt_is_sco && + priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS) + iwlagn_bt_adjust_rssi_monitor(priv, true); + else + iwlagn_bt_adjust_rssi_monitor(priv, false); +} + +static void iwlagn_print_uartmsg(struct iwl_priv *priv, + struct iwl_bt_uart_msg *uart_msg) +{ + IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, " + "Update Req = 0x%X\n", + (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >> + BT_UART_MSG_FRAME1MSGTYPE_POS, + (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >> + BT_UART_MSG_FRAME1SSN_POS, + (BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >> + BT_UART_MSG_FRAME1UPDATEREQ_POS); + + IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, " + "Chl_SeqN = 0x%X, In band = 0x%X\n", + (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >> + BT_UART_MSG_FRAME2OPENCONNECTIONS_POS, + (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >> + BT_UART_MSG_FRAME2TRAFFICLOAD_POS, + (BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >> + BT_UART_MSG_FRAME2CHLSEQN_POS, + (BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >> + BT_UART_MSG_FRAME2INBAND_POS); + + IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, " + "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n", + (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> + BT_UART_MSG_FRAME3SCOESCO_POS, + (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >> + BT_UART_MSG_FRAME3SNIFF_POS, + (BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >> + BT_UART_MSG_FRAME3A2DP_POS, + (BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >> + BT_UART_MSG_FRAME3ACL_POS, + (BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >> + BT_UART_MSG_FRAME3MASTER_POS, + (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >> + BT_UART_MSG_FRAME3OBEX_POS); + + IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n", + (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >> + BT_UART_MSG_FRAME4IDLEDURATION_POS); + + IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, " + "eSCO Retransmissions = 0x%X\n", + (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >> + BT_UART_MSG_FRAME5TXACTIVITY_POS, + (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >> + BT_UART_MSG_FRAME5RXACTIVITY_POS, + (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >> + BT_UART_MSG_FRAME5ESCORETRANSMIT_POS); + + IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n", + (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >> + BT_UART_MSG_FRAME6SNIFFINTERVAL_POS, + (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> + BT_UART_MSG_FRAME6DISCOVERABLE_POS); + + IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = " + "0x%X, Inquiry = 0x%X, Connectable = 0x%X\n", + (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> + BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, + (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >> + BT_UART_MSG_FRAME7PAGE_POS, + (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >> + BT_UART_MSG_FRAME7INQUIRY_POS, + (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >> + BT_UART_MSG_FRAME7CONNECTABLE_POS); +} + +static bool iwlagn_set_kill_msk(struct iwl_priv *priv, + struct iwl_bt_uart_msg *uart_msg) +{ + bool need_update = false; + u8 kill_msk = IWL_BT_KILL_REDUCE; + static const __le32 bt_kill_ack_msg[3] = { + IWLAGN_BT_KILL_ACK_MASK_DEFAULT, + IWLAGN_BT_KILL_ACK_CTS_MASK_SCO, + IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE}; + static const __le32 bt_kill_cts_msg[3] = { + IWLAGN_BT_KILL_CTS_MASK_DEFAULT, + IWLAGN_BT_KILL_ACK_CTS_MASK_SCO, + 
IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE}; + + if (!priv->reduced_txpower) + kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) + ? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT; + if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] || + priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) { + priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK; + priv->kill_ack_mask = bt_kill_ack_msg[kill_msk]; + priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK; + priv->kill_cts_mask = bt_kill_cts_msg[kill_msk]; + need_update = true; + } + return need_update; +} + +/* + * Upon RSSI changes, sends a bt config command with following changes + * 1. enable/disable "reduced control frames tx power + * 2. update the "kill)ack_mask" and "kill_cts_mask" + * + * If "reduced tx power" is enabled, uCode shall + * 1. ACK/Back/CTS rate shall reduced to 6Mbps + * 2. not use duplciate 20/40MHz mode + */ +static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv, + struct iwl_bt_uart_msg *uart_msg) +{ + bool need_update = false; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int ave_rssi; + + if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) { + IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n"); + return false; + } + + ave_rssi = ieee80211_ave_rssi(ctx->vif); + if (!ave_rssi) { + /* no rssi data, no changes to reduce tx power */ + IWL_DEBUG_COEX(priv, "no rssi data available\n"); + return need_update; + } + if (!priv->reduced_txpower && + !iwl_is_associated(priv, IWL_RXON_CTX_PAN) && + (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) && + (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK | + BT_UART_MSG_FRAME3OBEX_MSK)) && + !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK | + BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) { + /* enabling reduced tx power */ + priv->reduced_txpower = true; + priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR; + need_update = true; + } else if (priv->reduced_txpower && + (iwl_is_associated(priv, IWL_RXON_CTX_PAN) || + (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) || + (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK | + BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) || + !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK | + BT_UART_MSG_FRAME3OBEX_MSK)))) { + /* disable reduced tx power */ + priv->reduced_txpower = false; + priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR; + need_update = true; + } + + return need_update; +} + +static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data; + struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg; + + if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) { + /* bt coex disabled */ + return; + } + + IWL_DEBUG_COEX(priv, "BT Coex notification:\n"); + IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status); + IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load); + IWL_DEBUG_COEX(priv, " CI compliance: %d\n", + coex->bt_ci_compliance); + iwlagn_print_uartmsg(priv, uart_msg); + + priv->last_bt_traffic_load = priv->bt_traffic_load; + priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg); + + if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { + if (priv->bt_status != coex->bt_status || + priv->last_bt_traffic_load != coex->bt_traffic_load) { + if (coex->bt_status) { + /* BT on */ + if (!priv->bt_ch_announce) + priv->bt_traffic_load = + IWL_BT_COEX_TRAFFIC_LOAD_HIGH; + else + priv->bt_traffic_load = + 
coex->bt_traffic_load; + } else { + /* BT off */ + priv->bt_traffic_load = + IWL_BT_COEX_TRAFFIC_LOAD_NONE; + } + priv->bt_status = coex->bt_status; + queue_work(priv->workqueue, + &priv->bt_traffic_change_work); + } + } + + /* schedule to send runtime bt_config */ + /* check reduce power before change ack/cts kill mask */ + if (iwlagn_fill_txpower_mode(priv, uart_msg) || + iwlagn_set_kill_msk(priv, uart_msg)) + queue_work(priv->workqueue, &priv->bt_runtime_config); + + + /* FIXME: based on notification, adjust the prio_boost */ + + priv->bt_ci_compliance = coex->bt_ci_compliance; +} + +void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] = + iwlagn_bt_coex_profile_notif; +} + +void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->bt_traffic_change_work, + iwlagn_bt_traffic_change_work); +} + +void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->bt_traffic_change_work); +} + +static bool is_single_rx_stream(struct iwl_priv *priv) +{ + return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || + priv->current_ht_config.single_chain_sufficient; +} + +#define IWL_NUM_RX_CHAINS_MULTIPLE 3 +#define IWL_NUM_RX_CHAINS_SINGLE 2 +#define IWL_NUM_IDLE_CHAINS_DUAL 2 +#define IWL_NUM_IDLE_CHAINS_SINGLE 1 + +/* + * Determine how many receiver/antenna chains to use. + * + * More provides better reception via diversity. Fewer saves power + * at the expense of throughput, but only when not in powersave to + * start with. + * + * MIMO (dual stream) requires at least 2, but works better with 3. + * This does not determine *which* chains to use, just how many. + */ +static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) +{ + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && + (priv->bt_full_concurrent || + priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { + /* + * only use chain 'A' in bt high traffic load or + * full concurrency mode + */ + return IWL_NUM_RX_CHAINS_SINGLE; + } + /* # of Rx chains to use when expecting MIMO. */ + if (is_single_rx_stream(priv)) + return IWL_NUM_RX_CHAINS_SINGLE; + else + return IWL_NUM_RX_CHAINS_MULTIPLE; +} + +/* + * When we are in power saving mode, unless device support spatial + * multiplexing power save, use the active count for rx chain count. + */ +static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) +{ + /* # Rx chains when idling, depending on SMPS mode */ + switch (priv->current_ht_config.smps) { + case IEEE80211_SMPS_STATIC: + case IEEE80211_SMPS_DYNAMIC: + return IWL_NUM_IDLE_CHAINS_SINGLE; + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_OFF: + return active_cnt; + default: + WARN(1, "invalid SMPS mode %d", + priv->current_ht_config.smps); + return active_cnt; + } +} + +/* up to 4 chains */ +static u8 iwl_count_chain_bitmap(u32 chain_bitmap) +{ + u8 res; + res = (chain_bitmap & BIT(0)) >> 0; + res += (chain_bitmap & BIT(1)) >> 1; + res += (chain_bitmap & BIT(2)) >> 2; + res += (chain_bitmap & BIT(3)) >> 3; + return res; +} + +/** + * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image + * + * Selects how many and which Rx receivers/antennas/chains to use. + * This should not be used for scan command ... it puts data in wrong place. 
+ */ +void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + bool is_single = is_single_rx_stream(priv); + bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); + u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; + u32 active_chains; + u16 rx_chain; + + /* Tell uCode which antennas are actually connected. + * Before first association, we assume all antennas are connected. + * Just after first association, iwl_chain_noise_calibration() + * checks which antennas actually *are* connected. */ + if (priv->chain_noise_data.active_chains) + active_chains = priv->chain_noise_data.active_chains; + else + active_chains = priv->nvm_data->valid_rx_ant; + + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && + (priv->bt_full_concurrent || + priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { + /* + * only use chain 'A' in bt high traffic load or + * full concurrency mode + */ + active_chains = first_antenna(active_chains); + } + + rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; + + /* How many receivers should we use? */ + active_rx_cnt = iwl_get_active_rx_chain_count(priv); + idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt); + + + /* correct rx chain count according hw settings + * and chain noise calibration + */ + valid_rx_cnt = iwl_count_chain_bitmap(active_chains); + if (valid_rx_cnt < active_rx_cnt) + active_rx_cnt = valid_rx_cnt; + + if (valid_rx_cnt < idle_rx_cnt) + idle_rx_cnt = valid_rx_cnt; + + rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; + rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; + + ctx->staging.rx_chain = cpu_to_le16(rx_chain); + + if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) + ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; + else + ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; + + IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n", + ctx->staging.rx_chain, + active_rx_cnt, idle_rx_cnt); + + WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || + active_rx_cnt < idle_rx_cnt); +} + +u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) +{ + int i; + u8 ind = ant; + + if (priv->band == IEEE80211_BAND_2GHZ && + priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) + return 0; + + for (i = 0; i < RATE_ANT_NUM - 1; i++) { + ind = (ind + 1) < RATE_ANT_NUM ? 
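iwlagn_set_rxon_chain() above packs three things into one 16-bit RXON field: which chains are physically connected, how many receivers to run while active, and how many while idle. A minimal sketch of that packing; the bit positions below (valid mask at bit 1, idle count at bit 10, active/MIMO count at bit 12) are assumed for illustration rather than quoted from the command definitions:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed rx_chain field layout, for illustration */
	#define RX_CHAIN_VALID_POS	1	/* 3-bit mask of connected antennas */
	#define RX_CHAIN_CNT_POS	10	/* receivers to keep on while idle */
	#define RX_CHAIN_MIMO_CNT_POS	12	/* receivers to use while active */

	/* popcount over up to four chain bits, as in iwl_count_chain_bitmap() */
	static unsigned int count_chains(unsigned int bitmap)
	{
		return (bitmap & 1) + ((bitmap >> 1) & 1) +
		       ((bitmap >> 2) & 1) + ((bitmap >> 3) & 1);
	}

	int main(void)
	{
		unsigned int valid_ant = 0x7;	/* antennas A, B and C connected */
		unsigned int active = 3;	/* MIMO reception wants all of them */
		unsigned int idle = 1;		/* dynamic SMPS: one chain when idle */
		unsigned int valid_cnt = count_chains(valid_ant);
		uint16_t rx_chain;

		if (active > valid_cnt)		/* never claim more chains than exist */
			active = valid_cnt;
		if (idle > valid_cnt)
			idle = valid_cnt;

		rx_chain  = valid_ant << RX_CHAIN_VALID_POS;
		rx_chain |= active << RX_CHAIN_MIMO_CNT_POS;
		rx_chain |= idle << RX_CHAIN_CNT_POS;

		printf("rx_chain = 0x%04x\n", rx_chain);	/* 0x340e with these inputs */
		return 0;
	}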
ind + 1 : 0; + if (valid & BIT(ind)) + return ind; + } + return ant; +} + +#ifdef CONFIG_PM_SLEEP +static void iwlagn_convert_p1k(u16 *p1k, __le16 *out) +{ + int i; + + for (i = 0; i < IWLAGN_P1K_SIZE; i++) + out[i] = cpu_to_le16(p1k[i]); +} + +struct wowlan_key_data { + struct iwl_rxon_context *ctx; + struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; + struct iwlagn_wowlan_tkip_params_cmd *tkip; + const u8 *bssid; + bool error, use_rsc_tsc, use_tkip; +}; + + +static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct wowlan_key_data *data = _data; + struct iwl_rxon_context *ctx = data->ctx; + struct aes_sc *aes_sc, *aes_tx_sc = NULL; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct iwlagn_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWLAGN_P1K_SIZE]; + int ret, i; + + mutex_lock(&priv->mutex); + + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && + !sta && !ctx->key_mapping_keys) + ret = iwl_set_default_wep_key(priv, ctx, key); + else + ret = iwl_set_dynamic_key(priv, ctx, key, sta); + + if (ret) { + IWL_ERR(priv, "Error setting key during suspend!\n"); + data->error = true; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; + tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; + + rx_p1ks = data->tkip->rx_uni; + + ieee80211_get_key_tx_seq(key, &seq); + tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + + ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); + + memcpy(data->tkip->mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWLAGN_MIC_KEY_SIZE); + + rx_mic_key = data->tkip->mic_keys.rx_unicast; + } else { + tkip_sc = + data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; + rx_p1ks = data->tkip->rx_multi; + rx_mic_key = data->tkip->mic_keys.rx_mcast; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. 
+ */ + for (i = 0; i < IWLAGN_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); + /* wrapping isn't allowed, AP must rekey */ + if (seq.tkip.iv32 > cur_rx_iv32) + cur_rx_iv32 = seq.tkip.iv32; + } + + ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k); + iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k); + ieee80211_get_tkip_rx_p1k(key, data->bssid, + cur_rx_iv32 + 1, p1k); + iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k); + + memcpy(rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWLAGN_MIC_KEY_SIZE); + + data->use_tkip = true; + data->use_rsc_tsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + if (sta) { + u8 *pn = seq.ccmp.pn; + + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; + aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; + + ieee80211_get_key_tx_seq(key, &seq); + aes_tx_sc->pn = cpu_to_le64( + (u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } else + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 for checking the IV in the frames. + */ + for (i = 0; i < IWLAGN_NUM_RSC; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + aes_sc[i].pn = cpu_to_le64( + (u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + data->use_rsc_tsc = true; + break; + } + + mutex_unlock(&priv->mutex); +} + +int iwlagn_send_patterns(struct iwl_priv *priv, + struct cfg80211_wowlan *wowlan) +{ + struct iwlagn_wowlan_patterns_cmd *pattern_cmd; + struct iwl_host_cmd cmd = { + .id = REPLY_WOWLAN_PATTERNS, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + int i, err; + + if (!wowlan->n_patterns) + return 0; + + cmd.len[0] = sizeof(*pattern_cmd) + + wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern); + + pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); + if (!pattern_cmd) + return -ENOMEM; + + pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); + + for (i = 0; i < wowlan->n_patterns; i++) { + int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); + + memcpy(&pattern_cmd->patterns[i].mask, + wowlan->patterns[i].mask, mask_len); + memcpy(&pattern_cmd->patterns[i].pattern, + wowlan->patterns[i].pattern, + wowlan->patterns[i].pattern_len); + pattern_cmd->patterns[i].mask_size = mask_len; + pattern_cmd->patterns[i].pattern_size = + wowlan->patterns[i].pattern_len; + } + + cmd.data[0] = pattern_cmd; + err = iwl_dvm_send_cmd(priv, &cmd); + kfree(pattern_cmd); + return err; +} + +int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan) +{ + struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd; + struct iwl_rxon_cmd rxon; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd; + struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {}; + struct iwlagn_d3_config_cmd d3_cfg_cmd = { + /* + * Program the minimum sleep time to 10 seconds, as many + * platforms have issues processing a wakeup signal while + * still being in the process of suspending. 
+ */ + .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), + }; + struct wowlan_key_data key_data = { + .ctx = ctx, + .bssid = ctx->active.bssid_addr, + .use_rsc_tsc = false, + .tkip = &tkip_cmd, + .use_tkip = false, + }; + int ret, i; + u16 seq; + + key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); + if (!key_data.rsc_tsc) + return -ENOMEM; + + memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd)); + + /* + * We know the last used seqno, and the uCode expects to know that + * one, it will increment before TX. + */ + seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ; + wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq); + + /* + * For QoS counters, we store the one to use next, so subtract 0x10 + * since the uCode will add 0x10 before using the value. + */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + seq = priv->tid_data[IWL_AP_ID][i].seq_number; + seq -= 0x10; + wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); + } + + if (wowlan->disconnect) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | + IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE); + if (wowlan->magic_pkt) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET); + if (wowlan->gtk_rekey_failure) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL); + if (wowlan->eap_identity_req) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ); + if (wowlan->four_way_handshake) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE); + if (wowlan->n_patterns) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH); + + if (wowlan->rfkill_release) + d3_cfg_cmd.wakeup_flags |= + cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL); + + iwl_scan_cancel_timeout(priv, 200); + + memcpy(&rxon, &ctx->active, sizeof(rxon)); + + priv->ucode_loaded = false; + iwl_trans_stop_device(priv->trans); + + priv->wowlan = true; + + ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN); + if (ret) + goto out; + + /* now configure WoWLAN ucode */ + ret = iwl_alive_start(priv); + if (ret) + goto out; + + memcpy(&ctx->staging, &rxon, sizeof(rxon)); + ret = iwlagn_commit_rxon(priv, ctx); + if (ret) + goto out; + + ret = iwl_power_update_mode(priv, true); + if (ret) + goto out; + + if (!iwlwifi_mod_params.sw_crypto) { + /* mark all keys clear */ + priv->ucode_key_table = 0; + ctx->key_mapping_keys = 0; + + /* + * This needs to be unlocked due to lock ordering + * constraints. Since we're in the suspend path + * that isn't really a problem though. 
+ */ + mutex_unlock(&priv->mutex); + ieee80211_iter_keys(priv->hw, ctx->vif, + iwlagn_wowlan_program_keys, + &key_data); + mutex_lock(&priv->mutex); + if (key_data.error) { + ret = -EIO; + goto out; + } + + if (key_data.use_rsc_tsc) { + struct iwl_host_cmd rsc_tsc_cmd = { + .id = REPLY_WOWLAN_TSC_RSC_PARAMS, + .data[0] = key_data.rsc_tsc, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .len[0] = sizeof(*key_data.rsc_tsc), + }; + + ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd); + if (ret) + goto out; + } + + if (key_data.use_tkip) { + ret = iwl_dvm_send_cmd_pdu(priv, + REPLY_WOWLAN_TKIP_PARAMS, + 0, sizeof(tkip_cmd), + &tkip_cmd); + if (ret) + goto out; + } + + if (priv->have_rekey_data) { + memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); + memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN); + kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); + memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN); + kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); + kek_kck_cmd.replay_ctr = priv->replay_ctr; + + ret = iwl_dvm_send_cmd_pdu(priv, + REPLY_WOWLAN_KEK_KCK_MATERIAL, + 0, sizeof(kek_kck_cmd), + &kek_kck_cmd); + if (ret) + goto out; + } + } + + ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0, + sizeof(d3_cfg_cmd), &d3_cfg_cmd); + if (ret) + goto out; + + ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER, + 0, sizeof(wakeup_filter_cmd), + &wakeup_filter_cmd); + if (ret) + goto out; + + ret = iwlagn_send_patterns(priv, wowlan); + out: + kfree(key_data.rsc_tsc); + return ret; +} +#endif + +int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { + IWL_WARN(priv, "Not sending command - %s KILL\n", + iwl_is_rfkill(priv) ? "RF" : "CT"); + return -EIO; + } + + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_ERR(priv, "Command %s failed: FW Error\n", + iwl_dvm_get_cmd_string(cmd->id)); + return -EIO; + } + + /* + * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag + * in iwl_down but cancel the workers only later. + */ + if (!priv->ucode_loaded) { + IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); + return -EIO; + } + + /* + * Synchronous commands from this op-mode must hold + * the mutex, this ensures we don't try to send two + * (or more) synchronous commands at a time. + */ + if (!(cmd->flags & CMD_ASYNC)) + lockdep_assert_held(&priv->mutex); + + return iwl_trans_send_cmd(priv->trans, cmd); +} + +int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id, + u32 flags, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = { len, }, + .data = { data, }, + .flags = flags, + }; + + return iwl_dvm_send_cmd(priv, &cmd); +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c new file mode 100644 index 000000000000..b3ad34e8bf5a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -0,0 +1,1655 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "iwl-io.h" +#include "iwl-trans.h" +#include "iwl-op-mode.h" +#include "iwl-modparams.h" + +#include "dev.h" +#include "calib.h" +#include "agn.h" + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP), + }, +}; + +static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, +}; + +static const struct ieee80211_iface_combination +iwlagn_iface_combinations_dualmode[] = { + { .num_different_channels = 1, + .max_interfaces = 2, + .beacon_int_infra_match = true, + .limits = iwlagn_sta_ap_limits, + .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits), + }, + { .num_different_channels = 1, + .max_interfaces = 2, + .limits = iwlagn_2sta_limits, + .n_limits = ARRAY_SIZE(iwlagn_2sta_limits), + }, +}; + +/* + * Not a mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. + */ +int iwlagn_mac_setup_register(struct iwl_priv *priv, + const struct iwl_ucode_capabilities *capa) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + struct iwl_rxon_context *ctx; + + hw->rate_control_algorithm = "iwl-agn-rs"; + + /* Tell mac80211 our characteristics */ + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC); + ieee80211_hw_set(hw, SPECTRUM_MGMT); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, QUEUE_CONTROL); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, WANT_MONITOR_VIF); + + hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; + hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; + + /* + * Including the following line will crash some AP's. This + * workaround removes the stimulus which causes the crash until + * the AP software can be fixed. 
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + */ + + if (priv->nvm_data->sku_cap_11n_enable) + hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS | + NL80211_FEATURE_STATIC_SMPS; + + /* + * Enable 11w if advertised by firmware and software crypto + * is not enabled (as the firmware will interpret some mgmt + * packets, so enabling it with software crypto isn't safe) + */ + if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && + !iwlwifi_mod_params.sw_crypto) + ieee80211_hw_set(hw, MFP_CAPABLE); + + hw->sta_data_size = sizeof(struct iwl_station_priv); + hw->vif_data_size = sizeof(struct iwl_vif_priv); + + for_each_context(priv, ctx) { + hw->wiphy->interface_modes |= ctx->interface_modes; + hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; + } + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + + if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { + hw->wiphy->iface_combinations = + iwlagn_iface_combinations_dualmode; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwlagn_iface_combinations_dualmode); + } + + hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | + REGULATORY_DISABLE_BEACON_HINTS; + +#ifdef CONFIG_PM_SLEEP + if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + priv->trans->ops->d3_suspend && + priv->trans->ops->d3_resume && + device_can_wakeup(priv->trans->dev)) { + priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; + if (!iwlwifi_mod_params.sw_crypto) + priv->wowlan_support.flags |= + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE; + + priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; + priv->wowlan_support.pattern_min_len = + IWLAGN_WOWLAN_MIN_PATTERN_LEN; + priv->wowlan_support.pattern_max_len = + IWLAGN_WOWLAN_MAX_PATTERN_LEN; + hw->wiphy->wowlan = &priv->wowlan_support; + } +#endif + + if (iwlwifi_mod_params.power_save) + hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + else + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + /* we create the 802.11 header and a max-length SSID element */ + hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 34; + + /* + * We don't use all queues: 4 and 9 are unused and any + * aggregation queue gets mapped down to the AC queue. 
+ */ + hw->queues = IWLAGN_FIRST_AMPDU_QUEUE; + + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->nvm_data->bands[IEEE80211_BAND_2GHZ]; + if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->nvm_data->bands[IEEE80211_BAND_5GHZ]; + + hw->wiphy->hw_version = priv->trans->hw_id; + + iwl_leds_init(priv); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + iwl_leds_exit(priv); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + +void iwlagn_mac_unregister(struct iwl_priv *priv) +{ + if (!priv->mac80211_registered) + return; + iwl_leds_exit(priv); + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; +} + +static int __iwl_up(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int ret; + + lockdep_assert_held(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + for_each_context(priv, ctx) { + ret = iwlagn_alloc_bcast_station(priv, ctx); + if (ret) { + iwl_dealloc_bcast_stations(priv); + return ret; + } + } + + ret = iwl_trans_start_hw(priv->trans); + if (ret) { + IWL_ERR(priv, "Failed to start HW: %d\n", ret); + goto error; + } + + ret = iwl_run_init_ucode(priv); + if (ret) { + IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); + goto error; + } + + ret = iwl_trans_start_hw(priv->trans); + if (ret) { + IWL_ERR(priv, "Failed to start HW: %d\n", ret); + goto error; + } + + ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + ret = iwl_alive_start(priv); + if (ret) + goto error; + return 0; + + error: + set_bit(STATUS_EXIT_PENDING, &priv->status); + iwl_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + IWL_ERR(priv, "Unable to initialize device.\n"); + return ret; +} + +static int iwlagn_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + ret = __iwl_up(priv); + mutex_unlock(&priv->mutex); + if (ret) + return ret; + + IWL_DEBUG_INFO(priv, "Start UP work done.\n"); + + /* Now we should be done, and the READY bit should be set. 
*/ + if (WARN_ON(!test_bit(STATUS_READY, &priv->status))) + ret = -EIO; + + iwlagn_led_enable(priv); + + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +static void iwlagn_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) + return; + + priv->is_open = 0; + + mutex_lock(&priv->mutex); + iwl_down(priv); + mutex_unlock(&priv->mutex); + + iwl_cancel_deferred_work(priv); + + flush_workqueue(priv->workqueue); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + if (iwlwifi_mod_params.sw_crypto) + return; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->mutex); + + if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) + goto out; + + memcpy(priv->kek, data->kek, NL80211_KEK_LEN); + memcpy(priv->kck, data->kck, NL80211_KCK_LEN); + priv->replay_ctr = + cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); + priv->have_rekey_data = true; + + out: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +#ifdef CONFIG_PM_SLEEP + +static int iwlagn_mac_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int ret; + + if (WARN_ON(!wowlan)) + return -EINVAL; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->mutex); + + /* Don't attempt WoWLAN when not associated, tear down instead. */ + if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || + !iwl_is_associated_ctx(ctx)) { + ret = 1; + goto out; + } + + ret = iwlagn_suspend(priv, wowlan); + if (ret) + goto error; + + /* let the ucode operate on its own */ + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + iwl_trans_d3_suspend(priv->trans, false); + + goto out; + + error: + priv->wowlan = false; + iwlagn_prepare_restart(priv); + ieee80211_restart_hw(priv->hw); + out: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +struct iwl_resume_data { + struct iwl_priv *priv; + struct iwlagn_wowlan_status *cmd; + bool valid; +}; + +static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_resume_data *resume_data = data; + struct iwl_priv *priv = resume_data->priv; + + if (iwl_rx_packet_payload_len(pkt) != sizeof(*resume_data->cmd)) { + IWL_ERR(priv, "rx wrong size data\n"); + return true; + } + memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd)); + resume_data->valid = true; + + return true; +} + +static int iwlagn_mac_resume(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif; + u32 base; + int ret; + enum iwl_d3_status d3_status; + struct error_table_start { + /* cf. 
struct iwl_error_event_table */ + u32 valid; + u32 error_id; + } err_info; + struct iwl_notification_wait status_wait; + static const u16 status_cmd[] = { + REPLY_WOWLAN_GET_STATUS, + }; + struct iwlagn_wowlan_status status_data = {}; + struct iwl_resume_data resume_data = { + .priv = priv, + .cmd = &status_data, + .valid = false, + }; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; +#ifdef CONFIG_IWLWIFI_DEBUGFS + const struct fw_img *img; +#endif + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->mutex); + + /* we'll clear ctx->vif during iwlagn_prepare_restart() */ + vif = ctx->vif; + + ret = iwl_trans_d3_resume(priv->trans, &d3_status, false); + if (ret) + goto out_unlock; + + if (d3_status != IWL_D3_STATUS_ALIVE) { + IWL_INFO(priv, "Device was reset during suspend\n"); + goto out_unlock; + } + + /* uCode is no longer operating by itself */ + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + base = priv->device_pointers.error_event_table; + if (!iwlagn_hw_valid_rtc_data_addr(base)) { + IWL_WARN(priv, "Invalid error table during resume!\n"); + goto out_unlock; + } + + iwl_trans_read_mem_bytes(priv->trans, base, + &err_info, sizeof(err_info)); + + if (err_info.valid) { + IWL_INFO(priv, "error table is valid (%d, 0x%x)\n", + err_info.valid, err_info.error_id); + if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + wakeup.rfkill_release = true; + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + } + goto out_unlock; + } + +#ifdef CONFIG_IWLWIFI_DEBUGFS + img = &priv->fw->img[IWL_UCODE_WOWLAN]; + if (!priv->wowlan_sram) + priv->wowlan_sram = + kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len, + GFP_KERNEL); + + if (priv->wowlan_sram) + iwl_trans_read_mem(priv->trans, 0x800000, + priv->wowlan_sram, + img->sec[IWL_UCODE_SECTION_DATA].len / 4); +#endif + + /* + * This is very strange. The GET_STATUS command is sent but the device + * doesn't reply properly, it seems it doesn't close the RBD so one is + * always left open ... As a result, we need to send another command + * and have to reset the driver afterwards. As we need to switch to + * runtime firmware again that'll happen. + */ + + iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd, + ARRAY_SIZE(status_cmd), iwl_resume_status_fn, + &resume_data); + + iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL); + iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL); + /* an RBD is left open in the firmware now! 
*/ + + ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5); + if (ret) + goto out_unlock; + + if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) { + u32 reasons = le32_to_cpu(status_data.wakeup_reason); + struct cfg80211_wowlan_wakeup *wakeup_report; + + IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons); + + if (reasons) { + if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET) + wakeup.magic_pkt = true; + if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH) + wakeup.pattern_idx = status_data.pattern_number; + if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | + IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE)) + wakeup.disconnect = true; + if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL) + wakeup.gtk_rekey_failure = true; + if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ) + wakeup.eap_identity_req = true; + if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE) + wakeup.four_way_handshake = true; + wakeup_report = &wakeup; + } else { + wakeup_report = NULL; + } + + ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); + } + + priv->wowlan = false; + + iwlagn_prepare_restart(priv); + + memset((void *)&ctx->active, 0, sizeof(ctx->active)); + iwl_connection_init_rx_config(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); + + out_unlock: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + ieee80211_resume_disconnect(vif); + + return 1; +} + +static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + device_set_wakeup_enable(priv->trans->dev, enabled); +} +#endif + +static void iwlagn_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + if (iwlagn_tx_skb(priv, control->sta, skb)) + ieee80211_free_txskb(hw, skb); +} + +static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); +} + +static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int ret; + bool is_default_wep_key = false; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwlwifi_mod_params.sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + /* fall through */ + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + default: + break; + } + + /* + * We could program these keys into the hardware as well, but we + * don't expect much multicast traffic in IBSS and having keys + * for more stations is probably more useful. + * + * Mark key TX-only and return 0. 
+ */ + if (vif->type == NL80211_IFTYPE_ADHOC && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + key->hw_key_idx = WEP_INVALID_OFFSET; + return 0; + } + + /* If they key was TX-only, accept deletion */ + if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) + return 0; + + mutex_lock(&priv->mutex); + iwl_scan_cancel_timeout(priv, 100); + + BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); + + /* + * If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode. + */ + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { + if (cmd == SET_KEY) + is_default_wep_key = !ctx->key_mapping_keys; + else + is_default_wep_key = + key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT; + } + + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) { + ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key); + break; + } + ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta); + if (ret) { + /* + * can't add key for RX, but we don't need it + * in the device for TX so still return 0 + */ + ret = 0; + key->hw_key_idx = WEP_INVALID_OFFSET; + } + + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = iwl_remove_default_wep_key(priv, ctx, key); + else + ret = iwl_remove_dynamic_key(priv, ctx, key, sta); + + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + return false; + return true; +} + +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + return false; + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) + return true; + + /* disabled by default */ + return false; +} + +static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size, bool amsdu) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + int ret = -EINVAL; + struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", + sta->addr, tid); + + if (!(priv->nvm_data->sku_cap_11n_enable)) + return -EACCES; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + mutex_lock(&priv->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + if (!iwl_enable_rx_ampdu(priv->cfg)) + break; + IWL_DEBUG_HT(priv, "start Rx\n"); + ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + IWL_DEBUG_HT(priv, "stop Rx\n"); + ret = iwl_sta_rx_agg_stop(priv, sta, tid); + break; + case IEEE80211_AMPDU_TX_START: + if (!priv->trans->ops->txq_enable) + break; + if (!iwl_enable_tx_ampdu(priv->cfg)) + break; + IWL_DEBUG_HT(priv, "start Tx\n"); + ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + IWL_DEBUG_HT(priv, "Flush Tx\n"); + ret = iwlagn_tx_agg_flush(priv, vif, sta, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); + if ((ret == 0) && 
(priv->agg_tids_count > 0)) { + priv->agg_tids_count--; + IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", + priv->agg_tids_count); + } + if (!priv->agg_tids_count && + priv->hw_params.use_rts_for_aggregation) { + /* + * switch off RTS/CTS if it was previously enabled + */ + sta_priv->lq_sta.lq.general_params.flags &= + ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; + iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), + &sta_priv->lq_sta.lq, CMD_ASYNC, false); + } + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size); + break; + } + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + return ret; +} + +static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + int ret; + u8 sta_id; + + IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", + sta->addr); + sta_priv->sta_id = IWL_INVALID_STATION; + + atomic_set(&sta_priv->pending_frames, 0); + if (vif->type == NL80211_IFTYPE_AP) + sta_priv->client = true; + + ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr, + is_ap, sta, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM (%d)\n", + sta->addr, ret); + /* Should we return success if return code is EEXIST ? */ + return ret; + } + + sta_priv->sta_id = sta_id; + + return 0; +} + +static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + int ret; + + IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", sta->addr); + + if (vif->type == NL80211_IFTYPE_STATION) { + /* + * Station will be removed from device when the RXON + * is set to unassociated -- just deactivate it here + * to avoid re-programming it. 
+ */ + ret = 0; + iwl_deactivate_station(priv, sta_priv->sta_id, sta->addr); + } else { + ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr); + if (ret) + IWL_DEBUG_QUIET_RFKILL(priv, + "Error removing station %pM\n", sta->addr); + } + return ret; +} + +static int iwlagn_mac_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + enum { + NONE, ADD, REMOVE, HT_RATE_INIT, ADD_RATE_INIT, + } op = NONE; + int ret; + + IWL_DEBUG_MAC80211(priv, "station %pM state change %d->%d\n", + sta->addr, old_state, new_state); + + mutex_lock(&priv->mutex); + if (vif->type == NL80211_IFTYPE_STATION) { + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) + op = ADD; + else if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) + op = REMOVE; + else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) + op = HT_RATE_INIT; + } else { + if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) + op = ADD_RATE_INIT; + else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) + op = REMOVE; + } + + switch (op) { + case ADD: + ret = iwlagn_mac_sta_add(hw, vif, sta); + if (ret) + break; + /* + * Clear the in-progress flag, the AP station entry was added + * but we'll initialize LQ only when we've associated (which + * would also clear the in-progress flag). This is necessary + * in case we never initialize LQ because association fails. + */ + spin_lock_bh(&priv->sta_lock); + priv->stations[iwl_sta_id(sta)].used &= + ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_bh(&priv->sta_lock); + break; + case REMOVE: + ret = iwlagn_mac_sta_remove(hw, vif, sta); + break; + case ADD_RATE_INIT: + ret = iwlagn_mac_sta_add(hw, vif, sta); + if (ret) + break; + /* Initialize rate scaling */ + IWL_DEBUG_INFO(priv, + "Initializing rate scaling for station %pM\n", + sta->addr); + iwl_rs_rate_init(priv, sta, iwl_sta_id(sta)); + ret = 0; + break; + case HT_RATE_INIT: + /* Initialize rate scaling */ + ret = iwl_sta_update_ht(priv, vif_priv->ctx, sta); + if (ret) + break; + IWL_DEBUG_INFO(priv, + "Initializing rate scaling for station %pM\n", + sta->addr); + iwl_rs_rate_init(priv, sta, iwl_sta_id(sta)); + ret = 0; + break; + default: + ret = 0; + break; + } + + /* + * mac80211 might WARN if we fail, but due the way we + * (badly) handle hard rfkill, we might fail here + */ + if (iwl_is_rfkill(priv)) + ret = 0; + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = ch_switch->chandef.chan; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + /* + * MULTI-FIXME + * When we add support for multiple interfaces, we need to + * revisit this. The channel switch command in the device + * only affects the BSS context, but what does that really + * mean? And what if we get a CSA on the second interface? + * This needs a lot of work. 
+ */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u16 ch; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->mutex); + + if (iwl_is_rfkill(priv)) + goto out; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status) || + test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + goto out; + + if (!iwl_is_associated_ctx(ctx)) + goto out; + + if (!priv->lib->set_channel_switch) + goto out; + + ch = channel->hw_value; + if (le16_to_cpu(ctx->active.channel) == ch) + goto out; + + priv->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + switch (cfg80211_get_chandef_type(&ch_switch->chandef)) { + case NL80211_CHAN_NO_HT: + case NL80211_CHAN_HT20: + ctx->ht.is_40mhz = false; + ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; + break; + case NL80211_CHAN_HT40MINUS: + ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + break; + case NL80211_CHAN_HT40PLUS: + ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + break; + } + + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_set_rxon_channel(priv, channel, ctx); + iwl_set_rxon_ht(priv, ht_conf); + iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); + + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); + priv->switch_channel = cpu_to_le16(ch); + if (priv->lib->set_channel_switch(priv, ch_switch)) { + clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); + priv->switch_channel = 0; + ieee80211_chswitch_done(ctx->vif, false); + } + +out: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) +{ + /* + * MULTI-FIXME + * See iwlagn_mac_channel_switch. + */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + return; + + if (ctx->vif) + ieee80211_chswitch_done(ctx->vif, is_success); +} + +static void iwlagn_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + __le32 filter_or = 0, filter_nand = 0; + struct iwl_rxon_context *ctx; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK); + /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->mutex); + + for_each_context(priv, ctx) { + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but we'll eventually commit the filter flags change anyway. + */ + } + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. 
+ */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + +static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + u32 scd_queues; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); + goto done; + } + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); + goto done; + } + + scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1; + scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | + BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); + + if (drop) { + IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", + scd_queues); + if (iwlagn_txfifo_flush(priv, scd_queues)) { + IWL_ERR(priv, "flush request fail\n"); + goto done; + } + } + + IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); + iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues); +done: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static void iwlagn_mac_event_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + if (event->type != RSSI_EVENT) + return; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { + if (event->u.rssi.data == RSSI_EVENT_LOW) + priv->bt_enable_pspoll = true; + else if (event->u.rssi.data == RSSI_EVENT_HIGH) + priv->bt_enable_pspoll = false; + + queue_work(priv->workqueue, &priv->bt_runtime_config); + } else { + IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled," + "ignoring RSSI callback\n"); + } + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + queue_work(priv->workqueue, &priv->beacon_update); + + return 0; +} + +static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int q; + + if (WARN_ON(!ctx)) + return -EINVAL; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); + return 0; + } + + q = AC_NUM - 1 - queue; + + mutex_lock(&priv->mutex); + + ctx->qos_data.def_qos_parm.ac[q].cw_min = + cpu_to_le16(params->cw_min); + ctx->qos_data.def_qos_parm.ac[q].cw_max = + cpu_to_le16(params->cw_max); + ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + ctx->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->txop * 32)); + + ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + return priv->ibss_manager == IWL_IBSS_MANAGER; +} + +static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + iwl_connection_init_rx_config(priv, ctx); + + iwlagn_set_rxon_chain(priv, ctx); + + return 
iwlagn_commit_rxon(priv, ctx); +} + +static int iwl_setup_interface(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_vif *vif = ctx->vif; + int err, ac; + + lockdep_assert_held(&priv->mutex); + + /* + * This variable will be correct only when there's just + * a single context, but all code using it is for hardware + * that supports only one context. + */ + priv->iw_mode = vif->type; + + ctx->is_active = true; + + err = iwl_set_mode(priv, ctx); + if (err) { + if (!ctx->always_active) + ctx->is_active = false; + return err; + } + + if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist && + vif->type == NL80211_IFTYPE_ADHOC) { + /* + * pretend to have high BT traffic as long as we + * are operating in IBSS mode, as this will cause + * the rate scaling etc. to behave as intended. + */ + priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; + } + + /* set up queue mappings */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + vif->hw_queue[ac] = ctx->ac_to_queue[ac]; + + if (vif->type == NL80211_IFTYPE_AP) + vif->cab_queue = ctx->mcast_queue; + else + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + + return 0; +} + +static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *tmp, *ctx = NULL; + int err; + enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); + bool reset = false; + + IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", + viftype, vif->addr); + + mutex_lock(&priv->mutex); + + if (!iwl_is_ready_rf(priv)) { + IWL_WARN(priv, "Try to add interface when device not ready\n"); + err = -EINVAL; + goto out; + } + + for_each_context(priv, tmp) { + u32 possible_modes = + tmp->interface_modes | tmp->exclusive_interface_modes; + + if (tmp->vif) { + /* On reset we need to add the same interface again */ + if (tmp->vif == vif) { + reset = true; + ctx = tmp; + break; + } + + /* check if this busy context is exclusive */ + if (tmp->exclusive_interface_modes & + BIT(tmp->vif->type)) { + err = -EINVAL; + goto out; + } + continue; + } + + if (!(possible_modes & BIT(viftype))) + continue; + + /* have maybe usable context w/o interface */ + ctx = tmp; + break; + } + + if (!ctx) { + err = -EOPNOTSUPP; + goto out; + } + + vif_priv->ctx = ctx; + ctx->vif = vif; + + /* + * In SNIFFER device type, the firmware reports the FCS to + * the host, rather than snipping it off. Unfortunately, + * mac80211 doesn't (yet) provide a per-packet flag for + * this, so that we have to set the hardware flag based + * on the interfaces added. As the monitor interface can + * only be present by itself, and will be removed before + * other interfaces are added, this is safe. 
+ */ + if (vif->type == NL80211_IFTYPE_MONITOR) + ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS); + else + __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, priv->hw->flags); + + err = iwl_setup_interface(priv, ctx); + if (!err || reset) + goto out; + + ctx->vif = NULL; + priv->iw_mode = NL80211_IFTYPE_STATION; + out: + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return err; +} + +static void iwl_teardown_interface(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool mode_change) +{ + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + + lockdep_assert_held(&priv->mutex); + + if (priv->scan_vif == vif) { + iwl_scan_cancel_timeout(priv, 200); + iwl_force_scan_end(priv); + } + + if (!mode_change) { + iwl_set_mode(priv, ctx); + if (!ctx->always_active) + ctx->is_active = false; + } + + /* + * When removing the IBSS interface, overwrite the + * BT traffic load with the stored one from the last + * notification, if any. If this is a device that + * doesn't implement this, this has no effect since + * both values are the same and zero. + */ + if (vif->type == NL80211_IFTYPE_ADHOC) + priv->bt_traffic_load = priv->last_bt_traffic_load; +} + +static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->mutex); + + if (WARN_ON(ctx->vif != vif)) { + struct iwl_rxon_context *tmp; + IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif); + for_each_context(priv, tmp) + IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n", + tmp->ctxid, tmp, tmp->vif); + } + ctx->vif = NULL; + + iwl_teardown_interface(priv, vif, false); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +} + +static int iwlagn_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_rxon_context *ctx, *tmp; + enum nl80211_iftype newviftype = newtype; + u32 interface_modes; + int err; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + newtype = ieee80211_iftype_p2p(newtype, newp2p); + + mutex_lock(&priv->mutex); + + ctx = iwl_rxon_ctx_from_vif(vif); + + /* + * To simplify this code, only support changes on the + * BSS context. The PAN context is usually reassigned + * by creating/removing P2P interfaces anyway. + */ + if (ctx->ctxid != IWL_RXON_CTX_BSS) { + err = -EBUSY; + goto out; + } + + if (!ctx->vif || !iwl_is_ready_rf(priv)) { + /* + * Huh? But wait ... this can maybe happen when + * we're in the middle of a firmware restart! + */ + err = -EBUSY; + goto out; + } + + /* Check if the switch is supported in the same context */ + interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; + if (!(interface_modes & BIT(newtype))) { + err = -EBUSY; + goto out; + } + + if (ctx->exclusive_interface_modes & BIT(newtype)) { + for_each_context(priv, tmp) { + if (ctx == tmp) + continue; + + if (!tmp->is_active) + continue; + + /* + * The current mode switch would be exclusive, but + * another context is active ... refuse the switch. + */ + err = -EBUSY; + goto out; + } + } + + /* success */ + iwl_teardown_interface(priv, vif, true); + vif->type = newviftype; + vif->p2p = newp2p; + err = iwl_setup_interface(priv, ctx); + WARN_ON(err); + /* + * We've switched internally, but submitting to the + * device may have failed for some reason. 
Mask this + * error, because otherwise mac80211 will not switch + * (and set the interface type back) and we'll be + * out of sync with it. + */ + err = 0; + + out: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return err; +} + +static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct cfg80211_scan_request *req = &hw_req->req; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (req->n_channels == 0) + return -EINVAL; + + mutex_lock(&priv->mutex); + + /* + * If an internal scan is in progress, just set + * up the scan_request as per above. + */ + if (priv->scan_type != IWL_SCAN_NORMAL) { + IWL_DEBUG_SCAN(priv, + "SCAN request during internal scan - defer\n"); + priv->scan_request = req; + priv->scan_vif = vif; + ret = 0; + } else { + priv->scan_request = req; + priv->scan_vif = vif; + /* + * mac80211 will only ask for one band at a time + * so using channels[0] here is ok + */ + ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, + req->channels[0]->band); + if (ret) { + priv->scan_request = NULL; + priv->scan_vif = NULL; + } + } + + IWL_DEBUG_MAC80211(priv, "leave\n"); + + mutex_unlock(&priv->mutex); + + return ret; +} + +static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) +{ + struct iwl_addsta_cmd cmd = { + .mode = STA_CONTROL_MODIFY_MSK, + .station_flags_msk = STA_FLG_PWR_SAVE_MSK, + .sta.sta_id = sta_id, + }; + + iwl_send_add_sta(priv, &cmd, CMD_ASYNC); +} + +static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + int sta_id; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + switch (cmd) { + case STA_NOTIFY_SLEEP: + WARN_ON(!sta_priv->client); + sta_priv->asleep = true; + if (atomic_read(&sta_priv->pending_frames) > 0) + ieee80211_sta_block_awake(hw, sta, true); + break; + case STA_NOTIFY_AWAKE: + WARN_ON(!sta_priv->client); + if (!sta_priv->asleep) + break; + sta_priv->asleep = false; + sta_id = iwl_sta_id(sta); + if (sta_id != IWL_INVALID_STATION) + iwl_sta_modify_ps_wake(priv, sta_id); + break; + default: + break; + } + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +const struct ieee80211_ops iwlagn_hw_ops = { + .tx = iwlagn_mac_tx, + .start = iwlagn_mac_start, + .stop = iwlagn_mac_stop, +#ifdef CONFIG_PM_SLEEP + .suspend = iwlagn_mac_suspend, + .resume = iwlagn_mac_resume, + .set_wakeup = iwlagn_mac_set_wakeup, +#endif + .add_interface = iwlagn_mac_add_interface, + .remove_interface = iwlagn_mac_remove_interface, + .change_interface = iwlagn_mac_change_interface, + .config = iwlagn_mac_config, + .configure_filter = iwlagn_configure_filter, + .set_key = iwlagn_mac_set_key, + .update_tkip_key = iwlagn_mac_update_tkip_key, + .set_rekey_data = iwlagn_mac_set_rekey_data, + .conf_tx = iwlagn_mac_conf_tx, + .bss_info_changed = iwlagn_bss_info_changed, + .ampdu_action = iwlagn_mac_ampdu_action, + .hw_scan = iwlagn_mac_hw_scan, + .sta_notify = iwlagn_mac_sta_notify, + .sta_state = iwlagn_mac_sta_state, + .channel_switch = iwlagn_mac_channel_switch, + .flush = iwlagn_mac_flush, + .tx_last_beacon = iwlagn_mac_tx_last_beacon, + .event_callback = iwlagn_mac_event_callback, + .set_tim = iwlagn_mac_set_tim, +}; + +/* This function both allocates and initializes hw and priv. 
*/ +struct ieee80211_hw *iwl_alloc_all(void) +{ + struct iwl_priv *priv; + struct iwl_op_mode *op_mode; + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + struct ieee80211_hw *hw; + + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv) + + sizeof(struct iwl_op_mode), &iwlagn_hw_ops); + if (!hw) + goto out; + + op_mode = hw->priv; + priv = IWL_OP_MODE_GET_DVM(op_mode); + priv->hw = hw; + +out: + return hw; +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c new file mode 100644 index 000000000000..e7616f0ee6e8 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -0,0 +1,2077 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "iwl-eeprom-read.h" +#include "iwl-eeprom-parse.h" +#include "iwl-io.h" +#include "iwl-trans.h" +#include "iwl-op-mode.h" +#include "iwl-drv.h" +#include "iwl-modparams.h" +#include "iwl-prph.h" + +#include "dev.h" +#include "calib.h" +#include "agn.h" + + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +static const struct iwl_op_mode_ops iwl_dvm_ops; + +void iwl_update_chain_flags(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + + for_each_context(priv, ctx) { + iwlagn_set_rxon_chain(priv, ctx); + if (ctx->active.rx_chain != ctx->staging.rx_chain) + iwlagn_commit_rxon(priv, ctx); + } +} + +/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ +static void iwl_set_beacon_tim(struct iwl_priv *priv, + struct iwl_tx_beacon_cmd *tx_beacon_cmd, + u8 *beacon, u32 frame_size) +{ + u16 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* + * The index is relative to frame start but we start looking at the + * variable-length part of the beacon. 
+ */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx+1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); + tx_beacon_cmd->tim_size = beacon[tim_idx+1]; + } else + IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); +} + +int iwlagn_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + struct iwl_host_cmd cmd = { + .id = REPLY_TX_BEACON, + }; + struct ieee80211_tx_info *info; + u32 frame_size; + u32 rate_flags; + u32 rate; + + /* + * We have to set up the TX command, the TX Beacon command, and the + * beacon contents. + */ + + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_ctx) { + IWL_ERR(priv, "trying to build beacon w/o beacon context!\n"); + return 0; + } + + if (WARN_ON(!priv->beacon_skb)) + return -EINVAL; + + /* Allocate beacon command */ + if (!priv->beacon_cmd) + priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL); + tx_beacon_cmd = priv->beacon_cmd; + if (!tx_beacon_cmd) + return -ENOMEM; + + frame_size = priv->beacon_skb->len; + + /* Set up TX command fields */ + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; + + /* Set up TX beacon command fields */ + iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data, + frame_size); + + /* Set up packet rate and flags */ + info = IEEE80211_SKB_CB(priv->beacon_skb); + + /* + * Let's set up the rate at least somewhat correctly; + * it will currently not actually be used by the uCode, + * it uses the broadcast station's rate instead. + */ + if (info->control.rates[0].idx < 0 || + info->control.rates[0].flags & IEEE80211_TX_RC_MCS) + rate = 0; + else + rate = info->control.rates[0].idx; + + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->nvm_data->valid_tx_ant); + rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); + + /* In mac80211, rates for 5 GHz start at 0 */ + if (info->band == IEEE80211_BAND_5GHZ) + rate += IWL_FIRST_OFDM_RATE; + else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + tx_beacon_cmd->tx.rate_n_flags = + iwl_hw_set_rate_n_flags(rate, rate_flags); + + /* Submit command */ + cmd.len[0] = sizeof(*tx_beacon_cmd); + cmd.data[0] = tx_beacon_cmd; + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + cmd.len[1] = frame_size; + cmd.data[1] = priv->beacon_skb->data; + cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; + + return iwl_dvm_send_cmd(priv, &cmd); +} + +static void iwl_bg_beacon_update(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, beacon_update); + struct sk_buff *beacon; + + mutex_lock(&priv->mutex); + if (!priv->beacon_ctx) { + IWL_ERR(priv, "updating beacon w/o beacon context!\n"); + goto out; + } + + if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) { + /* + * The ucode will send beacon notifications even in + * IBSS mode, but we don't want to process them. But + * we need to defer the type check to here due to + * requiring locking around the beacon_ctx access. 
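+		 * priv->mutex is held at this point, so dereferencing
+		 * beacon_ctx here is safe.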
+ */ + goto out; + } + + /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ + beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif); + if (!beacon) { + IWL_ERR(priv, "update beacon failed -- keeping old\n"); + goto out; + } + + /* new beacon skb is allocated every time; dispose previous.*/ + dev_kfree_skb(priv->beacon_skb); + + priv->beacon_skb = beacon; + + iwlagn_send_beacon_cmd(priv); + out: + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_bt_runtime_config(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, bt_runtime_config); + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + + /* dont send host command if rf-kill is on */ + if (!iwl_is_ready_rf(priv)) + goto out; + + iwlagn_send_advance_bt_config(priv); +out: + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_bt_full_concurrency(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, bt_full_concurrency); + struct iwl_rxon_context *ctx; + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + + /* dont send host command if rf-kill is on */ + if (!iwl_is_ready_rf(priv)) + goto out; + + IWL_DEBUG_INFO(priv, "BT coex in %s mode\n", + priv->bt_full_concurrent ? + "full concurrency" : "3-wire"); + + /* + * LQ & RXON updated cmds must be sent before BT Config cmd + * to avoid 3-wire collisions + */ + for_each_context(priv, ctx) { + iwlagn_set_rxon_chain(priv, ctx); + iwlagn_commit_rxon(priv, ctx); + } + + iwlagn_send_advance_bt_config(priv); +out: + mutex_unlock(&priv->mutex); +} + +int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) +{ + struct iwl_statistics_cmd statistics_cmd = { + .configuration_flags = + clear ? IWL_STATS_CONF_CLEAR_STATS : 0, + }; + + if (flags & CMD_ASYNC) + return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, + CMD_ASYNC, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd); + else + return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd); +} + +/** + * iwl_bg_statistics_periodic - Timer callback to queue statistics + * + * This callback is provided in order to send a statistics request. + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION + * was received. We need to ensure we receive the statistics in order + * to update the temperature used for calibrating the TXPOWER. 
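+ *
+ * The request is sent with CMD_ASYNC; the firmware's reply arrives as a
+ * regular STATISTICS_NOTIFICATION and is processed by the rx handlers.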
+ */ +static void iwl_bg_statistics_periodic(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* dont send host command if rf-kill is on */ + if (!iwl_is_ready_rf(priv)) + return; + + iwl_send_statistics_request(priv, CMD_ASYNC, false); +} + + +static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, + u32 start_idx, u32 num_events, + u32 capacity, u32 mode) +{ + u32 i; + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (mode == 0) + ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32)); + else + ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); + + /* Make sure device is powered up for SRAM reads */ + if (!iwl_trans_grab_nic_access(priv->trans, false, ®_flags)) + return; + + /* Set starting address; reads will auto-increment */ + iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr); + + /* + * Refuse to read more than would have fit into the log from + * the current start_idx. This used to happen due to the race + * described below, but now WARN because the code below should + * prevent it from happening here. + */ + if (WARN_ON(num_events > capacity - start_idx)) + num_events = capacity - start_idx; + + /* + * "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. + */ + for (i = 0; i < num_events; i++) { + ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); + time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + trace_iwlwifi_dev_ucode_cont_event( + priv->trans->dev, 0, time, ev); + } else { + data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); + trace_iwlwifi_dev_ucode_cont_event( + priv->trans->dev, time, data, ev); + } + } + /* Allow device to power down */ + iwl_trans_release_nic_access(priv->trans, ®_flags); +} + +static void iwl_continuous_event_trace(struct iwl_priv *priv) +{ + u32 capacity; /* event log capacity in # entries */ + struct { + u32 capacity; + u32 mode; + u32 wrap_counter; + u32 write_counter; + } __packed read; + u32 base; /* SRAM byte address of event log header */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + + base = priv->device_pointers.log_event_table; + if (iwlagn_hw_valid_rtc_data_addr(base)) { + iwl_trans_read_mem_bytes(priv->trans, base, + &read, sizeof(read)); + capacity = read.capacity; + mode = read.mode; + num_wraps = read.wrap_counter; + next_entry = read.write_counter; + } else + return; + + /* + * Unfortunately, the uCode doesn't use temporary variables. + * Therefore, it can happen that we read next_entry == capacity, + * which really means next_entry == 0. + */ + if (unlikely(next_entry == capacity)) + next_entry = 0; + /* + * Additionally, the uCode increases the write pointer before + * the wraps counter, so if the write pointer is smaller than + * the old write pointer (wrap occurred) but we read that no + * wrap occurred, we actually read between the next_entry and + * num_wraps update (this does happen in practice!!) -- take + * that into account by increasing num_wraps. 
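+	 *
+	 * Example: with capacity 256, a previous next_entry of 250 and a new
+	 * read of next_entry == 3 while the wrap counter is unchanged, the
+	 * writer wrapped between its two counter updates, so count one more
+	 * wrap here.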
+ */ + if (unlikely(next_entry < priv->event_log.next_entry && + num_wraps == priv->event_log.num_wraps)) + num_wraps++; + + if (num_wraps == priv->event_log.num_wraps) { + iwl_print_cont_event_trace( + priv, base, priv->event_log.next_entry, + next_entry - priv->event_log.next_entry, + capacity, mode); + + priv->event_log.non_wraps_count++; + } else { + if (num_wraps - priv->event_log.num_wraps > 1) + priv->event_log.wraps_more_count++; + else + priv->event_log.wraps_once_count++; + + trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev, + num_wraps - priv->event_log.num_wraps, + next_entry, priv->event_log.next_entry); + + if (next_entry < priv->event_log.next_entry) { + iwl_print_cont_event_trace( + priv, base, priv->event_log.next_entry, + capacity - priv->event_log.next_entry, + capacity, mode); + + iwl_print_cont_event_trace( + priv, base, 0, next_entry, capacity, mode); + } else { + iwl_print_cont_event_trace( + priv, base, next_entry, + capacity - next_entry, + capacity, mode); + + iwl_print_cont_event_trace( + priv, base, 0, next_entry, capacity, mode); + } + } + + priv->event_log.num_wraps = num_wraps; + priv->event_log.next_entry = next_entry; +} + +/** + * iwl_bg_ucode_trace - Timer callback to log ucode event + * + * The timer is continually set to execute every + * UCODE_TRACE_PERIOD milliseconds after the last timer expired + * this function is to perform continuous uCode event logging operation + * if enabled + */ +static void iwl_bg_ucode_trace(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (priv->event_log.ucode_trace) { + iwl_continuous_event_trace(priv); + /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */ + mod_timer(&priv->ucode_trace, + jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); + } +} + +static void iwl_bg_tx_flush(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, tx_flush); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* do nothing if rf-kill is on */ + if (!iwl_is_ready_rf(priv)) + return; + + IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n"); + iwlagn_dev_txfifo_flush(priv); +} + +/* + * queue/FIFO/AC mapping definitions + */ + +static const u8 iwlagn_bss_ac_to_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, +}; + +static const u8 iwlagn_bss_ac_to_queue[] = { + 0, 1, 2, 3, +}; + +static const u8 iwlagn_pan_ac_to_fifo[] = { + IWL_TX_FIFO_VO_IPAN, + IWL_TX_FIFO_VI_IPAN, + IWL_TX_FIFO_BE_IPAN, + IWL_TX_FIFO_BK_IPAN, +}; + +static const u8 iwlagn_pan_ac_to_queue[] = { + 7, 6, 5, 4, +}; + +static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) +{ + int i; + + /* + * The default context is always valid, + * the PAN context depends on uCode. 
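+	 * (The PAN context is only marked valid when the firmware advertises
+	 * IWL_UCODE_TLV_FLAGS_PAN; the BUILD_BUG_ON at the end of this
+	 * function pins NUM_IWL_RXON_CTX to two.)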
+ */ + priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); + if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) + priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN); + + for (i = 0; i < NUM_IWL_RXON_CTX; i++) + priv->contexts[i].ctxid = i; + + priv->contexts[IWL_RXON_CTX_BSS].always_active = true; + priv->contexts[IWL_RXON_CTX_BSS].is_active = true; + priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; + priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; + priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; + priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; + priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; + priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; + priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; + priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes = + BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR); + priv->contexts[IWL_RXON_CTX_BSS].interface_modes = + BIT(NL80211_IFTYPE_STATION); + priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP; + priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; + priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; + priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; + memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue, + iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue)); + memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo, + iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo)); + + priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON; + priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = + REPLY_WIPAN_RXON_TIMING; + priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd = + REPLY_WIPAN_RXON_ASSOC; + priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM; + priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN; + priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY; + priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID; + priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION; + priv->contexts[IWL_RXON_CTX_PAN].interface_modes = + BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); + + priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; + priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; + priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; + memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue, + iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue)); + memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo, + iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo)); + priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE; + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); +} + +static void iwl_rf_kill_ct_config(struct iwl_priv *priv) +{ + struct iwl_ct_kill_config cmd; + struct iwl_ct_kill_throttling_config adv_cmd; + int ret = 0; + + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + + priv->thermal_throttle.ct_kill_toggle = false; + + if (priv->lib->support_ct_kill_exit) { + adv_cmd.critical_temperature_enter = + cpu_to_le32(priv->hw_params.ct_kill_threshold); + adv_cmd.critical_temperature_exit = + cpu_to_le32(priv->hw_params.ct_kill_exit_threshold); + + ret = iwl_dvm_send_cmd_pdu(priv, + REPLY_CT_KILL_CONFIG_CMD, + 0, sizeof(adv_cmd), &adv_cmd); + if (ret) + IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " + "succeeded, critical temperature enter is %d," + "exit is %d\n", + 
priv->hw_params.ct_kill_threshold, + priv->hw_params.ct_kill_exit_threshold); + } else { + cmd.critical_temperature_R = + cpu_to_le32(priv->hw_params.ct_kill_threshold); + + ret = iwl_dvm_send_cmd_pdu(priv, + REPLY_CT_KILL_CONFIG_CMD, + 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " + "succeeded, " + "critical temperature is %d\n", + priv->hw_params.ct_kill_threshold); + } +} + +static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg) +{ + struct iwl_calib_cfg_cmd calib_cfg_cmd; + struct iwl_host_cmd cmd = { + .id = CALIBRATION_CFG_CMD, + .len = { sizeof(struct iwl_calib_cfg_cmd), }, + .data = { &calib_cfg_cmd, }, + }; + + memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); + calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL; + calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg); + + return iwl_dvm_send_cmd(priv, &cmd); +} + + +static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) +{ + struct iwl_tx_ant_config_cmd tx_ant_cmd = { + .valid = cpu_to_le32(valid_tx_ant), + }; + + if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) { + IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); + return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0, + sizeof(struct iwl_tx_ant_config_cmd), + &tx_ant_cmd); + } else { + IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n"); + return -EOPNOTSUPP; + } +} + +static void iwl_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .lead_time = BT_LEAD_TIME_DEF, + .max_kill = BT_MAX_KILL_DEF, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + if (!iwlwifi_mod_params.bt_coex_active) + bt_cmd.flags = BT_COEX_DISABLE; + else + bt_cmd.flags = BT_COEX_ENABLE; + + priv->bt_enable_flag = bt_cmd.flags; + IWL_DEBUG_INFO(priv, "BT coex %s\n", + (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); + + if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, + 0, sizeof(struct iwl_bt_cmd), &bt_cmd)) + IWL_ERR(priv, "failed to send BT Coex Config\n"); +} + +/** + * iwl_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). 
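+ *
+ * Returns 0 on success, or a negative error such as -ERFKILL when
+ * rfkill is asserted before the device can be brought up.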
+ */ +int iwl_alive_start(struct iwl_priv *priv) +{ + int ret = 0; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + if (iwl_is_rfkill(priv)) + return -ERFKILL; + + if (priv->event_log.ucode_trace) { + /* start collecting data now */ + mod_timer(&priv->ucode_trace, jiffies); + } + + /* download priority table before any calibration request */ + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { + /* Configure Bluetooth device coexistence support */ + if (priv->lib->bt_params->bt_sco_disable) + priv->bt_enable_pspoll = false; + else + priv->bt_enable_pspoll = true; + + priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; + priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; + priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; + iwlagn_send_advance_bt_config(priv); + priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS; + priv->cur_rssi_ctx = NULL; + + iwl_send_prio_tbl(priv); + + /* FIXME: w/a to force change uCode BT state machine */ + ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, + BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); + if (ret) + return ret; + ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE, + BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); + if (ret) + return ret; + } else if (priv->lib->bt_params) { + /* + * default is 2-wire BT coexexistence support + */ + iwl_send_bt_config(priv); + } + + /* + * Perform runtime calibrations, including DC calibration. + */ + iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX); + + ieee80211_wake_queues(priv->hw); + + /* Configure Tx antenna selection based on H/W config */ + iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant); + + if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { + struct iwl_rxon_cmd *active_rxon = + (struct iwl_rxon_cmd *)&ctx->active; + /* apply any changes in staging */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + struct iwl_rxon_context *tmp; + /* Initialize our rx_config data */ + for_each_context(priv, tmp) + iwl_connection_init_rx_config(priv, tmp); + + iwlagn_set_rxon_chain(priv, ctx); + } + + if (!priv->wowlan) { + /* WoWLAN ucode will not reply in the same way, skip it */ + iwl_reset_run_time_calib(priv); + } + + set_bit(STATUS_READY, &priv->status); + + /* Configure the adapter for unassociated operation */ + ret = iwlagn_commit_rxon(priv, ctx); + if (ret) + return ret; + + /* At this point, the NIC is initialized and operational */ + iwl_rf_kill_ct_config(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + + return iwl_power_update_mode(priv, true); +} + +/** + * iwl_clear_driver_stations - clear knowledge of all stations from driver + * @priv: iwl priv struct + * + * This is called during iwl_down() to make sure that in the case + * we're coming there from a hardware restart mac80211 will be + * able to reconfigure stations -- if we're getting there in the + * normal down flow then the stations will already be cleared. 
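+ *
+ * Both the station table and the per-context WEP/key-mapping state are
+ * wiped under sta_lock below.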
+ */ +static void iwl_clear_driver_stations(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + + spin_lock_bh(&priv->sta_lock); + memset(priv->stations, 0, sizeof(priv->stations)); + priv->num_stations = 0; + + priv->ucode_key_table = 0; + + for_each_context(priv, ctx) { + /* + * Remove all key information that is not stored as part + * of station information since mac80211 may not have had + * a chance to remove all the keys. When device is + * reconfigured by mac80211 after an error all keys will + * be reconfigured. + */ + memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys)); + ctx->key_mapping_keys = 0; + } + + spin_unlock_bh(&priv->sta_lock); +} + +void iwl_down(struct iwl_priv *priv) +{ + int exit_pending; + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + lockdep_assert_held(&priv->mutex); + + iwl_scan_cancel_timeout(priv, 200); + + exit_pending = + test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_clear_ucode_stations(priv, NULL); + iwl_dealloc_bcast_stations(priv); + iwl_clear_driver_stations(priv); + + /* reset BT coex data */ + priv->bt_status = 0; + priv->cur_rssi_ctx = NULL; + priv->bt_is_sco = 0; + if (priv->lib->bt_params) + priv->bt_traffic_load = + priv->lib->bt_params->bt_init_traffic_load; + else + priv->bt_traffic_load = 0; + priv->bt_full_concurrent = false; + priv->bt_ci_compliance = 0; + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + priv->ucode_loaded = false; + iwl_trans_stop_device(priv->trans); + + /* Set num_aux_in_flight must be done after the transport is stopped */ + atomic_set(&priv->num_aux_in_flight, 0); + + /* Clear out all status bits but a few that are stable across reset */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + dev_kfree_skb(priv->beacon_skb); + priv->beacon_skb = NULL; +} + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl_bg_run_time_calib_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + run_time_calib_work); + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + if (priv->start_calib) { + iwl_chain_noise_calibration(priv); + iwl_sensitivity_calibration(priv); + } + + mutex_unlock(&priv->mutex); +} + +void iwlagn_prepare_restart(struct iwl_priv *priv) +{ + bool bt_full_concurrent; + u8 bt_ci_compliance; + u8 bt_load; + u8 bt_status; + bool bt_is_sco; + int i; + + lockdep_assert_held(&priv->mutex); + + priv->is_open = 0; + + /* + * __iwl_down() will clear the BT status variables, + * which is correct, but when we restart we really + * want to keep them so restore them afterwards. + * + * The restart process will later pick them up and + * re-configure the hw when we reconfigure the BT + * command. 
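+	 *
+	 * (Only the BT coexistence state is preserved across the restart;
+	 * the remaining device state is rebuilt when mac80211 reconfigures
+	 * us after ieee80211_restart_hw().)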
+ */ + bt_full_concurrent = priv->bt_full_concurrent; + bt_ci_compliance = priv->bt_ci_compliance; + bt_load = priv->bt_traffic_load; + bt_status = priv->bt_status; + bt_is_sco = priv->bt_is_sco; + + iwl_down(priv); + + priv->bt_full_concurrent = bt_full_concurrent; + priv->bt_ci_compliance = bt_ci_compliance; + priv->bt_traffic_load = bt_load; + priv->bt_status = bt_status; + priv->bt_is_sco = bt_is_sco; + + /* reset aggregation queues */ + for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++) + priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; + /* and stop counts */ + for (i = 0; i < IWL_MAX_HW_QUEUES; i++) + atomic_set(&priv->queue_stop_count[i], 0); + + memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc)); +} + +static void iwl_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + mutex_lock(&priv->mutex); + iwlagn_prepare_restart(priv); + mutex_unlock(&priv->mutex); + iwl_cancel_deferred_work(priv); + if (priv->mac80211_registered) + ieee80211_restart_hw(priv->hw); + else + IWL_ERR(priv, + "Cannot request restart before registrating with mac80211\n"); + } else { + WARN_ON(1); + } +} + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + INIT_WORK(&priv->restart, iwl_bg_restart); + INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); + INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); + INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); + INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency); + INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); + + iwl_setup_scan_deferred_work(priv); + + if (priv->lib->bt_params) + iwlagn_bt_setup_deferred_work(priv); + + setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic, + (unsigned long)priv); + + setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace, + (unsigned long)priv); +} + +void iwl_cancel_deferred_work(struct iwl_priv *priv) +{ + if (priv->lib->bt_params) + iwlagn_bt_cancel_deferred_work(priv); + + cancel_work_sync(&priv->run_time_calib_work); + cancel_work_sync(&priv->beacon_update); + + iwl_cancel_scan_deferred_work(priv); + + cancel_work_sync(&priv->bt_full_concurrency); + cancel_work_sync(&priv->bt_runtime_config); + + del_timer_sync(&priv->statistics_periodic); + del_timer_sync(&priv->ucode_trace); +} + +static int iwl_init_drv(struct iwl_priv *priv) +{ + spin_lock_init(&priv->sta_lock); + + mutex_init(&priv->mutex); + + INIT_LIST_HEAD(&priv->calib_results); + + priv->band = IEEE80211_BAND_2GHZ; + + priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + priv->agg_tids_count = 0; + + priv->rx_statistics_jiffies = jiffies; + + /* Choose which receivers/antennas to use */ + iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]); + + iwl_init_scan_params(priv); + + /* init bt coex */ + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { + priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; + priv->kill_cts_mask = 
IWLAGN_BT_KILL_CTS_MASK_DEFAULT; + priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; + priv->bt_on_thresh = BT_ON_THRESHOLD_DEF; + priv->bt_duration = BT_DURATION_LIMIT_DEF; + priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; + } + + return 0; +} + +static void iwl_uninit_drv(struct iwl_priv *priv) +{ + kfree(priv->scan_cmd); + kfree(priv->beacon_cmd); + kfree(rcu_dereference_raw(priv->noa_data)); + iwl_calib_free_results(priv); +#ifdef CONFIG_IWLWIFI_DEBUGFS + kfree(priv->wowlan_sram); +#endif +} + +static void iwl_set_hw_params(struct iwl_priv *priv) +{ + if (priv->cfg->ht_params) + priv->hw_params.use_rts_for_aggregation = + priv->cfg->ht_params->use_rts_for_aggregation; + + /* Device-specific setup */ + priv->lib->set_hw_params(priv); +} + + + +/* show what optional capabilities we have */ +static void iwl_option_config(struct iwl_priv *priv) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n"); +#else + IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n"); +#endif + +#ifdef CONFIG_IWLWIFI_DEBUGFS + IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n"); +#else + IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n"); +#endif + +#ifdef CONFIG_IWLWIFI_DEVICE_TRACING + IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n"); +#else + IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n"); +#endif +} + +static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) +{ + struct iwl_nvm_data *data = priv->nvm_data; + + if (data->sku_cap_11n_enable && + !priv->cfg->ht_params) { + IWL_ERR(priv, "Invalid 11n configuration\n"); + return -EINVAL; + } + + if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable && + !data->sku_cap_band_52GHz_enable) { + IWL_ERR(priv, "Invalid device sku\n"); + return -EINVAL; + } + + IWL_DEBUG_INFO(priv, + "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n", + data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled", + data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled", + data->sku_cap_11n_enable ? "" : "NOT", "enabled"); + + priv->hw_params.tx_chains_num = + num_of_ant(data->valid_tx_ant); + if (priv->cfg->rx_with_siso_diversity) + priv->hw_params.rx_chains_num = 1; + else + priv->hw_params.rx_chains_num = + num_of_ant(data->valid_rx_ant); + + IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n", + data->valid_tx_ant, + data->valid_rx_ant); + + return 0; +} + +static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, + const struct iwl_cfg *cfg, + const struct iwl_fw *fw, + struct dentry *dbgfs_dir) +{ + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_op_mode *op_mode; + u16 num_mac; + u32 ucode_flags; + struct iwl_trans_config trans_cfg = {}; + static const u8 no_reclaim_cmds[] = { + REPLY_RX_PHY_CMD, + REPLY_RX_MPDU_CMD, + REPLY_COMPRESSED_BA, + STATISTICS_NOTIFICATION, + REPLY_TX, + }; + int i; + + /************************ + * 1. 
Allocating HW data + ************************/ + hw = iwl_alloc_all(); + if (!hw) { + pr_err("%s: Cannot allocate network device\n", cfg->name); + goto out; + } + + op_mode = hw->priv; + op_mode->ops = &iwl_dvm_ops; + priv = IWL_OP_MODE_GET_DVM(op_mode); + priv->trans = trans; + priv->dev = trans->dev; + priv->cfg = cfg; + priv->fw = fw; + + switch (priv->cfg->device_family) { + case IWL_DEVICE_FAMILY_1000: + case IWL_DEVICE_FAMILY_100: + priv->lib = &iwl_dvm_1000_cfg; + break; + case IWL_DEVICE_FAMILY_2000: + priv->lib = &iwl_dvm_2000_cfg; + break; + case IWL_DEVICE_FAMILY_105: + priv->lib = &iwl_dvm_105_cfg; + break; + case IWL_DEVICE_FAMILY_2030: + case IWL_DEVICE_FAMILY_135: + priv->lib = &iwl_dvm_2030_cfg; + break; + case IWL_DEVICE_FAMILY_5000: + priv->lib = &iwl_dvm_5000_cfg; + break; + case IWL_DEVICE_FAMILY_5150: + priv->lib = &iwl_dvm_5150_cfg; + break; + case IWL_DEVICE_FAMILY_6000: + case IWL_DEVICE_FAMILY_6000i: + priv->lib = &iwl_dvm_6000_cfg; + break; + case IWL_DEVICE_FAMILY_6005: + priv->lib = &iwl_dvm_6005_cfg; + break; + case IWL_DEVICE_FAMILY_6050: + case IWL_DEVICE_FAMILY_6150: + priv->lib = &iwl_dvm_6050_cfg; + break; + case IWL_DEVICE_FAMILY_6030: + priv->lib = &iwl_dvm_6030_cfg; + break; + default: + break; + } + + if (WARN_ON(!priv->lib)) + goto out_free_hw; + + /* + * Populate the state variables that the transport layer needs + * to know about. + */ + trans_cfg.op_mode = op_mode; + trans_cfg.no_reclaim_cmds = no_reclaim_cmds; + trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); + trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; + trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED; + + trans_cfg.command_names = iwl_dvm_cmd_strings; + trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM; + + WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE < + priv->cfg->base_params->num_of_queues); + + ucode_flags = fw->ucode_capa.flags; + + if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { + priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; + trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; + } else { + priv->sta_key_max_num = STA_KEY_MAX_NUM; + trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; + } + + /* Configure transport layer */ + iwl_trans_configure(priv->trans, &trans_cfg); + + trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; + trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); + + /* At this point both hw and priv are allocated. */ + + SET_IEEE80211_DEV(priv->hw, priv->trans->dev); + + iwl_option_config(priv); + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + + /* is antenna coupling more than 35dB ? */ + priv->bt_ant_couple_ok = + (iwlwifi_mod_params.ant_coupling > + IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? + true : false; + + /* bt channel inhibition enabled*/ + priv->bt_ch_announce = true; + IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n", + (priv->bt_ch_announce) ? "On" : "Off"); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->statistics.lock); + + /*********************** + * 2. Read REV register + ***********************/ + IWL_INFO(priv, "Detected %s, REV=0x%X\n", + priv->cfg->name, priv->trans->hw_rev); + + if (iwl_trans_start_hw(priv->trans)) + goto out_free_hw; + + /* Read the EEPROM */ + if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob, + &priv->eeprom_blob_size)) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_free_hw; + } + + /* Reset chip to save power until we load uCode during "up". 
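+	 * iwl_trans_stop_device() below powers the NIC back down; the
+	 * firmware is loaded again from the mac80211 start callback.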
*/ + iwl_trans_stop_device(priv->trans); + + priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg, + priv->eeprom_blob, + priv->eeprom_blob_size); + if (!priv->nvm_data) + goto out_free_eeprom_blob; + + if (iwl_nvm_check_version(priv->nvm_data, priv->trans)) + goto out_free_eeprom; + + if (iwl_eeprom_init_hw_params(priv)) + goto out_free_eeprom; + + /* extract MAC Address */ + memcpy(priv->addresses[0].addr, priv->nvm_data->hw_addr, ETH_ALEN); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); + priv->hw->wiphy->addresses = priv->addresses; + priv->hw->wiphy->n_addresses = 1; + num_mac = priv->nvm_data->n_hw_addrs; + if (num_mac > 1) { + memcpy(priv->addresses[1].addr, priv->addresses[0].addr, + ETH_ALEN); + priv->addresses[1].addr[5]++; + priv->hw->wiphy->n_addresses++; + } + + /************************ + * 4. Setup HW constants + ************************/ + iwl_set_hw_params(priv); + + if (!(priv->nvm_data->sku_cap_ipan_enable)) { + IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n"); + ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; + /* + * if not PAN, then don't support P2P -- might be a uCode + * packaging bug or due to the eeprom check above + */ + priv->sta_key_max_num = STA_KEY_MAX_NUM; + trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; + + /* Configure transport layer again*/ + iwl_trans_configure(priv->trans, &trans_cfg); + } + + /******************* + * 5. Setup priv + *******************/ + for (i = 0; i < IWL_MAX_HW_QUEUES; i++) { + priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; + if (i < IWLAGN_FIRST_AMPDU_QUEUE && + i != IWL_DEFAULT_CMD_QUEUE_NUM && + i != IWL_IPAN_CMD_QUEUE_NUM) + priv->queue_to_mac80211[i] = i; + atomic_set(&priv->queue_stop_count[i], 0); + } + + if (iwl_init_drv(priv)) + goto out_free_eeprom; + + /* At this point both hw and priv are initialized. */ + + /******************** + * 6. Setup services + ********************/ + iwl_setup_deferred_work(priv); + iwl_setup_rx_handlers(priv); + + iwl_power_initialize(priv); + iwl_tt_initialize(priv); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%s", fw->fw_version); + + priv->new_scan_threshold_behaviour = + !!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN); + + priv->phy_calib_chain_noise_reset_cmd = + fw->ucode_capa.standard_phy_calibration_size; + priv->phy_calib_chain_noise_gain_cmd = + fw->ucode_capa.standard_phy_calibration_size + 1; + + /* initialize all valid contexts */ + iwl_init_context(priv, ucode_flags); + + /************************************************** + * This is still part of probe() in a sense... + * + * 7. 
Setup and register with mac80211 and debugfs + **************************************************/ + if (iwlagn_mac_setup_register(priv, &fw->ucode_capa)) + goto out_destroy_workqueue; + + if (iwl_dbgfs_register(priv, dbgfs_dir)) + goto out_mac80211_unregister; + + return op_mode; + +out_mac80211_unregister: + iwlagn_mac_unregister(priv); +out_destroy_workqueue: + iwl_tt_exit(priv); + iwl_cancel_deferred_work(priv); + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_uninit_drv(priv); +out_free_eeprom_blob: + kfree(priv->eeprom_blob); +out_free_eeprom: + iwl_free_nvm_data(priv->nvm_data); +out_free_hw: + ieee80211_free_hw(priv->hw); +out: + op_mode = NULL; + return op_mode; +} + +static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwlagn_mac_unregister(priv); + + iwl_tt_exit(priv); + + kfree(priv->eeprom_blob); + iwl_free_nvm_data(priv->nvm_data); + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... */ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + + iwl_uninit_drv(priv); + + dev_kfree_skb(priv->beacon_skb); + + iwl_trans_op_mode_leave(priv->trans); + ieee80211_free_hw(priv->hw); +} + +static const char * const desc_lookup_text[] = { + "OK", + "FAIL", + "BAD_PARAM", + "BAD_CHECKSUM", + "NMI_INTERRUPT_WDG", + "SYSASSERT", + "FATAL_ERROR", + "BAD_COMMAND", + "HW_ERROR_TUNE_LOCK", + "HW_ERROR_TEMPERATURE", + "ILLEGAL_CHAN_FREQ", + "VCC_NOT_STABLE", + "FH_ERROR", + "NMI_INTERRUPT_HOST", + "NMI_INTERRUPT_ACTION_PT", + "NMI_INTERRUPT_UNKNOWN", + "UCODE_VERSION_MISMATCH", + "HW_ERROR_ABS_LOCK", + "HW_ERROR_CAL_LOCK_FAIL", + "NMI_INTERRUPT_INST_ACTION_PT", + "NMI_INTERRUPT_DATA_ACTION_PT", + "NMI_TRM_HW_ER", + "NMI_INTERRUPT_TRM", + "NMI_INTERRUPT_BREAK_POINT", + "DEBUG_0", + "DEBUG_1", + "DEBUG_2", + "DEBUG_3", +}; + +static struct { char *name; u8 num; } advanced_lookup[] = { + { "NMI_INTERRUPT_WDG", 0x34 }, + { "SYSASSERT", 0x35 }, + { "UCODE_VERSION_MISMATCH", 0x37 }, + { "BAD_COMMAND", 0x38 }, + { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, + { "FATAL_ERROR", 0x3D }, + { "NMI_TRM_HW_ERR", 0x46 }, + { "NMI_INTERRUPT_TRM", 0x4C }, + { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, + { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, + { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, + { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_ACTION_PT", 0x7C }, + { "NMI_INTERRUPT_UNKNOWN", 0x84 }, + { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "ADVANCED_SYSASSERT", 0 }, +}; + +static const char *desc_lookup(u32 num) +{ + int i; + int max = ARRAY_SIZE(desc_lookup_text); + + if (num < max) + return desc_lookup_text[num]; + + max = ARRAY_SIZE(advanced_lookup) - 1; + for (i = 0; i < max; i++) { + if (advanced_lookup[i].num == num) + break; + } + return advanced_lookup[i].name; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +static void iwl_dump_nic_error_log(struct iwl_priv *priv) +{ + struct iwl_trans *trans = priv->trans; + u32 base; + struct iwl_error_event_table table; + + base = priv->device_pointers.error_event_table; + if (priv->cur_ucode == IWL_UCODE_INIT) { + if (!base) + base = priv->fw->init_errlog_ptr; + } else { + if (!base) + base = priv->fw->inst_errlog_ptr; + } + + if (!iwlagn_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Not valid error log 
pointer 0x%08X for %s uCode\n", + base, + (priv->cur_ucode == IWL_UCODE_INIT) + ? "Init" : "RT"); + return; + } + + /*TODO: Update dbgfs with ISR error stats obtained below */ + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { + IWL_ERR(trans, "Start IWL Error Log Dump:\n"); + IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", + priv->status, table.valid); + } + + trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, + table.data1, table.data2, table.line, + table.blink1, table.blink2, table.ilink1, + table.ilink2, table.bcon_time, table.gp1, + table.gp2, table.gp3, table.ucode_ver, + table.hw_ver, 0, table.brd_ver); + IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, + desc_lookup(table.error_id)); + IWL_ERR(priv, "0x%08X | uPc\n", table.pc); + IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2); + IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1); + IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2); + IWL_ERR(priv, "0x%08X | data1\n", table.data1); + IWL_ERR(priv, "0x%08X | data2\n", table.data2); + IWL_ERR(priv, "0x%08X | line\n", table.line); + IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time); + IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low); + IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi); + IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1); + IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2); + IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver); + IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver); + IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver); + IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd); + IWL_ERR(priv, "0x%08X | isr0\n", table.isr0); + IWL_ERR(priv, "0x%08X | isr1\n", table.isr1); + IWL_ERR(priv, "0x%08X | isr2\n", table.isr2); + IWL_ERR(priv, "0x%08X | isr3\n", table.isr3); + IWL_ERR(priv, "0x%08X | isr4\n", table.isr4); + IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event); + IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control); + IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration); + IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); + IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); + IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); + IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp); + IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler); +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl_print_event_log - Dump error event log to syslog + * + */ +static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode, + int pos, char **buf, size_t bufsz) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + struct iwl_trans *trans = priv->trans; + + if (num_events == 0) + return pos; + + base = priv->device_pointers.log_event_table; + if (priv->cur_ucode == IWL_UCODE_INIT) { + if (!base) + base = priv->fw->init_evtlog_ptr; + } else { + if (!base) + base = priv->fw->inst_evtlog_ptr; + } + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* Make sure device is 
powered up for SRAM reads */ + if (!iwl_trans_grab_nic_access(trans, false, ®_flags)) + return pos; + + /* Set starting address; reads will auto-increment */ + iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. */ + for (i = 0; i < num_events; i++) { + ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + time = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + /* data, ev */ + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOG:0x%08x:%04u\n", + time, ev); + } else { + trace_iwlwifi_dev_ucode_event(trans->dev, 0, + time, ev); + IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", + time, ev); + } + } else { + data = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + } else { + IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + trace_iwlwifi_dev_ucode_event(trans->dev, time, + data, ev); + } + } + } + + /* Allow device to power down */ + iwl_trans_release_nic_access(trans, ®_flags); + return pos; +} + +/** + * iwl_print_last_event_logs - Dump the newest # of event log to syslog + */ +static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, + u32 num_wraps, u32 next_entry, + u32 size, u32 mode, + int pos, char **buf, size_t bufsz) +{ + /* + * display the newest DEFAULT_LOG_ENTRIES entries + * i.e the entries just before the next ont that uCode would fill. + */ + if (num_wraps) { + if (next_entry < size) { + pos = iwl_print_event_log(priv, + capacity - (size - next_entry), + size - next_entry, mode, + pos, buf, bufsz); + pos = iwl_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } else { + if (next_entry < size) { + pos = iwl_print_event_log(priv, 0, next_entry, + mode, pos, buf, bufsz); + } else { + pos = iwl_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } + } + return pos; +} + +#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) + +int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf) +{ + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + u32 logsize; + int pos = 0; + size_t bufsz = 0; + struct iwl_trans *trans = priv->trans; + + base = priv->device_pointers.log_event_table; + if (priv->cur_ucode == IWL_UCODE_INIT) { + logsize = priv->fw->init_evtlog_size; + if (!base) + base = priv->fw->init_evtlog_ptr; + } else { + logsize = priv->fw->inst_evtlog_size; + if (!base) + base = priv->fw->inst_evtlog_ptr; + } + + if (!iwlagn_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Invalid event log pointer 0x%08X for %s uCode\n", + base, + (priv->cur_ucode == IWL_UCODE_INIT) + ? 
"Init" : "RT"); + return -EINVAL; + } + + /* event log header */ + capacity = iwl_trans_read_mem32(trans, base); + mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32))); + num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32))); + next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32))); + + if (capacity > logsize) { + IWL_ERR(priv, "Log capacity %d is bogus, limit to %d " + "entries\n", capacity, logsize); + capacity = logsize; + } + + if (next_entry > logsize) { + IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", + next_entry, logsize); + next_entry = logsize; + } + + size = num_wraps ? capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n"); + return pos; + } + + if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; + IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", + size); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (buf) { + if (full_log) + bufsz = capacity * 48; + else + bufsz = size * 48; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + } + if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) { + /* + * if uCode has wrapped back to top of log, + * start at the oldest entry, + * i.e the next one that uCode would fill. + */ + if (num_wraps) + pos = iwl_print_event_log(priv, next_entry, + capacity - next_entry, mode, + pos, buf, bufsz); + /* (then/else) start at top of log */ + pos = iwl_print_event_log(priv, 0, + next_entry, mode, pos, buf, bufsz); + } else + pos = iwl_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#else + pos = iwl_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#endif + return pos; +} + +static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) +{ + unsigned int reload_msec; + unsigned long reload_jiffies; + + if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) + iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); + + /* uCode is no longer loaded. */ + priv->ucode_loaded = false; + + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + iwl_abort_notification_waits(&priv->notif_wait); + + /* Keep the restart process from trying to send host + * commands by clearing the ready bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!ondemand) { + /* + * If firmware keep reloading, then it indicate something + * serious wrong and firmware having problem to recover + * from it. 
Instead of keep trying which will fill the syslog + * and hang the system, let's just stop it + */ + reload_jiffies = jiffies; + reload_msec = jiffies_to_msecs((long) reload_jiffies - + (long) priv->reload_jiffies); + priv->reload_jiffies = reload_jiffies; + if (reload_msec <= IWL_MIN_RELOAD_DURATION) { + priv->reload_count++; + if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) { + IWL_ERR(priv, "BUG_ON, Stop restarting\n"); + return; + } + } else + priv->reload_count = 0; + } + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + if (iwlwifi_mod_params.restart_fw) { + IWL_DEBUG_FW_ERRORS(priv, + "Restarting adapter due to uCode error.\n"); + queue_work(priv->workqueue, &priv->restart); + } else + IWL_DEBUG_FW_ERRORS(priv, + "Detected FW error, but not restarting\n"); + } +} + +static void iwl_nic_error(struct iwl_op_mode *op_mode) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + IWL_ERR(priv, "Loaded firmware version: %s\n", + priv->fw->fw_version); + + iwl_dump_nic_error_log(priv); + iwl_dump_nic_event_log(priv, false, NULL); + + iwlagn_fw_error(priv, false); +} + +static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + if (!iwl_check_for_ct_kill(priv)) { + IWL_ERR(priv, "Restarting adapter queue is full\n"); + iwlagn_fw_error(priv, false); + } +} + +#define EEPROM_RF_CONFIG_TYPE_MAX 0x3 + +static void iwl_nic_config(struct iwl_op_mode *op_mode) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + /* SKU Control */ + iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | + CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP, + (CSR_HW_REV_STEP(priv->trans->hw_rev) << + CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) | + (CSR_HW_REV_DASH(priv->trans->hw_rev) << + CSR_HW_IF_CONFIG_REG_POS_MAC_DASH)); + + /* write radio config values to register */ + if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) { + u32 reg_val = + priv->nvm_data->radio_cfg_type << + CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE | + priv->nvm_data->radio_cfg_step << + CSR_HW_IF_CONFIG_REG_POS_PHY_STEP | + priv->nvm_data->radio_cfg_dash << + CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; + + iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | + CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | + CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, + reg_val); + + IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n", + priv->nvm_data->radio_cfg_type, + priv->nvm_data->radio_cfg_step, + priv->nvm_data->radio_cfg_dash); + } else { + WARN_ON(1); + } + + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + /* W/A : NIC is stuck in a reset state after Early PCIe power off + * (PCIe power is lost before PERST# is asserted), + * causing ME FW to lose ownership and not being able to obtain it back. 
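+	 * Setting APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS below disables that
+	 * early power-off reset as a workaround.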
+ */ + iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, + ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); + + if (priv->lib->nic_config) + priv->lib->nic_config(priv); +} + +static void iwl_wimax_active(struct iwl_op_mode *op_mode) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + clear_bit(STATUS_READY, &priv->status); + IWL_ERR(priv, "RF is used by WiMAX\n"); +} + +static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + int mq = priv->queue_to_mac80211[queue]; + + if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) + return; + + if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) { + IWL_DEBUG_TX_QUEUES(priv, + "queue %d (mac80211 %d) already stopped\n", + queue, mq); + return; + } + + set_bit(mq, &priv->transport_queue_stop); + ieee80211_stop_queue(priv->hw, mq); +} + +static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + int mq = priv->queue_to_mac80211[queue]; + + if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) + return; + + if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) { + IWL_DEBUG_TX_QUEUES(priv, + "queue %d (mac80211 %d) already awake\n", + queue, mq); + return; + } + + clear_bit(mq, &priv->transport_queue_stop); + + if (!priv->passive_no_rx) + ieee80211_wake_queue(priv->hw, mq); +} + +void iwlagn_lift_passive_no_rx(struct iwl_priv *priv) +{ + int mq; + + if (!priv->passive_no_rx) + return; + + for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) { + if (!test_bit(mq, &priv->transport_queue_stop)) { + IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq); + ieee80211_wake_queue(priv->hw, mq); + } else { + IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq); + } + } + + priv->passive_no_rx = false; +} + +static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + struct ieee80211_tx_info *info; + + info = IEEE80211_SKB_CB(skb); + iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); + ieee80211_free_txskb(priv->hw, skb); +} + +static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + if (state) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); + + return false; +} + +static const struct iwl_op_mode_ops iwl_dvm_ops = { + .start = iwl_op_mode_dvm_start, + .stop = iwl_op_mode_dvm_stop, + .rx = iwl_rx_dispatch, + .queue_full = iwl_stop_sw_queue, + .queue_not_full = iwl_wake_sw_queue, + .hw_rf_kill = iwl_set_hw_rfkill_state, + .free_skb = iwl_free_skb, + .nic_error = iwl_nic_error, + .cmd_queue_full = iwl_cmd_queue_full, + .nic_config = iwl_nic_config, + .wimax_active = iwl_wimax_active, +}; + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ +static int __init iwl_init(void) +{ + + int ret; + + ret = iwlagn_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops); + if (ret) { + pr_err("Unable to register op_mode: %d\n", ret); + iwlagn_rate_control_unregister(); + } + + return ret; +} +module_init(iwl_init); + +static void __exit iwl_exit(void) +{ 
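+	/* Tear down in the reverse order of iwl_init() */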
+ iwl_opmode_deregister("iwldvm"); + iwlagn_rate_control_unregister(); +} +module_exit(iwl_exit); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c new file mode 100644 index 000000000000..1513dbc79c14 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c @@ -0,0 +1,395 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + + +#include +#include +#include +#include +#include "iwl-io.h" +#include "iwl-debug.h" +#include "iwl-trans.h" +#include "iwl-modparams.h" +#include "dev.h" +#include "agn.h" +#include "commands.h" +#include "power.h" + +static bool force_cam = true; +module_param(force_cam, bool, 0644); +MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); + +/* + * Setting power level allows the card to go to sleep when not busy. + * + * We calculate a sleep command based on the required latency, which + * we get from mac80211. In order to handle thermal throttling, we can + * also use pre-defined power levels. + */ + +/* + * This defines the old power levels. 
They are still used by default + * (level 1) and for thermal throttle (levels 3 through 5) + */ + +struct iwl_power_vec_entry { + struct iwl_powertable_cmd cmd; + u8 no_dtim; /* number of skip dtim */ +}; + +#define IWL_DTIM_RANGE_0_MAX 2 +#define IWL_DTIM_RANGE_1_MAX 10 + +#define NOSLP cpu_to_le16(0), 0, 0 +#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 +#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK | \ + IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \ + IWL_POWER_ADVANCE_PM_ENA_MSK) +#define ASLP_TOUT(T) cpu_to_le32(T) +#define TU_TO_USEC 1024 +#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC) +#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \ + cpu_to_le32(X1), \ + cpu_to_le32(X2), \ + cpu_to_le32(X3), \ + cpu_to_le32(X4)} +/* default power management (not Tx power) table values */ +/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */ +/* DTIM 0 - 2 */ +static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = { + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2} +}; + + +/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ +/* DTIM 3 - 10 */ +static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = { + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2} +}; + +/* for DTIM period > IWL_DTIM_RANGE_1_MAX */ +/* DTIM 11 - */ +static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = { + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} +}; + +/* advance power management */ +/* DTIM 0 - 2 */ +static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = { + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2} +}; + + +/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ +/* DTIM 3 - 10 */ +static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = { + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2} +}; + +/* for DTIM period > IWL_DTIM_RANGE_1_MAX */ +/* DTIM 11 - */ +static const struct iwl_power_vec_entry 
apm_range_2[IWL_POWER_NUM] = { + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, + {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), + SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2} +}; + +static void iwl_static_sleep_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd, + enum iwl_power_level lvl, int period) +{ + const struct iwl_power_vec_entry *table; + int max_sleep[IWL_POWER_VEC_SIZE] = { 0 }; + int i; + u8 skip; + u32 slp_itrvl; + + if (priv->lib->adv_pm) { + table = apm_range_2; + if (period <= IWL_DTIM_RANGE_1_MAX) + table = apm_range_1; + if (period <= IWL_DTIM_RANGE_0_MAX) + table = apm_range_0; + } else { + table = range_2; + if (period <= IWL_DTIM_RANGE_1_MAX) + table = range_1; + if (period <= IWL_DTIM_RANGE_0_MAX) + table = range_0; + } + + if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM)) + memset(cmd, 0, sizeof(*cmd)); + else + *cmd = table[lvl].cmd; + + if (period == 0) { + skip = 0; + period = 1; + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) + max_sleep[i] = 1; + + } else { + skip = table[lvl].no_dtim; + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) + max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]); + max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1; + } + + slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); + /* figure out the listen interval based on dtim period and skip */ + if (slp_itrvl == 0xFF) + cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = + cpu_to_le32(period * (skip + 1)); + + slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); + if (slp_itrvl > period) + cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = + cpu_to_le32((slp_itrvl / period) * period); + + if (skip) + cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; + else + cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; + + if (priv->cfg->base_params->shadow_reg_enable) + cmd->flags |= IWL_POWER_SHADOW_REG_ENA; + else + cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; + + if (iwl_advanced_bt_coexist(priv)) { + if (!priv->lib->bt_params->bt_sco_disable) + cmd->flags |= IWL_POWER_BT_SCO_ENA; + else + cmd->flags &= ~IWL_POWER_BT_SCO_ENA; + } + + + slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); + if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL) + cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = + cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL); + + /* enforce max sleep interval */ + for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) { + if (le32_to_cpu(cmd->sleep_interval[i]) > + (max_sleep[i] * period)) + cmd->sleep_interval[i] = + cpu_to_le32(max_sleep[i] * period); + if (i != (IWL_POWER_VEC_SIZE - 1)) { + if (le32_to_cpu(cmd->sleep_interval[i]) > + le32_to_cpu(cmd->sleep_interval[i+1])) + cmd->sleep_interval[i] = + cmd->sleep_interval[i+1]; + } + } + + if (priv->power_data.bus_pm) + cmd->flags |= IWL_POWER_PCI_PM_MSK; + else + cmd->flags &= ~IWL_POWER_PCI_PM_MSK; + + IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n", + skip, period); + /* The power level here is 0-4 (used as array index), but user expects + to see 1-5 (according to spec). 
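/*
 * Illustrative sketch, not part of this patch: a standalone rendering of
 * how iwl_static_sleep_cmd() above turns the table's 0xFF placeholder into
 * a listen interval -- dtim_period * (skip + 1) -- then rounds it down to a
 * whole number of DTIM periods and clamps it.  The MAX_LISTEN_INTERVAL
 * value and all names below are example scaffolding, not driver API.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_LISTEN_INTERVAL 10	/* stand-in for IWL_CONN_MAX_LISTEN_INTERVAL */

static uint32_t derive_listen_interval(uint32_t entry, uint32_t dtim_period,
				       uint8_t skip_dtim)
{
	if (dtim_period == 0)		/* treat "no DTIM" as a period of 1 */
		dtim_period = 1;

	/* 0xFF is a placeholder: sleep over (skip + 1) DTIM periods. */
	if (entry == 0xFF)
		entry = dtim_period * (skip_dtim + 1);

	/* Round down to a whole number of DTIM periods. */
	if (entry > dtim_period)
		entry = (entry / dtim_period) * dtim_period;

	if (entry > MAX_LISTEN_INTERVAL)
		entry = MAX_LISTEN_INTERVAL;

	return entry;
}

int main(void)
{
	/* DTIM period 3, skip 2 DTIMs: the placeholder expands to 9. */
	printf("listen interval = %u\n",
	       (unsigned int)derive_listen_interval(0xFF, 3, 2));
	return 0;
}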
*/ + IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1); +} + +static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd) +{ + memset(cmd, 0, sizeof(*cmd)); + + if (priv->power_data.bus_pm) + cmd->flags |= IWL_POWER_PCI_PM_MSK; + + IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); +} + +static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) +{ + IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); + IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0, + sizeof(struct iwl_powertable_cmd), cmd); +} + +static void iwl_power_build_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd) +{ + bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS; + int dtimper; + + if (force_cam) { + iwl_power_sleep_cam_cmd(priv, cmd); + return; + } + + dtimper = priv->hw->conf.ps_dtim_period ?: 1; + + if (priv->wowlan) + iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); + else if (!priv->lib->no_idle_support && + priv->hw->conf.flags & IEEE80211_CONF_IDLE) + iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); + else if (iwl_tt_is_low_power_state(priv)) { + /* in thermal throttling low power state */ + iwl_static_sleep_cmd(priv, cmd, + iwl_tt_current_power_mode(priv), dtimper); + } else if (!enabled) + iwl_power_sleep_cam_cmd(priv, cmd); + else if (priv->power_data.debug_sleep_level_override >= 0) + iwl_static_sleep_cmd(priv, cmd, + priv->power_data.debug_sleep_level_override, + dtimper); + else { + /* Note that the user parameter is 1-5 (according to spec), + but we pass 0-4 because it acts as an array index. 
*/ + if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 && + iwlwifi_mod_params.power_level <= IWL_POWER_NUM) + iwl_static_sleep_cmd(priv, cmd, + iwlwifi_mod_params.power_level - 1, dtimper); + else + iwl_static_sleep_cmd(priv, cmd, + IWL_POWER_INDEX_1, dtimper); + } +} + +int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, + bool force) +{ + int ret; + bool update_chains; + + lockdep_assert_held(&priv->mutex); + + /* Don't update the RX chain when chain noise calibration is running */ + update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || + priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; + + if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) + return 0; + + if (!iwl_is_ready_rf(priv)) + return -EIO; + + /* scan complete use sleep_power_next, need to be updated */ + memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); + if (test_bit(STATUS_SCANNING, &priv->status) && !force) { + IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n"); + return 0; + } + + if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) + iwl_dvm_set_pmi(priv, true); + + ret = iwl_set_power(priv, cmd); + if (!ret) { + if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) + iwl_dvm_set_pmi(priv, false); + + if (update_chains) + iwl_update_chain_flags(priv); + else + IWL_DEBUG_POWER(priv, + "Cannot update the power, chain noise " + "calibration running: %d\n", + priv->chain_noise_data.state); + + memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); + } else + IWL_ERR(priv, "set power fail, ret = %d\n", ret); + + return ret; +} + +int iwl_power_update_mode(struct iwl_priv *priv, bool force) +{ + struct iwl_powertable_cmd cmd; + + iwl_power_build_cmd(priv, &cmd); + return iwl_power_set_mode(priv, &cmd, force); +} + +/* initialize to default */ +void iwl_power_initialize(struct iwl_priv *priv) +{ + priv->power_data.bus_pm = priv->trans->pm_support; + + priv->power_data.debug_sleep_level_override = -1; + + memset(&priv->power_data.sleep_cmd, 0, + sizeof(priv->power_data.sleep_cmd)); +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h new file mode 100644 index 000000000000..570d3a5e4670 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h @@ -0,0 +1,47 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#ifndef __iwl_power_setting_h__ +#define __iwl_power_setting_h__ + +#include "commands.h" + +struct iwl_power_mgr { + struct iwl_powertable_cmd sleep_cmd; + struct iwl_powertable_cmd sleep_cmd_next; + int debug_sleep_level_override; + bool bus_pm; +}; + +int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, + bool force); +int iwl_power_update_mode(struct iwl_priv *priv, bool force); +void iwl_power_initialize(struct iwl_priv *priv); + +extern bool no_sleep_autoadjust; + +#endif /* __iwl_power_setting_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c new file mode 100644 index 000000000000..cef921c1a623 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -0,0 +1,3338 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "dev.h" +#include "agn.h" + +#define RS_NAME "iwl-agn-rs" + +#define NUM_TRY_BEFORE_ANT_TOGGLE 1 +#define IWL_NUMBER_TRY 1 +#define IWL_HT_NUMBER_TRY 3 + +#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ +#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ +#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ + +/* max allowed rate miss before sync LQ cmd */ +#define IWL_MISSED_RATE_MAX 15 +/* max time to accum history 2 seconds */ +#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) + +static u8 rs_ht_to_legacy[] = { + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX +}; + +static const u8 ant_toggle_lookup[] = { + /*ANT_NONE -> */ ANT_NONE, + /*ANT_A -> */ ANT_B, + /*ANT_B -> */ ANT_C, + /*ANT_AB -> */ ANT_BC, + /*ANT_C -> */ ANT_A, + /*ANT_AC -> */ ANT_AB, + /*ANT_BC -> */ ANT_AC, + /*ANT_ABC -> */ ANT_ABC, +}; + +#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_SISO_##s##M_PLCP, \ + IWL_RATE_MIMO2_##s##M_PLCP,\ + IWL_RATE_MIMO3_##s##M_PLCP,\ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ + /* FIXME:RS: ^^ should be INV (legacy) */ +}; + +static inline u8 rs_extract_rate(u32 rate_n_flags) +{ + return (u8)(rate_n_flags & RATE_MCS_RATE_MSK); +} + +static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) +{ + int idx = 0; + + /* HT rate format */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = rs_extract_rate(rate_n_flags); + + if (idx >= IWL_RATE_MIMO3_6M_PLCP) + idx = idx - IWL_RATE_MIMO3_6M_PLCP; + else if (idx >= IWL_RATE_MIMO2_6M_PLCP) + idx = idx - IWL_RATE_MIMO2_6M_PLCP; + + idx += IWL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht*/ + if (idx >= IWL_RATE_9M_INDEX) + idx += 1; + if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) + return idx; + + /* legacy 
rate format, search for match in table */ + } else { + for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) + if (iwl_rates[idx].plcp == + rs_extract_rate(rate_n_flags)) + return idx; + } + + return -1; +} + +static void rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta); +static void rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 rate_n_flags); +static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); + + +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index); +#else +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{} +#endif + +/** + * The following tables contain the expected throughput metrics for all rates + * + * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits + * + * where invalid entries are zeros. + * + * CCK rates are only valid in legacy table and will only be used in G + * (2.4 GHz) band. + */ + +static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 +}; + +static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */ + {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */ + {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */ + {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */ +}; + +static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ + {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ + {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */ + {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */ +}; + +static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */ + {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */ + {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */ + {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/ +}; + +static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ + {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ + {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */ + {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */ +}; + +static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */ + {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */ + {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */ + {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */ +}; + +static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */ + {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */ + {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */ + {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */ +}; + +/* mbps, mcs */ +static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { + { "1", "BPSK DSSS"}, + { "2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + { "11", "QPSK CCK"}, + { "6", "BPSK 1/2"}, + { "9", "BPSK 1/2"}, + { "12", "QPSK 1/2"}, + { "18", "QPSK 3/4"}, + { "24", 
"16QAM 1/2"}, + { "36", "16QAM 3/4"}, + { "48", "64QAM 2/3"}, + { "54", "64QAM 3/4"}, + { "60", "64QAM 5/6"}, +}; + +#define MCS_INDEX_PER_STREAM (8) + +static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) +{ + return (ant_type & valid_antenna) == ant_type; +} + +/* + * removes the old data from the statistics. All data that is older than + * TID_MAX_TIME_DIFF, will be deleted. + */ +static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) +{ + /* The oldest age we want to keep */ + u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; + + while (tl->queue_count && + (tl->time_stamp < oldest_time)) { + tl->total -= tl->packet_count[tl->head]; + tl->packet_count[tl->head] = 0; + tl->time_stamp += TID_QUEUE_CELL_SPACING; + tl->queue_count--; + tl->head++; + if (tl->head >= TID_QUEUE_MAX_SIZE) + tl->head = 0; + } +} + +/* + * increment traffic load value for tid and also remove + * any old values if passed the certain time period + */ +static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, + struct ieee80211_hdr *hdr) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + u8 tid; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } else + return IWL_MAX_TID_COUNT; + + if (unlikely(tid >= IWL_MAX_TID_COUNT)) + return IWL_MAX_TID_COUNT; + + tl = &lq_data->load[tid]; + + curr_time -= curr_time % TID_ROUND_VALUE; + + /* Happens only for the first packet. Initialize the data */ + if (!(tl->queue_count)) { + tl->total = 1; + tl->time_stamp = curr_time; + tl->queue_count = 1; + tl->head = 0; + tl->packet_count[0] = 1; + return IWL_MAX_TID_COUNT; + } + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + rs_tl_rm_old_stats(tl, curr_time); + + index = (tl->head + index) % TID_QUEUE_MAX_SIZE; + tl->packet_count[index] = tl->packet_count[index] + 1; + tl->total = tl->total + 1; + + if ((index + 1) > tl->queue_count) + tl->queue_count = index + 1; + + return tid; +} + +#ifdef CONFIG_MAC80211_DEBUGFS +/** + * Program the device to use fixed rate for frame transmit + * This is for debugging/testing only + * once the device start use fixed rate, we need to reload the module + * to being back the normal operation. 
+ */ +static void rs_program_fix_rate(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_station_priv *sta_priv = + container_of(lq_sta, struct iwl_station_priv, lq_sta); + struct iwl_rxon_context *ctx = sta_priv->ctx; + + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", + lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); + + if (lq_sta->dbg_fixed_rate) { + rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); + iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, + false); + } +} +#endif + +/* + get the traffic load value for tid +*/ +static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + + if (tid >= IWL_MAX_TID_COUNT) + return 0; + + tl = &(lq_data->load[tid]); + + curr_time -= curr_time % TID_ROUND_VALUE; + + if (!(tl->queue_count)) + return 0; + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + rs_tl_rm_old_stats(tl, curr_time); + + return tl->total; +} + +static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, + struct iwl_lq_sta *lq_data, u8 tid, + struct ieee80211_sta *sta) +{ + int ret = -EAGAIN; + u32 load; + + /* + * Don't create TX aggregation sessions when in high + * BT traffic, as they would just be disrupted by BT. + */ + if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) { + IWL_DEBUG_COEX(priv, + "BT traffic (%d), no aggregation allowed\n", + priv->bt_traffic_load); + return ret; + } + + load = rs_tl_get_load(lq_data, tid); + + IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", + sta->addr, tid); + ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + if (ret == -EAGAIN) { + /* + * driver and mac80211 is out of sync + * this might be cause by reloading firmware + * stop the tx ba session here + */ + IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", + tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + return ret; +} + +static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, + struct iwl_lq_sta *lq_data, + struct ieee80211_sta *sta) +{ + if (tid < IWL_MAX_TID_COUNT) + rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); + else + IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n", + tid, IWL_MAX_TID_COUNT); +} + +static inline int get_num_of_ant_from_rate(u32 rate_n_flags) +{ + return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); +} + +/* + * Static function to get the expected throughput from an iwl_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_index]; + return 0; +} + +/** + * rs_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 62 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. 
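/*
 * Illustrative sketch, not part of this patch: the idea behind
 * rs_tl_rm_old_stats()/rs_tl_add_packet()/rs_tl_get_load() above.  Traffic
 * load per TID is kept in a ring of fixed-width time cells; cells older
 * than the window are expired by advancing the head, and a running total
 * counts the packets still inside the window.  Cell width, ring size and
 * all names are made up for the example; timestamps are assumed to be
 * monotonically increasing.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CELL_MS		1000		/* width of one time cell */
#define RING_SIZE	20		/* cells kept: 20 s of history */

struct traffic_load {
	uint32_t packets[RING_SIZE];
	uint32_t total;			/* packets over all live cells */
	uint32_t head;			/* index of the oldest live cell */
	uint32_t cells;			/* number of live cells */
	uint32_t start_ms;		/* timestamp of the oldest live cell */
};

static void tl_expire(struct traffic_load *tl, uint32_t now_ms)
{
	uint32_t span = (RING_SIZE - 1) * CELL_MS;
	uint32_t oldest;

	if (now_ms < span)
		return;
	oldest = now_ms - span;

	/* Drop cells that fell out of the window, oldest first. */
	while (tl->cells && tl->start_ms < oldest) {
		tl->total -= tl->packets[tl->head];
		tl->packets[tl->head] = 0;
		tl->start_ms += CELL_MS;
		tl->cells--;
		tl->head = (tl->head + 1) % RING_SIZE;
	}
}

static void tl_add_packet(struct traffic_load *tl, uint32_t now_ms)
{
	uint32_t idx;

	now_ms -= now_ms % CELL_MS;	/* snap to a cell boundary */

	if (tl->cells)
		tl_expire(tl, now_ms);

	if (!tl->cells) {		/* empty window: (re)start it here */
		memset(tl, 0, sizeof(*tl));
		tl->start_ms = now_ms;
	}

	idx = (now_ms - tl->start_ms) / CELL_MS;
	tl->packets[(tl->head + idx) % RING_SIZE]++;
	tl->total++;
	if (idx + 1 > tl->cells)
		tl->cells = idx + 1;
}

int main(void)
{
	struct traffic_load tl = { { 0 } };

	tl_add_packet(&tl, 1000);
	tl_add_packet(&tl, 1200);
	tl_add_packet(&tl, 30000);	/* far ahead: the old cells expire */
	printf("packets in window: %u\n", (unsigned int)tl.total);
	return 0;
}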
+ */ +static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes) +{ + struct iwl_rate_scale_data *window = NULL; + static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); + s32 fail_count, tpt; + + if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) + return -EINVAL; + + /* Select window for current tx bit rate */ + window = &(tbl->win[scale_index]); + + /* Get expected throughput */ + tpt = get_expected_tpt(tbl, scale_index); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. + * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + */ + while (attempts > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & mask) { + window->data &= ~mask; + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame to throw away oldest history */ + window->data <<= 1; + + /* Mark the most recent #successes attempts as successful */ + if (successes > 0) { + window->success_counter++; + window->data |= 0x1; + successes--; + } + + attempts--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + return 0; +} + +/* + * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
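/*
 * Illustrative sketch, not part of this patch: a standalone version of the
 * bookkeeping rs_collect_tx_data() above does for one rate.  The newest
 * attempt is shifted into a 62-bit bitmap, the oldest attempt is retired
 * once the window is full, and the success ratio (percent scaled by 128)
 * weights the rate's nominal throughput.  The driver additionally requires
 * a minimum number of failures or successes before it trusts average_tpt;
 * that check is omitted here.  Names other than the two constants are
 * example scaffolding.
 */
#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE	62		/* matches IWL_RATE_MAX_WINDOW */
#define INVALID_VALUE	(-1)

struct tx_window {
	uint64_t data;			/* bit set = that attempt succeeded */
	int32_t counter;		/* attempts recorded (<= WINDOW_SIZE) */
	int32_t success_counter;
	int32_t success_ratio;		/* percent, scaled by 128 */
	int32_t average_tpt;
};

static void window_add(struct tx_window *w, int success, int32_t expected_tpt)
{
	const uint64_t oldest = 1ULL << (WINDOW_SIZE - 1);

	if (w->counter >= WINDOW_SIZE) {
		/* Window full: retire the oldest attempt from the counters. */
		w->counter = WINDOW_SIZE - 1;
		if (w->data & oldest) {
			w->data &= ~oldest;
			w->success_counter--;
		}
	}

	w->counter++;
	w->data <<= 1;			/* make room for the newest attempt */
	if (success) {
		w->data |= 1;
		w->success_counter++;
	}

	w->success_ratio = 128 * (100 * w->success_counter) / w->counter;
	/* Expected throughput, scaled by the measured success ratio. */
	w->average_tpt = (w->success_ratio * expected_tpt + 64) / 128;
}

int main(void)
{
	struct tx_window w = { 0, 0, 0, INVALID_VALUE, INVALID_VALUE };
	int i;

	for (i = 0; i < 10; i++)
		window_add(&w, i & 1, 100);	/* 50% success, nominal tpt 100 */

	printf("success_ratio=%d average_tpt=%d\n",
	       w.success_ratio, w.average_tpt);
	return 0;
}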
+ */ +/* FIXME:RS:remove this function and put the flags statically in the table */ +static u32 rate_n_flags_from_tbl(struct iwl_priv *priv, + struct iwl_scale_tbl_info *tbl, + int index, u8 use_green) +{ + u32 rate_n_flags = 0; + + if (is_legacy(tbl->lq_type)) { + rate_n_flags = iwl_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + rate_n_flags |= RATE_MCS_CCK_MSK; + + } else if (is_Ht(tbl->lq_type)) { + if (index > IWL_LAST_OFDM_RATE) { + IWL_ERR(priv, "Invalid HT rate index %d\n", index); + index = IWL_LAST_OFDM_RATE; + } + rate_n_flags = RATE_MCS_HT_MSK; + + if (is_siso(tbl->lq_type)) + rate_n_flags |= iwl_rates[index].plcp_siso; + else if (is_mimo2(tbl->lq_type)) + rate_n_flags |= iwl_rates[index].plcp_mimo2; + else + rate_n_flags |= iwl_rates[index].plcp_mimo3; + } else { + IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type); + } + + rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & + RATE_MCS_ANT_ABC_MSK); + + if (is_Ht(tbl->lq_type)) { + if (tbl->is_ht40) { + if (tbl->is_dup) + rate_n_flags |= RATE_MCS_DUP_MSK; + else + rate_n_flags |= RATE_MCS_HT40_MSK; + } + if (tbl->is_SGI) + rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + rate_n_flags |= RATE_MCS_GF_MSK; + if (is_siso(tbl->lq_type) && tbl->is_SGI) { + rate_n_flags &= ~RATE_MCS_SGI_MSK; + IWL_ERR(priv, "GF was set with SGI:SISO\n"); + } + } + } + return rate_n_flags; +} + +/* + * Interpret uCode API's rate_n_flags format, + * fill "search" or "active" tx mode table. + */ +static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, + enum ieee80211_band band, + struct iwl_scale_tbl_info *tbl, + int *rate_idx) +{ + u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); + u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags); + u8 mcs; + + memset(tbl, 0, sizeof(struct iwl_scale_tbl_info)); + *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); + + if (*rate_idx == IWL_RATE_INVALID) { + *rate_idx = -1; + return -EINVAL; + } + tbl->is_SGI = 0; /* default legacy setup */ + tbl->is_ht40 = 0; + tbl->is_dup = 0; + tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); + tbl->lq_type = LQ_NONE; + tbl->max_search = IWL_MAX_SEARCH; + + /* legacy rate format */ + if (!(rate_n_flags & RATE_MCS_HT_MSK)) { + if (num_of_ant == 1) { + if (band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + } + /* HT rate format */ + } else { + if (rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((rate_n_flags & RATE_MCS_HT40_MSK) || + (rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_ht40 = 1; + + if (rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + + mcs = rs_extract_rate(rate_n_flags); + + /* SISO */ + if (mcs <= IWL_RATE_SISO_60M_PLCP) { + if (num_of_ant == 1) + tbl->lq_type = LQ_SISO; /*else NONE*/ + /* MIMO2 */ + } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) { + if (num_of_ant == 2) + tbl->lq_type = LQ_MIMO2; + /* MIMO3 */ + } else { + if (num_of_ant == 3) { + tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; + tbl->lq_type = LQ_MIMO3; + } + } + } + return 0; +} + +/* switch to another antenna/antennas and return 1 */ +/* if no other valid antenna found, return 0 */ +static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, + struct iwl_scale_tbl_info *tbl) +{ + u8 new_ant_type; + + if (!tbl->ant_type || tbl->ant_type > ANT_ABC) + return 0; + + if (!rs_is_valid_ant(valid_ant, tbl->ant_type)) + return 0; + + new_ant_type = ant_toggle_lookup[tbl->ant_type]; + + while ((new_ant_type != tbl->ant_type) && + !rs_is_valid_ant(valid_ant, new_ant_type)) + new_ant_type = 
ant_toggle_lookup[new_ant_type]; + + if (new_ant_type == tbl->ant_type) + return 0; + + tbl->ant_type = new_ant_type; + *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; + *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; + return 1; +} + +/** + * Green-field mode is valid if the station supports it and + * there are no non-GF stations present in the BSS. + */ +static bool rs_use_green(struct ieee80211_sta *sta) +{ + /* + * There's a bug somewhere in this code that causes the + * scaling to get stuck because GF+SGI can't be combined + * in SISO rates. Until we find that bug, disable GF, it + * has only limited benefit and we still interoperate with + * GF APs since we can always receive GF transmissions. + */ + return false; +} + +/** + * rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta, + struct ieee80211_hdr *hdr, + enum iwl_table_type rate_type) +{ + if (is_legacy(rate_type)) { + return lq_sta->active_legacy_rate; + } else { + if (is_siso(rate_type)) + return lq_sta->active_siso_rate; + else if (is_mimo2(rate_type)) + return lq_sta->active_mimo2_rate; + else + return lq_sta->active_mimo3_rate; + } +} + +static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask, + int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjacent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + u8 scale_index, u8 ht_possible) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_sta->is_green; + struct iwl_priv *priv = lq_sta->drv; + + /* check if we need to switch from HT to legacy rates. 
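/*
 * Illustrative sketch, not part of this patch: rs_get_adjacent_rate()
 * above returns the nearest enabled lower and higher rate indices packed
 * into one 16-bit value as (high << 8) | low, with a sentinel when a
 * neighbour does not exist.  This standalone version shows that packing
 * for the simple bitmask walk used for OFDM/HT rates; the sentinel value
 * and names are chosen for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define RATE_COUNT	13
#define RATE_NONE	0xff		/* example sentinel */

static uint16_t adjacent_rates(int index, uint16_t rate_mask)
{
	uint8_t low = RATE_NONE, high = RATE_NONE;
	int i;

	/* Nearest enabled rate below the current index. */
	for (i = index - 1; i >= 0; i--) {
		if (rate_mask & (1 << i)) {
			low = (uint8_t)i;
			break;
		}
	}

	/* Nearest enabled rate above the current index. */
	for (i = index + 1; i < RATE_COUNT; i++) {
		if (rate_mask & (1 << i)) {
			high = (uint8_t)i;
			break;
		}
	}

	return (uint16_t)((high << 8) | low);
}

int main(void)
{
	/* Only rates 4, 6 and 8 enabled; the current rate index is 6. */
	uint16_t hl = adjacent_rates(6, (1 << 4) | (1 << 6) | (1 << 8));

	printf("low=%u high=%u\n", hl & 0xffu, (hl >> 8) & 0xffu);
	return 0;
}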
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { + switch_to_legacy = 1; + scale_index = rs_ht_to_legacy[scale_index]; + if (lq_sta->band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (num_of_ant(tbl->ant_type) > 1) + tbl->ant_type = + first_antenna(priv->nvm_data->valid_tx_ant); + + tbl->is_ht40 = 0; + tbl->is_SGI = 0; + tbl->max_search = IWL_MAX_SEARCH; + } + + rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); + + /* Mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + /* supp_rates has no CCK bits in A mode */ + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate_mask = (u16)(rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_mask = (u16)(rate_mask & lq_sta->supp_rates); + } + + /* If we switched from HT to legacy, check current rate */ + if (switch_to_legacy && (rate_mask & (1 << scale_index))) { + low = scale_index; + goto out; + } + + high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask, + tbl->lq_type); + low = high_low & 0xff; + + if (low == IWL_RATE_INVALID) + low = scale_index; + +out: + return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); +} + +/* + * Simple function to compare two rate scale table types + */ +static bool table_type_matches(struct iwl_scale_tbl_info *a, + struct iwl_scale_tbl_info *b) +{ + return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && + (a->is_SGI == b->is_SGI); +} + +static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_scale_tbl_info *tbl; + bool full_concurrent = priv->bt_full_concurrent; + + if (priv->bt_ant_couple_ok) { + /* + * Is there a need to switch between + * full concurrency and 3-wire? + */ + if (priv->bt_ci_compliance && priv->bt_ant_couple_ok) + full_concurrent = true; + else + full_concurrent = false; + } + if ((priv->bt_traffic_load != priv->last_bt_traffic_load) || + (priv->bt_full_concurrent != full_concurrent)) { + priv->bt_full_concurrent = full_concurrent; + priv->last_bt_traffic_load = priv->bt_traffic_load; + + /* Update uCode's rate table. */ + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); + iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); + + queue_work(priv->workqueue, &priv->bt_full_concurrency); + } +} + +/* + * mac80211 sends us Tx status + */ +static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + int legacy_success; + int retries; + int rs_index, mac_index, i; + struct iwl_lq_sta *lq_sta = priv_sta; + struct iwl_link_quality_cmd *table; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_op_mode *op_mode = (struct iwl_op_mode *)priv_r; + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + enum mac80211_rate_control_flags mac_flags; + u32 tx_rate; + struct iwl_scale_tbl_info tbl_type; + struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->ctx; + + IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); + + /* Treat uninitialized rate scaling data same as non-existing. 
*/ + if (!lq_sta) { + IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n"); + return; + } else if (!lq_sta->drv) { + IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); + return; + } + + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + /* + * Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). + */ + table = &lq_sta->lq; + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); + if (priv->band == IEEE80211_BAND_5GHZ) + rs_index -= IWL_FIRST_OFDM_RATE; + mac_flags = info->status.rates[0].flags; + mac_index = info->status.rates[0].idx; + /* For HT packets, map MCS to PLCP */ + if (mac_flags & IEEE80211_TX_RC_MCS) { + mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */ + if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) + mac_index++; + /* + * mac80211 HT index is always zero-indexed; we need to move + * HT OFDM rates after CCK rates in 2.4 GHz band + */ + if (priv->band == IEEE80211_BAND_2GHZ) + mac_index += IWL_FIRST_OFDM_RATE; + } + /* Here we actually compare this rate to the latest LQ command */ + if ((mac_index < 0) || + (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || + (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || + (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || + (tbl_type.ant_type != info->status.antenna) || + (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) || + (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || + (rs_index != mac_index)) { + IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate); + /* + * Since rates mis-match, the last LQ command may have failed. + * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with + * ... driver. 
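/*
 * Illustrative sketch, not part of this patch: the status handler here
 * discards a Tx status whose initial rate does not match the latest link
 * quality command, counts such mismatches, and once they pass a threshold
 * re-sends the LQ command so firmware and driver agree again.  The fragment
 * below captures just that counter pattern; the threshold mirrors
 * IWL_MISSED_RATE_MAX, while resync_lq() is a hypothetical stand-in for
 * iwl_send_lq_cmd(), not driver API.
 */
#include <stdbool.h>
#include <stdio.h>

#define MISSED_RATE_MAX 15

struct lq_state {
	unsigned int missed_rate_counter;
};

/* Hypothetical stand-in for re-sending the link quality command. */
static void resync_lq(struct lq_state *lq)
{
	printf("re-sending link quality command\n");
	lq->missed_rate_counter = 0;
}

/* Returns true if this status should feed the rate-scaling statistics. */
static bool check_tx_status_rate(struct lq_state *lq, bool rate_matches)
{
	if (rate_matches) {
		lq->missed_rate_counter = 0;
		return true;
	}

	if (++lq->missed_rate_counter > MISSED_RATE_MAX)
		resync_lq(lq);

	return false;			/* stale rate: ignore this status */
}

int main(void)
{
	struct lq_state lq = { 0 };
	int i;

	/* 20 consecutive mismatches trigger exactly one resync. */
	for (i = 0; i < 20; i++)
		check_tx_status_rate(&lq, false);
	return 0;
}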
+ */ + lq_sta->missed_rate_counter++; + if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { + lq_sta->missed_rate_counter = 0; + iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); + } + /* Regardless, ignore this status info for outdated rate */ + return; + } else + /* Rate did match, so reset the missed_rate_counter */ + lq_sta->missed_rate_counter = 0; + + /* Figure out if rate scale algorithm is in active or search table */ + if (table_type_matches(&tbl_type, + &(lq_sta->lq_info[lq_sta->active_tbl]))) { + curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + } else if (table_type_matches(&tbl_type, + &lq_sta->lq_info[1 - lq_sta->active_tbl])) { + curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + } else { + IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); + tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); + tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); + IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n", + tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI); + /* + * no matching table found, let's by-pass the data collection + * and continue to perform rate scale to find the rate table + */ + rs_stay_in_table(lq_sta, true); + goto done; + } + + /* + * Updating the frame history depends on whether packets were + * aggregated. + * + * For aggregation, all packets were transmitted at the same rate, the + * first index into rate scale table. + */ + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, + &rs_index); + rs_collect_tx_data(curr_tbl, rs_index, + info->status.ampdu_len, + info->status.ampdu_ack_len); + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += info->status.ampdu_ack_len; + lq_sta->total_failed += (info->status.ampdu_len - + info->status.ampdu_ack_len); + } + } else { + /* + * For legacy, update frame history with for each Tx retry. + */ + retries = info->status.rates[0].count - 1; + /* HW doesn't send more than 15 retries */ + retries = min(retries, 15); + + /* The last transmission may have been successful */ + legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); + /* Collect data for each rate used during failed TX attempts */ + for (i = 0; i <= retries; ++i) { + tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); + rs_get_tbl_info_from_mcs(tx_rate, priv->band, + &tbl_type, &rs_index); + /* + * Only collect stats if retried rate is in the same RS + * table as active/search. + */ + if (table_type_matches(&tbl_type, curr_tbl)) + tmp_tbl = curr_tbl; + else if (table_type_matches(&tbl_type, other_tbl)) + tmp_tbl = other_tbl; + else + continue; + rs_collect_tx_data(tmp_tbl, rs_index, 1, + i < retries ? 
0 : legacy_success); + } + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += legacy_success; + lq_sta->total_failed += retries + (1 - legacy_success); + } + } + /* The last TX rate is cached in lq_sta; it's set in if/else above */ + lq_sta->last_rate_n_flags = tx_rate; +done: + /* See if there's a better rate or modulation mode to try. */ + if (sta && sta->supp_rates[sband->band]) + rs_rate_scale_perform(priv, skb, sta, lq_sta); + + if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) + rs_bt_update_lq(priv, ctx, lq_sta); +} + +/* + * Begin a period of staying with a selected modulation mode. + * Set "stay_in_tbl" flag to prevent any mode switches. + * Set frame tx success limits according to legacy vs. high-throughput, + * and reset overall (spanning all rates) tx success history statistics. + * These control how long we stay using same modulation mode before + * searching for a new mode. + */ +static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, + struct iwl_lq_sta *lq_sta) +{ + IWL_DEBUG_RATE(priv, "we are staying in the same table\n"); + lq_sta->stay_in_tbl = 1; /* only place this gets set */ + if (is_legacy) { + lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; + } else { + lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_sta->table_count = 0; + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = jiffies; + lq_sta->action_counter = 0; +} + +/* + * Find correct throughput table for given mode of modulation + */ +static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + /* Used to choose among HT tables */ + const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT]; + + /* Check for invalid LQ type */ + if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Legacy rates have only one table */ + if (is_legacy(tbl->lq_type)) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Choose among many HT tables depending on number of streams + * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation + * status */ + if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_siso20MHz; + else if (is_siso(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_siso40MHz; + else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_mimo2_20MHz; + else if (is_mimo2(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_mimo2_40MHz; + else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_mimo3_20MHz; + else /* if (is_mimo3(tbl->lq_type)) <-- must be true */ + ht_tbl_pointer = expected_tpt_mimo3_40MHz; + + if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ + tbl->expected_tpt = ht_tbl_pointer[0]; + else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ + tbl->expected_tpt = ht_tbl_pointer[1]; + else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ + tbl->expected_tpt = ht_tbl_pointer[2]; + else /* AGG+SGI */ + tbl->expected_tpt = ht_tbl_pointer[3]; +} + +/* + * Find starting rate for new "search" high-throughput mode of modulation. 
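/*
 * Illustrative sketch, not part of this patch: each HT expected-throughput
 * table above carries four rows -- normal, SGI, aggregation, and
 * aggregation+SGI -- and rs_set_expected_tpt_table() picks one of them from
 * the table's SGI flag and the station's aggregation state.  The same
 * selection can be written as a two-bit row index, as below.  The numbers
 * are a trimmed copy of expected_tpt_siso20MHz, used purely for
 * demonstration.
 */
#include <stdint.h>
#include <stdio.h>

/* First seven entries of the SISO 20 MHz table (rates 1M .. 12M). */
static const uint16_t siso20[4][7] = {
	{ 0, 0, 0, 0, 42, 0, 76 },	/* Norm */
	{ 0, 0, 0, 0, 46, 0, 82 },	/* SGI */
	{ 0, 0, 0, 0, 47, 0, 91 },	/* AGG */
	{ 0, 0, 0, 0, 52, 0, 101 },	/* AGG+SGI */
};

static const uint16_t *pick_row(int is_sgi, int is_agg)
{
	/* bit 0: short guard interval, bit 1: aggregation */
	return siso20[(is_sgi ? 1 : 0) | (is_agg ? 2 : 0)];
}

int main(void)
{
	/* 6 Mbps sits at index 4 in the rate order 1, 2, 5.5, 11, 6, ... */
	printf("6M expected tpt, AGG+SGI: %u\n",
	       (unsigned int)pick_row(1, 1)[4]);
	return 0;
}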
+ * Goal is to find lowest expected rate (under perfect conditions) that is + * above the current measured throughput of "active" mode, to give new mode + * a fair chance to prove itself without too many challenges. + * + * This gets called when transitioning to more aggressive modulation + * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive + * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need + * to decrease to match "active" throughput. When moving from MIMO to SISO, + * bit rate will typically need to increase, but not if performance was bad. + */ +static s32 rs_get_best_rate(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, /* "search" */ + u16 rate_mask, s8 index) +{ + /* "active" values */ + struct iwl_scale_tbl_info *active_tbl = + &(lq_sta->lq_info[lq_sta->active_tbl]); + s32 active_sr = active_tbl->win[index].success_ratio; + s32 active_tpt = active_tbl->expected_tpt[index]; + /* expected "search" throughput */ + const u16 *tpt_tbl = tbl->expected_tpt; + + s32 new_rate, high, low, start_hi; + u16 high_low; + s8 rate = index; + + new_rate = high = low = start_hi = IWL_RATE_INVALID; + + for (; ;) { + high_low = rs_get_adjacent_rate(priv, rate, rate_mask, + tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* + * Lower the "search" bit rate, to give new "search" mode + * approximately the same throughput as "active" if: + * + * 1) "Active" mode has been working modestly well (but not + * great), and expected "search" throughput (under perfect + * conditions) at candidate rate is above the actual + * measured "active" throughput (but less than expected + * "active" throughput under perfect conditions). + * OR + * 2) "Active" mode has been working perfectly or very well + * and expected "search" throughput (under perfect + * conditions) at candidate rate is above expected + * "active" throughput (under perfect conditions). + */ + if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) && + ((active_sr > IWL_RATE_DECREASE_TH) && + (active_sr <= IWL_RATE_HIGH_TH) && + (tpt_tbl[rate] <= active_tpt))) || + ((active_sr >= IWL_RATE_SCALE_SWITCH) && + (tpt_tbl[rate] > active_tpt))) { + + /* (2nd or later pass) + * If we've already tried to raise the rate, and are + * now trying to lower it, use the higher rate. */ + if (start_hi != IWL_RATE_INVALID) { + new_rate = start_hi; + break; + } + + new_rate = rate; + + /* Loop again with lower rate */ + if (low != IWL_RATE_INVALID) + rate = low; + + /* Lower rate not available, use the original */ + else + break; + + /* Else try to raise the "search" rate to match "active" */ + } else { + /* (2nd or later pass) + * If we've already tried to lower the rate, and are + * now trying to raise it, use the lower rate. 
*/ + if (new_rate != IWL_RATE_INVALID) + break; + + /* Loop again with higher rate */ + else if (high != IWL_RATE_INVALID) { + start_hi = high; + rate = high; + + /* Higher rate not available, use the original */ + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} + +/* + * Set up search table for MIMO2 + */ +static int rs_switch_to_mimo2(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (priv->hw_params.tx_chains_num < 2) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); + + tbl->lq_type = LQ_MIMO2; + tbl->is_dup = lq_sta->is_dup; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_mimo2_rate; + + if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + rs_set_expected_tpt_table(lq_sta, tbl); + + rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); + + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for MIMO3 + */ +static int rs_switch_to_mimo3(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (priv->hw_params.tx_chains_num < 3) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n"); + + tbl->lq_type = LQ_MIMO3; + tbl->is_dup = lq_sta->is_dup; + tbl->action = 0; + tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; + rate_mask = lq_sta->active_mimo3_rate; + + if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + rs_set_expected_tpt_table(lq_sta, tbl); + + rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n", + rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); + + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for SISO + */ +static int rs_switch_to_siso(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ 
+ u16 rate_mask; + u8 is_green = lq_sta->is_green; + s32 rate; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n"); + + tbl->is_dup = lq_sta->is_dup; + tbl->lq_type = LQ_SISO; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_siso_rate; + + if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + if (is_green) + tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/ + + rs_set_expected_tpt_table(lq_sta, tbl); + rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Try to switch to new modulation mode from legacy + */ +static void rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + int index) +{ + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->nvm_data->valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + int ret = 0; + u8 update_search_tbl_counter = 0; + + switch (priv->bt_traffic_load) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2) + tbl->action = IWL_LEGACY_SWITCH_SISO; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + valid_tx_ant = + first_antenna(priv->nvm_data->valid_tx_ant); + if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && + tbl->action != IWL_LEGACY_SWITCH_SISO) + tbl->action = IWL_LEGACY_SWITCH_SISO; + break; + default: + IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load); + break; + } + + if (!iwl_ht_enabled(priv)) + /* stay in Legacy */ + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && + tbl->action > IWL_LEGACY_SWITCH_SISO) + tbl->action = IWL_LEGACY_SWITCH_SISO; + + /* configure as 1x1 if bt full concurrency */ + if (priv->bt_full_concurrent) { + if (!iwl_ht_enabled(priv)) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) + tbl->action = IWL_LEGACY_SWITCH_SISO; + valid_tx_ant = + first_antenna(priv->nvm_data->valid_tx_ant); + } + + start_action = tbl->action; + for (; ;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_LEGACY_SWITCH_ANTENNA1: + case IWL_LEGACY_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n"); + + if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + /* Don't change antenna if 
success has been great */ + if (window->success_ratio >= IWL_RS_GOOD_RATIO && + !priv->bt_full_concurrent && + priv->bt_traffic_load == + IWL_BT_COEX_TRAFFIC_LOAD_NONE) + break; + + /* Set up search table to try other antenna */ + memcpy(search_tbl, tbl, sz); + + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + rs_set_expected_tpt_table(lq_sta, search_tbl); + goto out; + } + break; + case IWL_LEGACY_SWITCH_SISO: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n"); + + /* Set up search table to try SISO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + ret = rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + + break; + case IWL_LEGACY_SWITCH_MIMO2_AB: + case IWL_LEGACY_SWITCH_MIMO2_AC: + case IWL_LEGACY_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n"); + + /* Set up search table to try MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + + case IWL_LEGACY_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n"); + + /* Set up search table to try MIMO3 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + + } + search_tbl->lq_type = LQ_NONE; + return; + +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; +} + +/* + * Try to switch to new modulation mode from SISO + */ +static void rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + u8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->nvm_data->valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + switch (priv->bt_traffic_load) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) + tbl->action = IWL_SISO_SWITCH_MIMO2_AB; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + valid_tx_ant 
= + first_antenna(priv->nvm_data->valid_tx_ant); + if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + break; + default: + IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load); + break; + } + + if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && + tbl->action > IWL_SISO_SWITCH_ANTENNA2) { + /* stay in SISO */ + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + } + + /* configure as 1x1 if bt full concurrency */ + if (priv->bt_full_concurrent) { + valid_tx_ant = + first_antenna(priv->nvm_data->valid_tx_ant); + if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + } + + start_action = tbl->action; + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_SISO_SWITCH_ANTENNA1: + case IWL_SISO_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); + if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO && + !priv->bt_full_concurrent && + priv->bt_traffic_load == + IWL_BT_COEX_TRAFFIC_LOAD_NONE) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_SISO_SWITCH_MIMO2_AB: + case IWL_SISO_SWITCH_MIMO2_AC: + case IWL_SISO_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + break; + case IWL_SISO_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n"); + + memcpy(search_tbl, tbl, sz); + if (is_green) { + if (!tbl->is_SGI) + break; + else + IWL_ERR(priv, + "SGI was set in GF+SISO\n"); + } + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + case IWL_SISO_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return; + + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; +} + +/* + * Try to switch to new modulation mode from MIMO2 + */ +static void rs_move_mimo2_to_other(struct iwl_priv 
*priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->nvm_data->valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + switch (priv->bt_traffic_load) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + if (tbl->action != IWL_MIMO2_SWITCH_SISO_A) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_MIMO2_SWITCH_SISO_B || + tbl->action == IWL_MIMO2_SWITCH_SISO_C) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + break; + default: + IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load); + break; + } + + if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && + (tbl->action < IWL_MIMO2_SWITCH_SISO_A || + tbl->action > IWL_MIMO2_SWITCH_SISO_C)) { + /* switch in SISO */ + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + } + + /* configure as 1x1 if bt full concurrency */ + if (priv->bt_full_concurrent && + (tbl->action < IWL_MIMO2_SWITCH_SISO_A || + tbl->action > IWL_MIMO2_SWITCH_SISO_C)) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + + start_action = tbl->action; + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO2_SWITCH_ANTENNA1: + case IWL_MIMO2_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n"); + + if (tx_chains_num <= 2) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_MIMO2_SWITCH_SISO_A: + case IWL_MIMO2_SWITCH_SISO_B: + case IWL_MIMO2_SWITCH_SISO_C: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n"); + + /* Set up new search table for SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO2_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO2 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! 
+ */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + + case IWL_MIMO2_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + } + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + +} + +/* + * Try to switch to new modulation mode from MIMO3 + */ +static void rs_move_mimo3_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->nvm_data->valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + int ret; + u8 update_search_tbl_counter = 0; + + switch (priv->bt_traffic_load) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + if (tbl->action != IWL_MIMO3_SWITCH_SISO_A) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_MIMO3_SWITCH_SISO_B || + tbl->action == IWL_MIMO3_SWITCH_SISO_C) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + break; + default: + IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load); + break; + } + + if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && + (tbl->action < IWL_MIMO3_SWITCH_SISO_A || + tbl->action > IWL_MIMO3_SWITCH_SISO_C)) { + /* switch in SISO */ + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + } + + /* configure as 1x1 if bt full concurrency */ + if (priv->bt_full_concurrent && + (tbl->action < IWL_MIMO3_SWITCH_SISO_A || + tbl->action > IWL_MIMO3_SWITCH_SISO_C)) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + + start_action = tbl->action; + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO3_SWITCH_ANTENNA1: + case IWL_MIMO3_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n"); + + if (tx_chains_num <= 3) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) + goto out; + break; + case IWL_MIMO3_SWITCH_SISO_A: + case IWL_MIMO3_SWITCH_SISO_B: + case IWL_MIMO3_SWITCH_SISO_C: + IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n"); + + /* Set up new search table for SISO */ + 
memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO3_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO3_SWITCH_MIMO2_AB: + case IWL_MIMO3_SWITCH_MIMO2_AC: + case IWL_MIMO3_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n"); + + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO3_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! + */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + } + tbl->action++; + if (tbl->action > IWL_MIMO3_SWITCH_GI) + tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO3_SWITCH_GI) + tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; +} + +/* + * Check whether we should continue using same modulation mode, or + * begin search for a new mode, based on: + * 1) # tx successes or failures while using this mode + * 2) # times calling this function + * 3) elapsed time in this mode (not used, for now) + */ +static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) +{ + struct iwl_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + struct iwl_priv *priv; + + priv = lq_sta->drv; + active_tbl = lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + /* If we've been disallowing search, see if we should now allow it */ + if (lq_sta->stay_in_tbl) { + + /* Elapsed time using current modulation mode */ + if (lq_sta->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_sta->flush_timer + + IWL_RATE_SCALE_FLUSH_INTVL)); + + /* + * Check if we should allow search for new modulation mode. + * If many frames have failed or succeeded, or we've used + * this same modulation for a long time, allow search, and + * reset history stats that keep track of whether we should + * allow a new search. Also (below) reset all bitmaps and + * stats in active history. 
+ */ + if (force_search || + (lq_sta->total_failed > lq_sta->max_failure_limit) || + (lq_sta->total_success > lq_sta->max_success_limit) || + ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) + && (flush_interval_passed))) { + IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n", + lq_sta->total_failed, + lq_sta->total_success, + flush_interval_passed); + + /* Allow search for new mode */ + lq_sta->stay_in_tbl = 0; /* only place reset */ + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = 0; + + /* + * Else if we've used this modulation mode enough repetitions + * (regardless of elapsed time or success/failure), reset + * history bitmaps and rate-specific stats for all rates in + * active table. + */ + } else { + lq_sta->table_count++; + if (lq_sta->table_count >= + lq_sta->table_count_limit) { + lq_sta->table_count = 0; + + IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } + + /* If transitioning to allow "search", reset all history + * bitmaps and stats in active table (this will become the new + * "search" table). */ + if (!lq_sta->stay_in_tbl) { + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + } + } +} + +/* + * setup rate table in uCode + */ +static void rs_update_rate_tbl(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int index, u8 is_green) +{ + u32 rate; + + /* Update uCode's rate table. */ + rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); + rs_fill_link_cmd(priv, lq_sta, rate); + iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); +} + +/* + * Do rate scaling and search for new modulation mode. + */ +static void rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + int i; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 rate_mask; + u8 update_lq = 0; + struct iwl_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_index_msk = 0; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + s32 sr; + u8 tid = IWL_MAX_TID_COUNT; + struct iwl_tid_data *tid_data; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->ctx; + + IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); + + /* Send management frames and NO_ACK data using lowest rate. */ + /* TODO: this could probably be improved.. 
*/ + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + + tid = rs_tl_add_packet(lq_sta, hdr); + if ((tid != IWL_MAX_TID_COUNT) && + (lq_sta->tx_agg_tid_en & (1 << tid))) { + tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid]; + if (tid_data->agg.state == IWL_AGG_OFF) + lq_sta->is_agg = 0; + else + lq_sta->is_agg = 1; + } else + lq_sta->is_agg = 0; + + /* + * Select rate-scale / modulation-mode table to work with in + * the rest of this function: "search" if searching for better + * modulation mode, or "active" if doing rate scaling within a mode. + */ + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + if (is_legacy(tbl->lq_type)) + lq_sta->is_green = 0; + else + lq_sta->is_green = rs_use_green(sta); + is_green = lq_sta->is_green; + + /* current tx rate */ + index = lq_sta->last_txrate_idx; + + IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index, + tbl->lq_type); + + /* rates available for this association, and for modulation mode */ + rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); + + IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_sta->band == IEEE80211_BAND_5GHZ) + /* supp_rates has no CCK bits in A mode */ + rate_scale_index_msk = (u16) (rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_scale_index_msk = (u16) (rate_mask & + lq_sta->supp_rates); + + } else + rate_scale_index_msk = rate_mask; + + if (!rate_scale_index_msk) + rate_scale_index_msk = rate_mask; + + if (!((1 << index) & rate_scale_index_msk)) { + IWL_ERR(priv, "Current Rate is not valid\n"); + if (lq_sta->search_better_tbl) { + /* revert to active table if search table is not valid*/ + tbl->lq_type = LQ_NONE; + lq_sta->search_better_tbl = 0; + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + /* get "active" rate info */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + rs_update_rate_tbl(priv, ctx, lq_sta, tbl, + index, is_green); + } + return; + } + + /* Get expected throughput table and history window for current rate */ + if (!tbl->expected_tpt) { + IWL_ERR(priv, "tbl->expected_tpt is NULL\n"); + return; + } + + /* force user max rate if set by user */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < index)) { + index = lq_sta->max_rate_idx; + update_lq = 1; + window = &(tbl->win[index]); + goto lq_update; + } + + window = &(tbl->win[index]); + + /* + * If there is not enough history to calculate actual average + * throughput, keep analyzing results of more tx frames, without + * changing rate or mode (bypass most of the rest of this function). + * Set up new rate table in uCode only if old rate is not supported + * in current association (use new rate found above). + */ + fail_count = window->counter - window->success_counter; + if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { + IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d " + "for index %d\n", + window->success_counter, window->counter, index); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + + /* Should we stay with this modulation mode, + * or search for a new one? 
*/ + rs_stay_in_table(lq_sta, false); + + goto out; + } + /* Else we have enough samples; calculate estimate of + * actual average throughput */ + if (window->average_tpt != ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128)) { + IWL_ERR(priv, "expected_tpt should have been calculated by now\n"); + window->average_tpt = ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128); + } + + /* If we are searching for better modulation mode, check success. */ + if (lq_sta->search_better_tbl && + (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI)) { + /* If good success, continue using the "search" mode; + * no need to send new link quality command, since we're + * continuing to use the setup that we've been trying. */ + if (window->average_tpt > lq_sta->last_tpt) { + + IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + if (!is_legacy(tbl->lq_type)) + lq_sta->enable_counter = 1; + + /* Swap tables; "search" becomes "active" */ + lq_sta->active_tbl = active_tbl; + current_tpt = window->average_tpt; + + /* Else poor success; go back to mode in "active" table */ + } else { + + IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + /* Nullify "search" table */ + tbl->lq_type = LQ_NONE; + + /* Revert to "active" table */ + active_tbl = lq_sta->active_tbl; + tbl = &(lq_sta->lq_info[active_tbl]); + + /* Revert to "active" rate and throughput info */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + current_tpt = lq_sta->last_tpt; + + /* Need to set up a new rate table in uCode */ + update_lq = 1; + } + + /* Either way, we've made a decision; modulation mode + * search is done, allow rate adjustment next time. */ + lq_sta->search_better_tbl = 0; + done_search = 1; /* Don't switch modes below! */ + goto lq_update; + } + + /* (Else) not in search of better modulation mode, try for better + * starting rate, while staying in this mode. */ + high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < high)) + high = IWL_RATE_INVALID; + + sr = window->success_ratio; + + /* Collect measured throughputs for current and adjacent rates */ + current_tpt = window->average_tpt; + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + scale_action = 0; + + /* Too many failures, decrease rate */ + if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { + IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); + scale_action = -1; + + /* No throughput measured yet for adjacent rates; try increase. */ + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) { + + if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else if (low != IWL_RATE_INVALID) + scale_action = 0; + } + + /* Both adjacent throughputs are measured, but neither one has better + * throughput; we're using the best rate, don't change it! 
*/ + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) + scale_action = 0; + + /* At least one adjacent rate's throughput is measured, + * and may have better performance. */ + else { + /* Higher adjacent rate's throughput is measured */ + if (high_tpt != IWL_INVALID_VALUE) { + /* Higher rate has better throughput */ + if (high_tpt > current_tpt && + sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } else { + scale_action = 0; + } + + /* Lower adjacent rate's throughput is measured */ + } else if (low_tpt != IWL_INVALID_VALUE) { + /* Lower rate has better throughput */ + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low tpt\n"); + scale_action = -1; + } else if (sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it. */ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((sr > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * tbl->expected_tpt[low])))) + scale_action = 0; + if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type)) + scale_action = -1; + if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) + scale_action = -1; + + if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + if (lq_sta->last_bt_traffic > priv->bt_traffic_load) { + /* + * don't set scale_action, don't want to scale up if + * the rate scale doesn't otherwise think that is a + * good idea. + */ + } else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) { + scale_action = -1; + } + } + lq_sta->last_bt_traffic = priv->bt_traffic_load; + + if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + /* search for a new modulation */ + rs_stay_in_table(lq_sta, true); + goto lq_update; + } + + switch (scale_action) { + case -1: + /* Decrease starting rate, update uCode's rate table */ + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } + + break; + case 1: + /* Increase starting rate, update uCode's rate table */ + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } + + break; + case 0: + /* No change */ + default: + break; + } + + IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d " + "high %d type %d\n", + index, scale_action, low, high, tbl->lq_type); + +lq_update: + /* Replace uCode's rate table for the destination station. */ + if (update_lq) + rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green); + + if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) { + /* Should we stay with this modulation mode, + * or search for a new one? */ + rs_stay_in_table(lq_sta, false); + } + /* + * Search for new modulation mode if we're: + * 1) Not changing rates right now + * 2) Not just finishing up a search + * 3) Allowing a new search + */ + if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) { + /* Save current throughput to compare with "search" throughput*/ + lq_sta->last_tpt = current_tpt; + + /* Select a new "search" modulation mode to try. + * If one is found, set up the new "search" table. 
*/ + if (is_legacy(tbl->lq_type)) + rs_move_legacy_other(priv, lq_sta, conf, sta, index); + else if (is_siso(tbl->lq_type)) + rs_move_siso_to_other(priv, lq_sta, conf, sta, index); + else if (is_mimo2(tbl->lq_type)) + rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index); + else + rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index); + + /* If new "search" mode was selected, set up in uCode table */ + if (lq_sta->search_better_tbl) { + /* Access the "search" table, clear its history. */ + tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + + /* Use new "search" start rate */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + + IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n", + tbl->current_rate, index); + rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); + iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); + } else + done_search = 1; + } + + if (done_search && !lq_sta->stay_in_tbl) { + /* If the "active" (non-search) mode was legacy, + * and we've tried switching antennas, + * but we haven't been able to try HT modes (not available), + * stay with best antenna legacy modulation for a while + * before next round of mode comparisons. */ + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && + lq_sta->action_counter > tbl1->max_search) { + IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n"); + rs_set_stay_in_table(priv, 1, lq_sta); + } + + /* If we're in an HT mode, and all 3 mode switch actions + * have been tried and compared, stay in this best modulation + * mode for a while before next round of mode comparisons. */ + if (lq_sta->enable_counter && + (lq_sta->action_counter >= tbl1->max_search) && + iwl_ht_enabled(priv)) { + if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && + (lq_sta->tx_agg_tid_en & (1 << tid)) && + (tid != IWL_MAX_TID_COUNT)) { + u8 sta_id = lq_sta->lq.sta_id; + tid_data = &priv->tid_data[sta_id][tid]; + if (tid_data->agg.state == IWL_AGG_OFF) { + IWL_DEBUG_RATE(priv, + "try to aggregate tid %d\n", + tid); + rs_tl_turn_on_agg(priv, tid, + lq_sta, sta); + } + } + rs_set_stay_in_table(priv, 0, lq_sta); + } + } + +out: + tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); + lq_sta->last_txrate_idx = index; +} + +/** + * rs_initialize_lq - Initialize a station's hardware rate table + * + * The uCode's station table contains a table of fallback rates + * for automatic fallback during transmission. + * + * NOTE: This sets up a default set of values. These will be replaced later + * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of + * rc80211_simple. + * + * NOTE: Run REPLY_ADD_STA command to set up station table entry, before + * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). 
+ */ +static void rs_initialize_lq(struct iwl_priv *priv, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_scale_tbl_info *tbl; + int rate_idx; + int i; + u32 rate; + u8 use_green = rs_use_green(sta); + u8 active_tbl = 0; + u8 valid_tx_ant; + struct iwl_station_priv *sta_priv; + struct iwl_rxon_context *ctx; + + if (!sta || !lq_sta) + return; + + sta_priv = (void *)sta->drv_priv; + ctx = sta_priv->ctx; + + i = lq_sta->last_txrate_idx; + + valid_tx_ant = priv->nvm_data->valid_tx_ant; + + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + if ((i < 0) || (i >= IWL_RATE_COUNT)) + i = 0; + + rate = iwl_rates[i].plcp; + tbl->ant_type = first_antenna(valid_tx_ant); + rate |= tbl->ant_type << RATE_MCS_ANT_POS; + + if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) + rate |= RATE_MCS_CCK_MSK; + + rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx); + if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) + rs_toggle_antenna(valid_tx_ant, &rate, tbl); + + rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green); + tbl->current_rate = rate; + rs_set_expected_tpt_table(lq_sta, tbl); + rs_fill_link_cmd(NULL, lq_sta, rate); + priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; + iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true); +} + +static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + + struct sk_buff *skb = txrc->skb; + struct ieee80211_supported_band *sband = txrc->sband; + struct iwl_op_mode *op_mode __maybe_unused = + (struct iwl_op_mode *)priv_r; + struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_lq_sta *lq_sta = priv_sta; + int rate_idx; + + IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); + + /* Get max rate if user set max rate */ + if (lq_sta) { + lq_sta->max_rate_idx = txrc->max_rate_idx; + if ((sband->band == IEEE80211_BAND_5GHZ) && + (lq_sta->max_rate_idx != -1)) + lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; + if ((lq_sta->max_rate_idx < 0) || + (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) + lq_sta->max_rate_idx = -1; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (lq_sta && !lq_sta->drv) { + IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); + priv_sta = NULL; + } + + /* Send management frames and NO_ACK data using lowest rate. */ + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + rate_idx = lq_sta->last_txrate_idx; + + if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { + rate_idx -= IWL_FIRST_OFDM_RATE; + /* 6M and 9M shared same MCS index */ + rate_idx = (rate_idx > 0) ? 
(rate_idx - 1) : 0; + if (rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO3_6M_PLCP) + rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM); + else if (rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO2_6M_PLCP) + rate_idx = rate_idx + MCS_INDEX_PER_STREAM; + info->control.rates[0].flags = IEEE80211_TX_RC_MCS; + if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI; + if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA; + if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD; + } else { + /* Check for invalid rates */ + if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || + ((sband->band == IEEE80211_BAND_5GHZ) && + (rate_idx < IWL_FIRST_OFDM_RATE))) + rate_idx = rate_lowest_index(sband, sta); + /* On valid 5 GHz rate, adjust index */ + else if (sband->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + info->control.rates[0].flags = 0; + } + info->control.rates[0].idx = rate_idx; + info->control.rates[0].count = 1; +} + +static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, + gfp_t gfp) +{ + struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv; + struct iwl_op_mode *op_mode __maybe_unused = + (struct iwl_op_mode *)priv_rate; + struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode); + + IWL_DEBUG_RATE(priv, "create station rate scale window\n"); + + return &sta_priv->lq_sta; +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id) +{ + int i, j; + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &priv->hw->conf; + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct iwl_station_priv *sta_priv; + struct iwl_lq_sta *lq_sta; + struct ieee80211_supported_band *sband; + unsigned long supp; /* must be unsigned long for for_each_set_bit */ + + sta_priv = (struct iwl_station_priv *) sta->drv_priv; + lq_sta = &sta_priv->lq_sta; + sband = hw->wiphy->bands[conf->chandef.chan->band]; + + + lq_sta->lq.sta_id = sta_id; + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); + + lq_sta->flush_timer = 0; + lq_sta->supp_rates = sta->supp_rates[sband->band]; + + IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n", + sta_id); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. */ + + lq_sta->is_dup = 0; + lq_sta->max_rate_idx = -1; + lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; + lq_sta->is_green = rs_use_green(sta); + lq_sta->band = sband->band; + /* + * active legacy rates as per supported rates bitmap + */ + supp = sta->supp_rates[sband->band]; + lq_sta->active_legacy_rate = 0; + for_each_set_bit(i, &supp, BITS_PER_LONG) + lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); + + /* + * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), + * supp_rates[] does not; shift to convert format, force 9 MBits off. 
+ */ + lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; + lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; + lq_sta->active_siso_rate &= ~((u16)0x2); + lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; + + /* Same here */ + lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; + lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; + lq_sta->active_mimo2_rate &= ~((u16)0x2); + lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; + + lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1; + lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1; + lq_sta->active_mimo3_rate &= ~((u16)0x2); + lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE; + + IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n", + lq_sta->active_siso_rate, + lq_sta->active_mimo2_rate, + lq_sta->active_mimo3_rate); + + /* These values will be overridden later */ + lq_sta->lq.general_params.single_stream_ant_msk = + first_antenna(priv->nvm_data->valid_tx_ant); + lq_sta->lq.general_params.dual_stream_ant_msk = + priv->nvm_data->valid_tx_ant & + ~first_antenna(priv->nvm_data->valid_tx_ant); + if (!lq_sta->lq.general_params.dual_stream_ant_msk) { + lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; + } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) { + lq_sta->lq.general_params.dual_stream_ant_msk = + priv->nvm_data->valid_tx_ant; + } + + /* as default allow aggregation for all tids */ + lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; + lq_sta->drv = priv; + + /* Set last_txrate_idx to lowest rate */ + lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); + if (sband->band == IEEE80211_BAND_5GHZ) + lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + lq_sta->is_agg = 0; +#ifdef CONFIG_MAC80211_DEBUGFS + lq_sta->dbg_fixed_rate = 0; +#endif + + rs_initialize_lq(priv, sta, lq_sta); +} + +static void rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 new_rate) +{ + struct iwl_scale_tbl_info tbl_type; + int index = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_cnt = 0; + u8 use_ht_possible = 1; + u8 valid_tx_ant = 0; + struct iwl_station_priv *sta_priv = + container_of(lq_sta, struct iwl_station_priv, lq_sta); + struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; + + /* Override starting rate (index 0) if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Interpret new_rate (rate_n_flags) */ + rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, + &tbl_type, &rate_idx); + + if (priv && priv->bt_full_concurrent) { + /* 1x1 only */ + tbl_type.ant_type = + first_antenna(priv->nvm_data->valid_tx_ant); + } + + /* How many times should we repeat the initial rate? */ + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_cnt = 1; + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = min(IWL_HT_NUMBER_TRY, + LINK_QUAL_AGG_DISABLE_START_DEF - 1); + } + + lq_cmd->general_params.mimo_delimiter = + is_mimo(tbl_type.lq_type) ? 
1 : 0; + + /* Fill 1st table entry (index 0) */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + if (num_of_ant(tbl_type.ant_type) == 1) { + lq_cmd->general_params.single_stream_ant_msk = + tbl_type.ant_type; + } else if (num_of_ant(tbl_type.ant_type) == 2) { + lq_cmd->general_params.dual_stream_ant_msk = + tbl_type.ant_type; + } /* otherwise we don't modify the existing value */ + + index++; + repeat_rate--; + if (priv) { + if (priv->bt_full_concurrent) + valid_tx_ant = ANT_A; + else + valid_tx_ant = priv->nvm_data->valid_tx_ant; + } + + /* Fill rest of rate table */ + while (index < LINK_QUAL_MAX_RETRY_NUM) { + /* Repeat initial/next rate. + * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. + * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */ + while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + } + + /* Override next rate if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate); + repeat_rate--; + index++; + } + + rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, + &rate_idx); + + if (priv && priv->bt_full_concurrent) { + /* 1x1 only */ + tbl_type.ant_type = + first_antenna(priv->nvm_data->valid_tx_ant); + } + + /* Indicate to uCode which entries might be MIMO. + * If initial rate was MIMO, this will finally end up + * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ + if (is_mimo(tbl_type.lq_type)) + lq_cmd->general_params.mimo_delimiter = index; + + /* Get next rate */ + new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, + use_ht_possible); + + /* How many times should we repeat the next rate? */ + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + /* Don't allow HT rates after next pass. + * rs_get_lower_rate() will change type to LQ_A or LQ_G. 
*/ + use_ht_possible = 0; + + /* Override next rate if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + index++; + repeat_rate--; + } + + lq_cmd->agg_params.agg_frame_cnt_limit = + sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + + lq_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + /* + * overwrite if needed, pass aggregation time limit + * to uCode in uSec + */ + if (priv && priv->lib->bt_params && + priv->lib->bt_params->agg_time_limit && + priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) + lq_cmd->agg_params.agg_time_limit = + cpu_to_le16(priv->lib->bt_params->agg_time_limit); +} + +static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} +/* rate scale requires free function to be implemented */ +static void rs_free(void *priv_rate) +{ + return; +} + +static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct iwl_op_mode *op_mode __maybe_unused = priv_r; + struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode); + + IWL_DEBUG_RATE(priv, "enter\n"); + IWL_DEBUG_RATE(priv, "leave\n"); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{ + struct iwl_priv *priv; + u8 valid_tx_ant; + u8 ant_sel_tx; + + priv = lq_sta->drv; + valid_tx_ant = priv->nvm_data->valid_tx_ant; + if (lq_sta->dbg_fixed_rate) { + ant_sel_tx = + ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) + >> RATE_MCS_ANT_POS); + if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { + *rate_n_flags = lq_sta->dbg_fixed_rate; + IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); + } else { + lq_sta->dbg_fixed_rate = 0; + IWL_ERR(priv, + "Invalid antenna selection 0x%X, Valid is 0x%X\n", + ant_sel_tx, valid_tx_ant); + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } + } else { + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } +} + +static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + char buf[64]; + size_t buf_size; + u32 parsed_rate; + + + priv = lq_sta->drv; + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + lq_sta->dbg_fixed_rate = parsed_rate; + else + lq_sta->dbg_fixed_rate = 0; + + rs_program_fix_rate(priv, lq_sta); + + return count; +} + +static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i = 0; + int index = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + + priv = lq_sta->drv; + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + lq_sta->total_failed, lq_sta->total_success, + lq_sta->active_legacy_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + lq_sta->dbg_fixed_rate); + desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", + (priv->nvm_data->valid_tx_ant & ANT_A) ? 
"ANT_A," : "", + (priv->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "", + (priv->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : ""); + desc += sprintf(buff+desc, "lq type %s\n", + (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); + if (is_Ht(tbl->lq_type)) { + desc += sprintf(buff + desc, " %s", + (is_siso(tbl->lq_type)) ? "SISO" : + ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3")); + desc += sprintf(buff + desc, " %s", + (tbl->is_ht40) ? "40MHz" : "20MHz"); + desc += sprintf(buff + desc, " %s %s %s\n", + (tbl->is_SGI) ? "SGI" : "", + (lq_sta->is_green) ? "GF enabled" : "", + (lq_sta->is_agg) ? "AGG on" : ""); + } + desc += sprintf(buff+desc, "last tx rate=0x%X\n", + lq_sta->last_rate_n_flags); + desc += sprintf(buff+desc, "general:" + "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + lq_sta->lq.general_params.flags, + lq_sta->lq.general_params.mimo_delimiter, + lq_sta->lq.general_params.single_stream_ant_msk, + lq_sta->lq.general_params.dual_stream_ant_msk); + + desc += sprintf(buff+desc, "agg:" + "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), + lq_sta->lq.agg_params.agg_dis_start_th, + lq_sta->lq.agg_params.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + lq_sta->lq.general_params.start_rate_index[0], + lq_sta->lq.general_params.start_rate_index[1], + lq_sta->lq.general_params.start_rate_index[2], + lq_sta->lq.general_params.start_rate_index[3]); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + index = iwl_hwrate_to_plcp_idx( + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); + if (is_legacy(tbl->lq_type)) { + desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", + i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps); + } else { + desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n", + i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs); + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = rs_sta_dbgfs_scale_table_write, + .read = rs_sta_dbgfs_scale_table_read, + .open = simple_open, + .llseek = default_llseek, +}; +static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i, j; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + for (i = 0; i < LQ_SIZE; i++) { + desc += sprintf(buff+desc, + "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" + "rate=0x%X\n", + lq_sta->active_tbl == i ? 
"*" : "x", + lq_sta->lq_info[i].lq_type, + lq_sta->lq_info[i].is_SGI, + lq_sta->lq_info[i].is_ht40, + lq_sta->lq_info[i].is_dup, + lq_sta->is_green, + lq_sta->lq_info[i].current_rate); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->lq_info[i].win[j].counter, + lq_sta->lq_info[i].win[j].success_counter, + lq_sta->lq_info[i].win[j].success_ratio); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = rs_sta_dbgfs_stats_table_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; + char buff[120]; + int desc = 0; + + if (is_Ht(tbl->lq_type)) + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + tbl->expected_tpt[lq_sta->last_txrate_idx]); + else + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); + + return simple_read_from_buffer(user_buf, count, ppos, buff, desc); +} + +static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { + .read = rs_sta_dbgfs_rate_scale_data_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static void rs_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + lq_sta->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + lq_sta->rs_sta_dbgfs_rate_scale_data_file = + debugfs_create_file("rate_scale_data", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); + lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = + debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + &lq_sta->tx_agg_tid_en); + +} + +static void rs_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. 
+ */ +static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta) +{ +} + +static const struct rate_control_ops rs_ops = { + .name = RS_NAME, + .tx_status = rs_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init_stub, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = rs_add_debugfs, + .remove_sta_debugfs = rs_remove_debugfs, +#endif +}; + +int iwlagn_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_ops); +} + +void iwlagn_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_ops); +} + diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h new file mode 100644 index 000000000000..f6bd25cad203 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -0,0 +1,426 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_agn_rs_h__ +#define __iwl_agn_rs_h__ + +#include + +#include "iwl-config.h" + +#include "commands.h" + +struct iwl_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +/* + * These serve as indexes into + * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; + */ +enum { + IWL_RATE_1M_INDEX = 0, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/ + IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */ + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_COUNT, +}; + +enum { + IWL_RATE_6M_INDEX_TABLE = 0, + IWL_RATE_9M_INDEX_TABLE, + IWL_RATE_12M_INDEX_TABLE, + IWL_RATE_18M_INDEX_TABLE, + IWL_RATE_24M_INDEX_TABLE, + IWL_RATE_36M_INDEX_TABLE, + IWL_RATE_48M_INDEX_TABLE, + IWL_RATE_54M_INDEX_TABLE, + IWL_RATE_1M_INDEX_TABLE, + IWL_RATE_2M_INDEX_TABLE, + IWL_RATE_5M_INDEX_TABLE, + IWL_RATE_11M_INDEX_TABLE, + IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) +#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) +#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) + +/* uCode API values for legacy bit rates, both OFDM and CCK */ +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/ + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, + /*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */ + /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ +}; + +/* uCode API values for OFDM high-throughput (HT) bit rates */ +enum { + IWL_RATE_SISO_6M_PLCP = 0, + IWL_RATE_SISO_12M_PLCP = 1, + IWL_RATE_SISO_18M_PLCP = 2, + IWL_RATE_SISO_24M_PLCP = 3, + IWL_RATE_SISO_36M_PLCP = 4, + IWL_RATE_SISO_48M_PLCP = 5, + IWL_RATE_SISO_54M_PLCP = 6, + IWL_RATE_SISO_60M_PLCP = 7, + IWL_RATE_MIMO2_6M_PLCP = 0x8, + IWL_RATE_MIMO2_12M_PLCP = 0x9, + IWL_RATE_MIMO2_18M_PLCP = 0xa, + IWL_RATE_MIMO2_24M_PLCP = 0xb, + IWL_RATE_MIMO2_36M_PLCP = 0xc, + IWL_RATE_MIMO2_48M_PLCP = 0xd, + IWL_RATE_MIMO2_54M_PLCP = 0xe, + IWL_RATE_MIMO2_60M_PLCP = 0xf, + IWL_RATE_MIMO3_6M_PLCP = 0x10, + IWL_RATE_MIMO3_12M_PLCP = 0x11, + IWL_RATE_MIMO3_18M_PLCP = 0x12, + IWL_RATE_MIMO3_24M_PLCP = 0x13, + IWL_RATE_MIMO3_36M_PLCP = 0x14, + IWL_RATE_MIMO3_48M_PLCP 
= 0x15, + IWL_RATE_MIMO3_54M_PLCP = 0x16, + IWL_RATE_MIMO3_60M_PLCP = 0x17, + IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, +}; + +/* MAC header values for bit rates */ +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_60M_IEEE = 120, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) + +#define IWL_INVALID_VALUE -1 + +#define IWL_MIN_RSSI_VAL -100 +#define IWL_MAX_RSSI_VAL 0 + +/* These values specify how many Tx frame attempts before + * searching for a new modulation mode */ +#define IWL_LEGACY_FAILURE_LIMIT 160 +#define IWL_LEGACY_SUCCESS_LIMIT 480 +#define IWL_LEGACY_TABLE_COUNT 160 + +#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 +#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 +#define IWL_NONE_LEGACY_TABLE_COUNT 1500 + +/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ +#define IWL_RS_GOOD_RATIO 12800 /* 100% */ +#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */ +#define IWL_RATE_HIGH_TH 10880 /* 85% */ +#define IWL_RATE_INCREASE_TH 6400 /* 50% */ +#define IWL_RATE_DECREASE_TH 1920 /* 15% */ + +/* possible actions when in legacy mode */ +#define IWL_LEGACY_SWITCH_ANTENNA1 0 +#define IWL_LEGACY_SWITCH_ANTENNA2 1 +#define IWL_LEGACY_SWITCH_SISO 2 +#define IWL_LEGACY_SWITCH_MIMO2_AB 3 +#define IWL_LEGACY_SWITCH_MIMO2_AC 4 +#define IWL_LEGACY_SWITCH_MIMO2_BC 5 +#define IWL_LEGACY_SWITCH_MIMO3_ABC 6 + +/* possible actions when in siso mode */ +#define IWL_SISO_SWITCH_ANTENNA1 0 +#define IWL_SISO_SWITCH_ANTENNA2 1 +#define IWL_SISO_SWITCH_MIMO2_AB 2 +#define IWL_SISO_SWITCH_MIMO2_AC 3 +#define IWL_SISO_SWITCH_MIMO2_BC 4 +#define IWL_SISO_SWITCH_GI 5 +#define IWL_SISO_SWITCH_MIMO3_ABC 6 + + +/* possible actions when in mimo mode */ +#define IWL_MIMO2_SWITCH_ANTENNA1 0 +#define IWL_MIMO2_SWITCH_ANTENNA2 1 +#define IWL_MIMO2_SWITCH_SISO_A 2 +#define IWL_MIMO2_SWITCH_SISO_B 3 +#define IWL_MIMO2_SWITCH_SISO_C 4 +#define IWL_MIMO2_SWITCH_GI 5 +#define IWL_MIMO2_SWITCH_MIMO3_ABC 6 + + +/* possible actions when in mimo3 mode */ +#define IWL_MIMO3_SWITCH_ANTENNA1 0 +#define IWL_MIMO3_SWITCH_ANTENNA2 1 +#define IWL_MIMO3_SWITCH_SISO_A 2 +#define IWL_MIMO3_SWITCH_SISO_B 3 +#define IWL_MIMO3_SWITCH_SISO_C 4 +#define IWL_MIMO3_SWITCH_MIMO2_AB 5 +#define IWL_MIMO3_SWITCH_MIMO2_AC 6 +#define IWL_MIMO3_SWITCH_MIMO2_BC 7 +#define IWL_MIMO3_SWITCH_GI 8 + + +#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI +#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC + +/*FIXME:RS:add possible actions for MIMO3*/ + +#define IWL_ACTION_LIMIT 3 /* # possible actions */ + +#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ + +/* load per tid defines for A-MPDU activation */ +#define IWL_AGG_TPT_THREHOLD 0 +#define IWL_AGG_LOAD_THRESHOLD 10 +#define IWL_AGG_ALL_TID 0xff +#define TID_QUEUE_CELL_SPACING 50 /*mS */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* mS */ + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) + +extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; + +enum iwl_table_type { + LQ_NONE, + LQ_G, /* legacy types */ + LQ_A, + LQ_SISO, /* high-throughput types */ + LQ_MIMO2, + LQ_MIMO3, + LQ_MAX, +}; + +#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) +#define is_siso(tbl) ((tbl) == LQ_SISO) +#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) +#define is_mimo3(tbl) ((tbl) == LQ_MIMO3) +#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) ((tbl) == LQ_A) +#define is_g_and(tbl) ((tbl) == LQ_G) + +#define IWL_MAX_MCS_DISPLAY_SIZE 12 + +struct iwl_rate_mcs_info { + char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; +}; + +/** + * struct iwl_rate_scale_data -- tx success history for one rate + */ +struct iwl_rate_scale_data { + u64 data; /* bitmap of successful frames */ + s32 success_counter; /* number of frames successful */ + s32 success_ratio; /* per-cent * 128 */ + s32 counter; /* number of frames attempted */ + s32 average_tpt; /* success ratio * expected throughput */ + unsigned long stamp; +}; + +/** + * struct iwl_scale_tbl_info -- tx params and success history for all rates + * + * There are two of these in struct iwl_lq_sta, + * one for "active", and one for "search". + */ +struct iwl_scale_tbl_info { + enum iwl_table_type lq_type; + u8 ant_type; + u8 is_SGI; /* 1 = short guard interval */ + u8 is_ht40; /* 1 = 40 MHz channel width */ + u8 is_dup; /* 1 = duplicated data streams */ + u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ + u8 max_search; /* maximun number of tables we can search */ + const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ + u32 current_rate; /* rate_n_flags, uCode API format */ + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ +}; + +struct iwl_traffic_load { + unsigned long time_stamp; /* age of the oldest statistics */ + u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time + * slice */ + u32 total; /* total num of packets during the + * last TID_MAX_TIME_DIFF */ + u8 queue_count; /* number of queues that has + * been used since the last cleanup */ + u8 head; /* start of the circular buffer */ +}; + +/** + * struct iwl_lq_sta -- driver's rate scaling private structure + * + * Pointer to this gets passed back and forth between driver and mac80211. + */ +struct iwl_lq_sta { + u8 active_tbl; /* index of active table, range 0-1 */ + u8 enable_counter; /* indicates HT mode */ + u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ + u8 search_better_tbl; /* 1: currently trying alternate mode */ + s32 last_tpt; + + /* The following determine when to search for a new mode */ + u32 table_count_limit; + u32 max_failure_limit; /* # failed frames before new search */ + u32 max_success_limit; /* # successful frames before new search */ + u32 table_count; + u32 total_failed; /* total failed frames, any/all rates */ + u32 total_success; /* total successful frames, any/all rates */ + u64 flush_timer; /* time staying in mode before new search */ + + u8 action_counter; /* # mode-switch actions tried */ + u8 is_green; + u8 is_dup; + enum ieee80211_band band; + + /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. 
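+	 * Each bit position corresponds to one IWL_RATE_*_INDEX value, so,
+	 * for illustration, a legacy CCK set of {1, 2, 5.5, 11 Mbit/s} would
+	 * be stored as IWL_RATE_1M_MASK | IWL_RATE_2M_MASK |
+	 * IWL_RATE_5M_MASK | IWL_RATE_11M_MASK.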
*/ + u32 supp_rates; + u16 active_legacy_rate; + u16 active_siso_rate; + u16 active_mimo2_rate; + u16 active_mimo3_rate; + s8 max_rate_idx; /* Max rate set by user */ + u8 missed_rate_counter; + + struct iwl_link_quality_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ + struct iwl_traffic_load load[IWL_MAX_TID_COUNT]; + u8 tx_agg_tid_en; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct dentry *rs_sta_dbgfs_rate_scale_data_file; + struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; + u32 dbg_fixed_rate; +#endif + struct iwl_priv *drv; + + /* used to be in sta_info */ + int last_txrate_idx; + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + /* packets destined for this STA are aggregated */ + u8 is_agg; + /* BT traffic this sta was last updated in */ + u8 last_bt_traffic; +}; + +static inline u8 first_antenna(u8 mask) +{ + if (mask & ANT_A) + return ANT_A; + if (mask & ANT_B) + return ANT_B; + return ANT_C; +} + + +/* Initialize station's rate scaling information after adding station */ +void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, + u8 sta_id); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand alone module. The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw + * + */ +int iwlagn_rate_control_register(void); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. + */ +void iwlagn_rate_control_unregister(void); + +#endif /* __iwl_agn__rs__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c new file mode 100644 index 000000000000..4a45b0b594c7 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -0,0 +1,1101 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portionhelp of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include "iwl-io.h" +#include "dev.h" +#include "calib.h" +#include "agn.h" + +#define IWL_CMD_ENTRY(x) [x] = #x + +const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = { + IWL_CMD_ENTRY(REPLY_ALIVE), + IWL_CMD_ENTRY(REPLY_ERROR), + IWL_CMD_ENTRY(REPLY_ECHO), + IWL_CMD_ENTRY(REPLY_RXON), + IWL_CMD_ENTRY(REPLY_RXON_ASSOC), + IWL_CMD_ENTRY(REPLY_QOS_PARAM), + IWL_CMD_ENTRY(REPLY_RXON_TIMING), + IWL_CMD_ENTRY(REPLY_ADD_STA), + IWL_CMD_ENTRY(REPLY_REMOVE_STA), + IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA), + IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH), + IWL_CMD_ENTRY(REPLY_WEPKEY), + IWL_CMD_ENTRY(REPLY_TX), + IWL_CMD_ENTRY(REPLY_LEDS_CMD), + IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD), + IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD), + IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION), + IWL_CMD_ENTRY(COEX_EVENT_CMD), + IWL_CMD_ENTRY(REPLY_QUIET_CMD), + IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH), + IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD), + IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION), + IWL_CMD_ENTRY(POWER_TABLE_CMD), + IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION), + IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC), + IWL_CMD_ENTRY(REPLY_SCAN_CMD), + IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD), + IWL_CMD_ENTRY(SCAN_START_NOTIFICATION), + IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION), + IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION), + IWL_CMD_ENTRY(BEACON_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_TX_BEACON), + IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION), + IWL_CMD_ENTRY(QUIET_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD), + IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_BT_CONFIG), + IWL_CMD_ENTRY(REPLY_STATISTICS_CMD), + IWL_CMD_ENTRY(STATISTICS_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD), + IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION), + IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD), + IWL_CMD_ENTRY(SENSITIVITY_CMD), + IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD), + IWL_CMD_ENTRY(REPLY_RX_PHY_CMD), + IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD), + IWL_CMD_ENTRY(REPLY_COMPRESSED_BA), + IWL_CMD_ENTRY(CALIBRATION_CFG_CMD), + IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION), + IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD), + IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION), + IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD), + IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF), + IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE), + IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV), + IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS), + IWL_CMD_ENTRY(REPLY_WIPAN_RXON), + IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING), + IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC), + IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM), + IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY), + IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH), + IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE), + IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS), + IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER), + IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS), + IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS), + IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL), + IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS), + IWL_CMD_ENTRY(REPLY_D3_CONFIG), +}; +#undef IWL_CMD_ENTRY + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ + +static void 
iwlagn_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_error_resp *err_resp = (void *)pkt->data; + + IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(err_resp->error_type), + err_resp->cmd_id, + le16_to_cpu(err_resp->bad_cmd_seq_num), + le32_to_cpu(err_resp->error_info)); +} + +static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_csa_notification *csa = (void *)pkt->data; + /* + * MULTI-FIXME + * See iwlagn_mac_channel_switch. + */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_rxon_cmd *rxon = (void *)&ctx->active; + + if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + return; + + if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) { + rxon->channel = csa->channel; + ctx->staging.channel = csa->channel; + IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", + le16_to_cpu(csa->channel)); + iwl_chswitch_done(priv, true); + } else { + IWL_ERR(priv, "CSA notif (fail) : channel %d\n", + le16_to_cpu(csa->channel)); + iwl_chswitch_done(priv, false); + } +} + + +static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_spectrum_notification *report = (void *)pkt->data; + + if (!report->state) { + IWL_DEBUG_11H(priv, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= MEASUREMENT_READY; +} + +static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_sleep_notification *sleep = (void *)pkt->data; + IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} + +static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 __maybe_unused len = iwl_rx_packet_len(pkt); + IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " + "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len); + iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len); +} + +static void iwlagn_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwlagn_beacon_notif *beacon = (void *)pkt->data; +#ifdef CONFIG_IWLWIFI_DEBUG + u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status); + u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " + "tsf:0x%.8x%.8x rate:%d\n", + status & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); +} + +/** + * iwl_good_plcp_health - checks for plcp error. + * + * When the plcp error is exceeding the thresholds, reset the radio + * to improve the throughput. 
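+ *
+ * The check boils down to (delta * 100) / msecs > plcp_delta_threshold,
+ * i.e. the number of new PLCP errors per 100 ms of elapsed time.  As an
+ * illustration, 60 new errors over a 1000 ms window gives 6, which would
+ * exceed a threshold of 5 and lead to an RF reset.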
+ */ +static bool iwlagn_good_plcp_health(struct iwl_priv *priv, + struct statistics_rx_phy *cur_ofdm, + struct statistics_rx_ht_phy *cur_ofdm_ht, + unsigned int msecs) +{ + int delta; + int threshold = priv->plcp_delta_threshold; + + if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { + IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); + return true; + } + + delta = le32_to_cpu(cur_ofdm->plcp_err) - + le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) + + le32_to_cpu(cur_ofdm_ht->plcp_err) - + le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err); + + /* Can be negative if firmware reset statistics */ + if (delta <= 0) + return true; + + if ((delta * 100 / msecs) > threshold) { + IWL_DEBUG_RADIO(priv, + "plcp health threshold %u delta %d msecs %u\n", + threshold, delta, msecs); + return false; + } + + return true; +} + +int iwl_force_rf_reset(struct iwl_priv *priv, bool external) +{ + struct iwl_rf_reset *rf_reset; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EAGAIN; + + if (!iwl_is_any_associated(priv)) { + IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n"); + return -ENOLINK; + } + + rf_reset = &priv->rf_reset; + rf_reset->reset_request_count++; + if (!external && rf_reset->last_reset_jiffies && + time_after(rf_reset->last_reset_jiffies + + IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) { + IWL_DEBUG_INFO(priv, "RF reset rejected\n"); + rf_reset->reset_reject_count++; + return -EAGAIN; + } + rf_reset->reset_success_count++; + rf_reset->last_reset_jiffies = jiffies; + + /* + * There is no easy and better way to force reset the radio, + * the only known method is switching channel which will force to + * reset and tune the radio. + * Use internal short scan (single channel) operation to should + * achieve this objective. + * Driver should reset the radio when number of consecutive missed + * beacon, or any other uCode error condition detected. + */ + IWL_DEBUG_INFO(priv, "perform radio reset.\n"); + iwl_internal_short_hw_scan(priv); + return 0; +} + + +static void iwlagn_recover_from_statistics(struct iwl_priv *priv, + struct statistics_rx_phy *cur_ofdm, + struct statistics_rx_ht_phy *cur_ofdm_ht, + struct statistics_tx *tx, + unsigned long stamp) +{ + unsigned int msecs; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies); + + /* Only gather statistics and update time stamp when not associated */ + if (!iwl_is_any_associated(priv)) + return; + + /* Do not check/recover when do not have enough statistics data */ + if (msecs < 99) + return; + + if (!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs)) + iwl_force_rf_reset(priv, false); +} + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. 
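+ * The reported value is the average in-band "silence" RSSI over the rx
+ * chains that returned a non-zero measurement, minus a fixed 107 dB
+ * offset; for example, readings of 30 and 34 on two active chains
+ * average to 32 and are reported as roughly -75 dBm.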
*/ +static void iwlagn_rx_calc_noise(struct iwl_priv *priv) +{ + struct statistics_rx_non_phy *rx_info; + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a, bcn_silence_b, bcn_silence_c; + int last_rx_noise; + + rx_info = &priv->statistics.rx_non_phy; + + bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + last_rx_noise = (total_silence / num_active_rx) - 107; + else + last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", + bcn_silence_a, bcn_silence_b, bcn_silence_c, + last_rx_noise); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +/* + * based on the assumption of all statistics counter are in DWORD + * FIXME: This function is for debugging, do not deal with + * the case of counters roll-over. + */ +static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta, + __le32 *max_delta, __le32 *accum, int size) +{ + int i; + + for (i = 0; + i < size / sizeof(__le32); + i++, prev++, cur++, delta++, max_delta++, accum++) { + if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) { + *delta = cpu_to_le32( + le32_to_cpu(*cur) - le32_to_cpu(*prev)); + le32_add_cpu(accum, le32_to_cpu(*delta)); + if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta)) + *max_delta = *delta; + } + } +} + +static void +iwlagn_accumulative_statistics(struct iwl_priv *priv, + struct statistics_general_common *common, + struct statistics_rx_non_phy *rx_non_phy, + struct statistics_rx_phy *rx_ofdm, + struct statistics_rx_ht_phy *rx_ofdm_ht, + struct statistics_rx_phy *rx_cck, + struct statistics_tx *tx, + struct statistics_bt_activity *bt_activity) +{ +#define ACCUM(_name) \ + accum_stats((__le32 *)&priv->statistics._name, \ + (__le32 *)_name, \ + (__le32 *)&priv->delta_stats._name, \ + (__le32 *)&priv->max_delta_stats._name, \ + (__le32 *)&priv->accum_stats._name, \ + sizeof(*_name)); + + ACCUM(common); + ACCUM(rx_non_phy); + ACCUM(rx_ofdm); + ACCUM(rx_ofdm_ht); + ACCUM(rx_cck); + ACCUM(tx); + if (bt_activity) + ACCUM(bt_activity); +#undef ACCUM +} +#else +static inline void +iwlagn_accumulative_statistics(struct iwl_priv *priv, + struct statistics_general_common *common, + struct statistics_rx_non_phy *rx_non_phy, + struct statistics_rx_phy *rx_ofdm, + struct statistics_rx_ht_phy *rx_ofdm_ht, + struct statistics_rx_phy *rx_cck, + struct statistics_tx *tx, + struct statistics_bt_activity *bt_activity) +{ +} +#endif + +static void iwlagn_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + unsigned long stamp = jiffies; + const int reg_recalib_period = 60; + int change; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 len = iwl_rx_packet_payload_len(pkt); + __le32 *flag; + struct statistics_general_common *common; + struct statistics_rx_non_phy *rx_non_phy; + struct statistics_rx_phy *rx_ofdm; + struct statistics_rx_ht_phy *rx_ofdm_ht; + struct statistics_rx_phy *rx_cck; + struct statistics_tx *tx; + struct statistics_bt_activity *bt_activity; + + IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n", + len); + + 
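+	/*
+	 * The uCode reports statistics in two layouts that are told apart
+	 * purely by length below: the larger one carries a bt_activity
+	 * block (struct iwl_bt_notif_statistics) on devices with BT
+	 * coexistence, all others use struct iwl_notif_statistics.  The
+	 * lock protects the cached copy in priv->statistics while the
+	 * snapshot is updated.
+	 */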
spin_lock(&priv->statistics.lock); + + if (len == sizeof(struct iwl_bt_notif_statistics)) { + struct iwl_bt_notif_statistics *stats; + stats = (void *)&pkt->data; + flag = &stats->flag; + common = &stats->general.common; + rx_non_phy = &stats->rx.general.common; + rx_ofdm = &stats->rx.ofdm; + rx_ofdm_ht = &stats->rx.ofdm_ht; + rx_cck = &stats->rx.cck; + tx = &stats->tx; + bt_activity = &stats->general.activity; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* handle this exception directly */ + priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills; + le32_add_cpu(&priv->statistics.accum_num_bt_kills, + le32_to_cpu(stats->rx.general.num_bt_kills)); +#endif + } else if (len == sizeof(struct iwl_notif_statistics)) { + struct iwl_notif_statistics *stats; + stats = (void *)&pkt->data; + flag = &stats->flag; + common = &stats->general.common; + rx_non_phy = &stats->rx.general; + rx_ofdm = &stats->rx.ofdm; + rx_ofdm_ht = &stats->rx.ofdm_ht; + rx_cck = &stats->rx.cck; + tx = &stats->tx; + bt_activity = NULL; + } else { + WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n", + len, sizeof(struct iwl_bt_notif_statistics), + sizeof(struct iwl_notif_statistics)); + spin_unlock(&priv->statistics.lock); + return; + } + + change = common->temperature != priv->statistics.common.temperature || + (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK); + + iwlagn_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm, + rx_ofdm_ht, rx_cck, tx, bt_activity); + + iwlagn_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp); + + priv->statistics.flag = *flag; + memcpy(&priv->statistics.common, common, sizeof(*common)); + memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy)); + memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm)); + memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht)); + memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck)); + memcpy(&priv->statistics.tx, tx, sizeof(*tx)); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (bt_activity) + memcpy(&priv->statistics.bt_activity, bt_activity, + sizeof(*bt_activity)); +#endif + + priv->rx_statistics_jiffies = stamp; + + set_bit(STATUS_STATISTICS, &priv->status); + + /* Reschedule the statistics timer to occur in + * reg_recalib_period seconds to ensure we get a + * thermal update even if the uCode doesn't give + * us one */ + mod_timer(&priv->statistics_periodic, jiffies + + msecs_to_jiffies(reg_recalib_period * 1000)); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { + iwlagn_rx_calc_noise(priv); + queue_work(priv->workqueue, &priv->run_time_calib_work); + } + if (priv->lib->temperature && change) + priv->lib->temperature(priv); + + spin_unlock(&priv->statistics.lock); +} + +static void iwlagn_rx_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_notif_statistics *stats = (void *)pkt->data; + + if (le32_to_cpu(stats->flag) & UCODE_STATISTICS_CLEAR_MSK) { +#ifdef CONFIG_IWLWIFI_DEBUGFS + memset(&priv->accum_stats, 0, + sizeof(priv->accum_stats)); + memset(&priv->delta_stats, 0, + sizeof(priv->delta_stats)); + memset(&priv->max_delta_stats, 0, + sizeof(priv->max_delta_stats)); +#endif + IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); + } + + iwlagn_rx_statistics(priv, rxb); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical 
temperature RFKILL */ +static void iwlagn_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; + u32 flags = le32_to_cpu(card_state_notif->flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_CARD_DISABLED) ? + "Reached" : "Not reached"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + CT_CARD_DISABLED)) { + + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + } + if (flags & CT_CARD_DISABLED) + iwl_tt_enter_ct_kill(priv); + } + if (!(flags & CT_CARD_DISABLED)) + iwl_tt_exit_ct_kill(priv); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + if (!(flags & RXON_CARD_DISABLED)) + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); +} + +static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) + +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacon_notif *missed_beacon = (void *)pkt->data; + + if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > + priv->missed_beacon_threshold) { + IWL_DEBUG_CALIB(priv, + "missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consecutive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + le32_to_cpu(missed_beacon->num_expected_beacons)); + if (!test_bit(STATUS_SCANNING, &priv->status)) + iwl_init_sensitivity(priv); + } +} + +/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). + * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ +static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + priv->last_phy_res_valid = true; + priv->ampdu_ref++; + memcpy(&priv->last_phy_res, pkt->data, + sizeof(struct iwl_rx_phy_res)); +} + +/* + * returns non-zero if packet should be dropped + */ +static int iwlagn_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + + /* + * All contexts have the same setting here due to it being + * a module parameter, so OK to check any context. + */ + if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags & + RXON_FILTER_DIS_DECRYPT_MSK) + return 0; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return 0; + + IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + /* The uCode has got a bad phase 1 Key, pushes the packet. + * Decryption will be done in SW. 
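+		 * Note: when the TTAK is good this case intentionally falls
+		 * through to the WEP and CCMP handling below, so a frame the
+		 * hardware decrypted correctly still gets RX_FLAG_DECRYPTED.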
*/ + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_KEY_TTAK) + break; + + case RX_RES_STATUS_SEC_TYPE_WEP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) { + /* bad ICV, the packet is destroyed since the + * decryption is inplace, drop it */ + IWL_DEBUG_RX(priv, "Packet destroyed\n"); + return -1; + } + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } + return 0; +} + +static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u16 len, + u32 ampdu_status, + struct iwl_rx_cmd_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + struct iwl_rxon_context *ctx; + unsigned int hdrlen, fraglen; + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + /* In case of HW accelerated crypto and bad decryption, drop */ + if (!iwlwifi_mod_params.sw_crypto && + iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats)) + return; + + /* Dont use dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. + */ + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) { + IWL_ERR(priv, "alloc_skb failed\n"); + return; + } + /* If frame is small enough to fit in skb->head, pull it completely. + * If not, only pull ieee80211_hdr so that splice() or TCP coalesce + * are more efficient. + */ + hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr); + + memcpy(skb_put(skb, hdrlen), hdr, hdrlen); + fraglen = len - hdrlen; + + if (fraglen) { + int offset = (void *)hdr + hdrlen - + rxb_addr(rxb) + rxb_offset(rxb); + + skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, + fraglen, rxb->truesize); + } + + /* + * Wake any queues that were stopped due to a passive channel tx + * failure. This can happen because the regulatory enforcement in + * the device waits for a beacon before allowing transmission, + * sometimes even after already having transmitted frames for the + * association because the new RXON may reset the information. 
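+	 * Receiving a beacon whose addr3 matches one of our contexts'
+	 * BSSID is taken as proof that the channel is usable again, so the
+	 * loop below lifts the passive-no-RX block in that case.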
+ */ + if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) { + for_each_context(priv, ctx) { + if (!ether_addr_equal(hdr->addr3, + ctx->active.bssid_addr)) + continue; + iwlagn_lift_passive_no_rx(priv); + } + } + + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx_napi(priv->hw, skb, priv->napi); +} + +static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) +{ + u32 decrypt_out = 0; + + if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == + RX_RES_STATUS_STATION_FOUND) + decrypt_out |= (RX_RES_STATUS_STATION_FOUND | + RX_RES_STATUS_NO_STATION_INFO_MISMATCH); + + decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); + + /* packet was not encrypted */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_NONE) + return decrypt_out; + + /* packet was encrypted with unknown alg */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_ERR) + return decrypt_out; + + /* decryption was not done in HW */ + if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != + RX_MPDU_RES_STATUS_DEC_DONE_MSK) + return decrypt_out; + + switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { + + case RX_RES_STATUS_SEC_TYPE_CCMP: + /* alg is CCM: check MIC only */ + if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) + /* Bad MIC */ + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + + break; + + case RX_RES_STATUS_SEC_TYPE_TKIP: + if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { + /* Bad TTAK */ + decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; + break; + } + /* fall through if TTAK OK */ + default: + if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + break; + } + + IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", + decrypt_in, decrypt_out); + + return decrypt_out; +} + +/* Calc max signal level (dBm) among 3 possible receivers */ +static int iwlagn_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host + */ + struct iwlagn_non_cfg_phy *ncphy = + (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf; + u32 val, rssi_a, rssi_b, rssi_c, max_rssi; + u8 agc; + + val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]); + agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. + */ + val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]); + rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >> + IWLAGN_OFDM_RSSI_A_BIT_POS; + rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >> + IWLAGN_OFDM_RSSI_B_BIT_POS; + val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]); + rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >> + IWLAGN_OFDM_RSSI_C_BIT_POS; + + max_rssi = max_t(u32, rssi_a, rssi_b); + max_rssi = max_t(u32, max_rssi, rssi_c); + + IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", + rssi_a, rssi_b, rssi_c, max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. 
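+	 * For illustration only (made-up numbers): max_rssi = 80 and
+	 * agc = 90 with an offset of 44 would be reported as
+	 * 80 - 90 - 44 = -54 dBm.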
*/ + return max_rssi - agc - IWLAGN_RSSI_OFFSET; +} + +/* Called for REPLY_RX_MPDU_CMD */ +static void iwlagn_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status = {}; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_phy_res *phy_res; + __le32 rx_pkt_status; + struct iwl_rx_mpdu_res_start *amsdu; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + + if (!priv->last_phy_res_valid) { + IWL_ERR(priv, "MPDU frame without cached PHY data\n"); + return; + } + phy_res = &priv->last_phy_res; + amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data; + header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu)); + len = le16_to_cpu(amsdu->byte_count); + rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len); + ampdu_status = iwlagn_translate_rx_status(priv, + le32_to_cpu(rx_pkt_status)); + + if ((unlikely(phy_res->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n", + phy_res->cfg_phy_cnt); + return; + } + + if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || + !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(rx_pkt_status)); + return; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status.mactime = le64_to_cpu(phy_res->timestamp); + rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), + rx_status.band); + rx_status.rate_idx = + iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); + rx_status.flag = 0; + + /* TSF isn't reliable. In order to allow smooth user experience, + * this W/A doesn't propagate it to the mac80211 */ + /*rx_status.flag |= RX_FLAG_MACTIME_START;*/ + + priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + rx_status.signal = iwlagn_calc_rssi(priv, phy_res); + + IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", + rx_status.signal, (unsigned long long)rx_status.mactime); + + /* + * "antenna number" + * + * It seems that the antenna field in the phy flags value + * is actually a bit field. This is undefined by radiotap, + * it wants an actual antenna number but I always get "7" + * for most legacy frames I receive indicating that the + * same frame was received on all three RX chains. + * + * I think this field should be removed in favor of a + * new 802.11n radiotap field "RX chains" that is defined + * as a bitmask. 
+ */ + rx_status.antenna = + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + + /* set the preamble flag if appropriate */ + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) { + /* + * We know which subframes of an A-MPDU belong + * together since we get a single PHY response + * from the firmware for all of them + */ + rx_status.flag |= RX_FLAG_AMPDU_DETAILS; + rx_status.ampdu_reference = priv->ampdu_ref; + } + + /* Set up the HT phy flags */ + if (rate_n_flags & RATE_MCS_HT_MSK) + rx_status.flag |= RX_FLAG_HT; + if (rate_n_flags & RATE_MCS_HT40_MSK) + rx_status.flag |= RX_FLAG_40MHZ; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status.flag |= RX_FLAG_SHORT_GI; + if (rate_n_flags & RATE_MCS_GF_MSK) + rx_status.flag |= RX_FLAG_HT_GF; + + iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, + rxb, &rx_status); +} + +static void iwlagn_rx_noa_notification(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_wipan_noa_data *new_data, *old_data; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->data; + + /* no condition -- we're in softirq */ + old_data = rcu_dereference_protected(priv->noa_data, true); + + if (noa_notif->noa_active) { + u32 len = le16_to_cpu(noa_notif->noa_attribute.length); + u32 copylen = len; + + /* EID, len, OUI, subtype */ + len += 1 + 1 + 3 + 1; + /* P2P id, P2P length */ + len += 1 + 2; + copylen += 1 + 2; + + new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC); + if (new_data) { + new_data->length = len; + new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC; + new_data->data[1] = len - 2; /* not counting EID, len */ + new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff; + new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff; + new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff; + new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P; + memcpy(&new_data->data[6], &noa_notif->noa_attribute, + copylen); + } + } else + new_data = NULL; + + rcu_assign_pointer(priv->noa_data, new_data); + + if (old_data) + kfree_rcu(old_data, rcu_head); +} + +/** + * iwl_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + */ +void iwl_setup_rx_handlers(struct iwl_priv *priv) +{ + void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); + + handlers = priv->rx_handlers; + + handlers[REPLY_ERROR] = iwlagn_rx_reply_error; + handlers[CHANNEL_SWITCH_NOTIFICATION] = iwlagn_rx_csa; + handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwlagn_rx_spectrum_measure_notif; + handlers[PM_SLEEP_NOTIFICATION] = iwlagn_rx_pm_sleep_notif; + handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwlagn_rx_pm_debug_statistics_notif; + handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif; + handlers[REPLY_ADD_STA] = iwl_add_sta_callback; + + handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. 
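+	 * The REPLY path only differs in that it may clear the debugfs
+	 * accumulators (when the uCode sets the CLEAR flag) before falling
+	 * into the common iwlagn_rx_statistics() handling.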
+ */ + handlers[REPLY_STATISTICS_CMD] = iwlagn_rx_reply_statistics; + handlers[STATISTICS_NOTIFICATION] = iwlagn_rx_statistics; + + iwl_setup_rx_scan_handlers(priv); + + handlers[CARD_STATE_NOTIFICATION] = iwlagn_rx_card_state_notif; + handlers[MISSED_BEACONS_NOTIFICATION] = + iwlagn_rx_missed_beacon_notif; + + /* Rx handlers */ + handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy; + handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx; + + /* block ack */ + handlers[REPLY_COMPRESSED_BA] = + iwlagn_rx_reply_compressed_ba; + + priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; + + /* set up notification wait support */ + iwl_notification_wait_init(&priv->notif_wait); + + /* Set up BT Rx handlers */ + if (priv->lib->bt_params) + iwlagn_bt_rx_handler_setup(priv); +} + +void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + /* + * Do the notification wait before RX handlers so + * even if the RX handler consumes the RXB we have + * access to it in the notification wait entry. + */ + iwl_notification_wait_notify(&priv->notif_wait, pkt); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. See iwl_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + priv->rx_handlers_stats[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd](priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n", + iwl_dvm_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c new file mode 100644 index 000000000000..85ceceb34fcc --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -0,0 +1,1572 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include "iwl-trans.h" +#include "iwl-modparams.h" +#include "dev.h" +#include "agn.h" +#include "calib.h" + +/* + * initialize rxon structure with default values from eeprom + */ +void iwl_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + memset(&ctx->staging, 0, sizeof(ctx->staging)); + + if (!ctx->vif) { + ctx->staging.dev_type = ctx->unused_devtype; + } else + switch (ctx->vif->type) { + case NL80211_IFTYPE_AP: + ctx->staging.dev_type = ctx->ap_devtype; + break; + + case NL80211_IFTYPE_STATION: + ctx->staging.dev_type = ctx->station_devtype; + ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case NL80211_IFTYPE_ADHOC: + ctx->staging.dev_type = ctx->ibss_devtype; + ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case NL80211_IFTYPE_MONITOR: + ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER; + break; + + default: + IWL_ERR(priv, "Unsupported interface type %d\n", + ctx->vif->type); + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ctx->staging.channel = + cpu_to_le16(priv->hw->conf.chandef.chan->hw_value); + priv->band = priv->hw->conf.chandef.chan->band; + + iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); + + /* clear both MIX and PURE40 mode flag */ + ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | + RXON_FLG_CHANNEL_MODE_PURE_40); + if (ctx->vif) + memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); + + ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; + ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; + ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; +} + +static int iwlagn_disable_bss(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_rxon_cmd *send) +{ + __le32 old_filter = send->filter_flags; + int ret; + + send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, + 0, sizeof(*send), send); + + send->filter_flags = old_filter; + + if (ret) + IWL_DEBUG_QUIET_RFKILL(priv, + "Error clearing ASSOC_MSK on BSS (%d)\n", ret); + + return ret; +} + +static int iwlagn_disable_pan(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_rxon_cmd *send) +{ + struct iwl_notification_wait disable_wait; + __le32 old_filter = send->filter_flags; + u8 old_dev_type = send->dev_type; + int ret; + static const u16 deactivate_cmd[] = { + REPLY_WIPAN_DEACTIVATION_COMPLETE + }; + + iwl_init_notification_wait(&priv->notif_wait, &disable_wait, + deactivate_cmd, ARRAY_SIZE(deactivate_cmd), + NULL, NULL); + + send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + send->dev_type = RXON_DEV_TYPE_P2P; + ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, + 0, sizeof(*send), send); + + send->filter_flags = old_filter; + send->dev_type = old_dev_type; + + if (ret) { + IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); + iwl_remove_notification(&priv->notif_wait, &disable_wait); + } else { + ret = iwl_wait_notification(&priv->notif_wait, + &disable_wait, HZ); + if (ret) + IWL_ERR(priv, "Timed out waiting for PAN disable\n"); + } + + return ret; +} + +static int iwlagn_disconn_pan(struct iwl_priv *priv, + 
struct iwl_rxon_context *ctx, + struct iwl_rxon_cmd *send) +{ + __le32 old_filter = send->filter_flags; + int ret; + + send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, + sizeof(*send), send); + + send->filter_flags = old_filter; + + return ret; +} + +static void iwlagn_update_qos(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int ret; + + if (!ctx->is_active) + return; + + ctx->qos_data.def_qos_parm.qos_flags = 0; + + if (ctx->qos_data.qos_active) + ctx->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + if (ctx->ht.enabled) + ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; + + IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", + ctx->qos_data.qos_active, + ctx->qos_data.def_qos_parm.qos_flags); + + ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0, + sizeof(struct iwl_qosparam_cmd), + &ctx->qos_data.def_qos_parm); + if (ret) + IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); +} + +static int iwlagn_update_beacon(struct iwl_priv *priv, + struct ieee80211_vif *vif) +{ + lockdep_assert_held(&priv->mutex); + + dev_kfree_skb(priv->beacon_skb); + priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif); + if (!priv->beacon_skb) + return -ENOMEM; + return iwlagn_send_beacon_cmd(priv); +} + +static int iwlagn_send_rxon_assoc(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int ret = 0; + struct iwl_rxon_assoc_cmd rxon_assoc; + const struct iwl_rxon_cmd *rxon1 = &ctx->staging; + const struct iwl_rxon_cmd *rxon2 = &ctx->active; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_ht_single_stream_basic_rates == + rxon2->ofdm_ht_single_stream_basic_rates) && + (rxon1->ofdm_ht_dual_stream_basic_rates == + rxon2->ofdm_ht_dual_stream_basic_rates) && + (rxon1->ofdm_ht_triple_stream_basic_rates == + rxon2->ofdm_ht_triple_stream_basic_rates) && + (rxon1->acquisition_data == rxon2->acquisition_data) && + (rxon1->rx_chain == rxon2->rx_chain) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = ctx->staging.flags; + rxon_assoc.filter_flags = ctx->staging.filter_flags; + rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; + rxon_assoc.reserved1 = 0; + rxon_assoc.reserved2 = 0; + rxon_assoc.reserved3 = 0; + rxon_assoc.ofdm_ht_single_stream_basic_rates = + ctx->staging.ofdm_ht_single_stream_basic_rates; + rxon_assoc.ofdm_ht_dual_stream_basic_rates = + ctx->staging.ofdm_ht_dual_stream_basic_rates; + rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; + rxon_assoc.ofdm_ht_triple_stream_basic_rates = + ctx->staging.ofdm_ht_triple_stream_basic_rates; + rxon_assoc.acquisition_data = ctx->staging.acquisition_data; + + ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd, + CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc); + return ret; +} + +static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) +{ + u16 new_val; + u16 beacon_factor; + + /* + * If mac80211 hasn't given us a beacon interval, program + * the default into the device (not checking this here + * would cause the adjustment below to return the maximum + * value, which may break PAN.) 
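+	 * Otherwise the interval is divided by (beacon_val / max_beacon_val
+	 * + 1), which keeps the programmed value below max_beacon_val; the
+	 * example further down shows the effect for 7000 against a 4096
+	 * limit.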
+ */ + if (!beacon_val) + return DEFAULT_BEACON_INTERVAL; + + /* + * If the beacon interval we obtained from the peer + * is too large, we'll have to wake up more often + * (and in IBSS case, we'll beacon too much) + * + * For example, if max_beacon_val is 4096, and the + * requested beacon interval is 7000, we'll have to + * use 3500 to be able to wake up on the beacons. + * + * This could badly influence beacon detection stats. + */ + + beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; + new_val = beacon_val / beacon_factor; + + if (!new_val) + new_val = max_beacon_val; + + return new_val; +} + +static int iwl_send_rxon_timing(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + u64 tsf; + s32 interval_tm, rem; + struct ieee80211_conf *conf = NULL; + u16 beacon_int; + struct ieee80211_vif *vif = ctx->vif; + + conf = &priv->hw->conf; + + lockdep_assert_held(&priv->mutex); + + memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); + + ctx->timing.timestamp = cpu_to_le64(priv->timestamp); + ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); + + beacon_int = vif ? vif->bss_conf.beacon_int : 0; + + /* + * TODO: For IBSS we need to get atim_window from mac80211, + * for now just always use 0 + */ + ctx->timing.atim_window = 0; + + if (ctx->ctxid == IWL_RXON_CTX_PAN && + (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) && + iwl_is_associated(priv, IWL_RXON_CTX_BSS) && + priv->contexts[IWL_RXON_CTX_BSS].vif && + priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) { + ctx->timing.beacon_interval = + priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval; + beacon_int = le16_to_cpu(ctx->timing.beacon_interval); + } else if (ctx->ctxid == IWL_RXON_CTX_BSS && + iwl_is_associated(priv, IWL_RXON_CTX_PAN) && + priv->contexts[IWL_RXON_CTX_PAN].vif && + priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int && + (!iwl_is_associated_ctx(ctx) || !ctx->vif || + !ctx->vif->bss_conf.beacon_int)) { + ctx->timing.beacon_interval = + priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval; + beacon_int = le16_to_cpu(ctx->timing.beacon_interval); + } else { + beacon_int = iwl_adjust_beacon_interval(beacon_int, + IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT); + ctx->timing.beacon_interval = cpu_to_le16(beacon_int); + } + + ctx->beacon_int = beacon_int; + + tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ + interval_tm = beacon_int * TIME_UNIT; + rem = do_div(tsf, interval_tm); + ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); + + ctx->timing.dtim_period = vif ? 
(vif->bss_conf.dtim_period ?: 1) : 1; + + IWL_DEBUG_ASSOC(priv, + "beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(ctx->timing.beacon_interval), + le32_to_cpu(ctx->timing.beacon_init_val), + le16_to_cpu(ctx->timing.atim_window)); + + return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd, + 0, sizeof(ctx->timing), &ctx->timing); +} + +static int iwlagn_rxon_disconn(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int ret; + struct iwl_rxon_cmd *active = (void *)&ctx->active; + + if (ctx->ctxid == IWL_RXON_CTX_BSS) { + ret = iwlagn_disable_bss(priv, ctx, &ctx->staging); + } else { + ret = iwlagn_disable_pan(priv, ctx, &ctx->staging); + if (ret) + return ret; + if (ctx->vif) { + ret = iwl_send_rxon_timing(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); + return ret; + } + ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging); + } + } + if (ret) + return ret; + + /* + * Un-assoc RXON clears the station table and WEP + * keys, so we have to restore those afterwards. + */ + iwl_clear_ucode_stations(priv, ctx); + /* update -- might need P2P now */ + iwl_update_bcast_station(priv, ctx); + iwl_restore_stations(priv, ctx); + ret = iwl_restore_default_wep_keys(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); + return ret; + } + + memcpy(active, &ctx->staging, sizeof(*active)); + return 0; +} + +static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) +{ + int ret; + s8 prev_tx_power; + bool defer; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED) + return 0; + + lockdep_assert_held(&priv->mutex); + + if (priv->tx_power_user_lmt == tx_power && !force) + return 0; + + if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { + IWL_WARN(priv, + "Requested user TXPOWER %d below lower limit %d.\n", + tx_power, + IWLAGN_TX_POWER_TARGET_POWER_MIN); + return -EINVAL; + } + + if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) { + IWL_WARN(priv, + "Requested user TXPOWER %d above upper limit %d.\n", + tx_power, priv->nvm_data->max_tx_pwr_half_dbm); + return -EINVAL; + } + + if (!iwl_is_ready_rf(priv)) + return -EIO; + + /* scan complete and commit_rxon use tx_power_next value, + * it always need to be updated for newest request */ + priv->tx_power_next = tx_power; + + /* do not set tx power when scanning or channel changing */ + defer = test_bit(STATUS_SCANNING, &priv->status) || + memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); + if (defer && !force) { + IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); + return 0; + } + + prev_tx_power = priv->tx_power_user_lmt; + priv->tx_power_user_lmt = tx_power; + + ret = iwlagn_send_tx_power(priv); + + /* if fail to set tx_power, restore the orig. 
tx power */ + if (ret) { + priv->tx_power_user_lmt = prev_tx_power; + priv->tx_power_next = prev_tx_power; + } + return ret; +} + +static int iwlagn_rxon_connect(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int ret; + struct iwl_rxon_cmd *active = (void *)&ctx->active; + + /* RXON timing must be before associated RXON */ + if (ctx->ctxid == IWL_RXON_CTX_BSS) { + ret = iwl_send_rxon_timing(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); + return ret; + } + } + /* QoS info may be cleared by previous un-assoc RXON */ + iwlagn_update_qos(priv, ctx); + + /* + * We'll run into this code path when beaconing is + * enabled, but then we also need to send the beacon + * to the device. + */ + if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) { + ret = iwlagn_update_beacon(priv, ctx->vif); + if (ret) { + IWL_ERR(priv, + "Error sending required beacon (%d)!\n", + ret); + return ret; + } + } + + priv->start_calib = 0; + /* + * Apply the new configuration. + * + * Associated RXON doesn't clear the station table in uCode, + * so we don't need to restore stations etc. after this. + */ + ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, + sizeof(struct iwl_rxon_cmd), &ctx->staging); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + memcpy(active, &ctx->staging, sizeof(*active)); + + /* IBSS beacon needs to be sent after setting assoc */ + if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC)) + if (iwlagn_update_beacon(priv, ctx->vif)) + IWL_ERR(priv, "Error sending IBSS beacon\n"); + iwl_init_sensitivity(priv); + + /* + * If we issue a new RXON command which required a tune then + * we must send a new TXPOWER command or we won't be able to + * Tx any frames. + * + * It's expected we set power here if channel is changing. + */ + ret = iwl_set_tx_power(priv, priv->tx_power_next, true); + if (ret) { + IWL_ERR(priv, "Error sending TX power (%d)\n", ret); + return ret; + } + + if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && + priv->cfg->ht_params && priv->cfg->ht_params->smps_mode) + ieee80211_request_smps(ctx->vif, + priv->cfg->ht_params->smps_mode); + + return 0; +} + +int iwlagn_set_pan_params(struct iwl_priv *priv) +{ + struct iwl_wipan_params_cmd cmd; + struct iwl_rxon_context *ctx_bss, *ctx_pan; + int slot0 = 300, slot1 = 0; + int ret; + + if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS)) + return 0; + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + + lockdep_assert_held(&priv->mutex); + + ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS]; + ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN]; + + /* + * If the PAN context is inactive, then we don't need + * to update the PAN parameters, the last thing we'll + * have done before it goes inactive is making the PAN + * parameters be WLAN-only. + */ + if (!ctx_pan->is_active) + return 0; + + memset(&cmd, 0, sizeof(cmd)); + + /* only 2 slots are currently allowed */ + cmd.num_slots = 2; + + cmd.slots[0].type = 0; /* BSS */ + cmd.slots[1].type = 1; /* PAN */ + + if (ctx_bss->vif && ctx_pan->vif) { + int bcnint = ctx_pan->beacon_int; + int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; + + /* should be set, but seems unused?? 
*/ + cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE); + + if (ctx_pan->vif->type == NL80211_IFTYPE_AP && + bcnint && + bcnint != ctx_bss->beacon_int) { + IWL_ERR(priv, + "beacon intervals don't match (%d, %d)\n", + ctx_bss->beacon_int, ctx_pan->beacon_int); + } else + bcnint = max_t(int, bcnint, + ctx_bss->beacon_int); + if (!bcnint) + bcnint = DEFAULT_BEACON_INTERVAL; + slot0 = bcnint / 2; + slot1 = bcnint - slot0; + + if (test_bit(STATUS_SCAN_HW, &priv->status) || + (!ctx_bss->vif->bss_conf.idle && + !ctx_bss->vif->bss_conf.assoc)) { + slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; + slot1 = IWL_MIN_SLOT_TIME; + } else if (!ctx_pan->vif->bss_conf.idle && + !ctx_pan->vif->bss_conf.assoc) { + slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; + slot0 = IWL_MIN_SLOT_TIME; + } + } else if (ctx_pan->vif) { + slot0 = 0; + slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) * + ctx_pan->beacon_int; + slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); + + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; + slot1 = IWL_MIN_SLOT_TIME; + } + } + + cmd.slots[0].width = cpu_to_le16(slot0); + cmd.slots[1].width = cpu_to_le16(slot1); + + ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); + + return ret; +} + +static void _iwl_set_rxon_ht(struct iwl_priv *priv, + struct iwl_ht_config *ht_conf, + struct iwl_rxon_context *ctx) +{ + struct iwl_rxon_cmd *rxon = &ctx->staging; + + if (!ctx->ht.enabled) { + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | + RXON_FLG_HT40_PROT_MSK | + RXON_FLG_HT_PROT_MSK); + return; + } + + /* FIXME: if the definition of ht.protection changed, the "translation" + * will be needed for rxon->flags + */ + rxon->flags |= cpu_to_le32(ctx->ht.protection << + RXON_FLG_HT_OPERATING_MODE_POS); + + /* Set up channel bandwidth: + * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ + /* clear the HT channel mode before set the mode */ + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) { + /* pure ht40 */ + if (ctx->ht.protection == + IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { + rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; + /* + * Note: control channel is opposite of extension + * channel + */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + } + } else { + /* + * Note: control channel is opposite of extension + * channel + */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + default: + /* + * channel location only valid if in Mixed + * mode + */ + IWL_ERR(priv, + "invalid extension channel offset\n"); + break; + } + } + } else { + rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; + } + + iwlagn_set_rxon_chain(priv, ctx); + + IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x\n", + le32_to_cpu(rxon->flags), ctx->ht.protection, + ctx->ht.extension_chan_offset); +} + 
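As a reading aid (not part of the patch itself): the channel-mode selection in _iwl_set_rxon_ht() above condenses to a small decision rule. The sketch below uses invented sketch_* names and plain booleans in place of the real RXON_FLG_* bit operations, and leaves out the SEC_NONE case that the real code logs as an invalid extension channel offset.

#include <stdbool.h>

enum sketch_chan_mode { SKETCH_LEGACY, SKETCH_MIXED, SKETCH_PURE_40 };

struct sketch_ht40_choice {
	enum sketch_chan_mode mode;
	bool ctrl_chan_is_upper;	/* stands in for RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK */
};

/*
 * ht40_allowed ~ iwl_is_ht40_tx_allowed()
 * prot_20mhz   ~ ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ
 * sec_above    ~ extension channel offset is IEEE80211_HT_PARAM_CHA_SEC_ABOVE
 */
static struct sketch_ht40_choice sketch_pick_mode(bool ht40_allowed,
						  bool prot_20mhz,
						  bool sec_above)
{
	struct sketch_ht40_choice c = { SKETCH_LEGACY, false };

	if (!ht40_allowed)
		return c;		/* 20 MHz legacy operation */

	/* real code: pure 40 MHz when protection == PROTECTION_20MHZ, mixed otherwise */
	c.mode = prot_20mhz ? SKETCH_PURE_40 : SKETCH_MIXED;
	/* the control channel sits opposite the extension channel */
	c.ctrl_chan_is_upper = !sec_above;
	return c;
}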
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) +{ + struct iwl_rxon_context *ctx; + + for_each_context(priv, ctx) + _iwl_set_rxon_ht(priv, ht_conf, ctx); +} + +/** + * iwl_set_rxon_channel - Set the band and channel values in staging RXON + * @ch: requested channel as a pointer to struct ieee80211_channel + + * NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the ch->band + */ +void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx) +{ + enum ieee80211_band band = ch->band; + u16 channel = ch->hw_value; + + if ((le16_to_cpu(ctx->staging.channel) == channel) && + (priv->band == band)) + return; + + ctx->staging.channel = cpu_to_le16(channel); + if (band == IEEE80211_BAND_5GHZ) + ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; + else + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + + priv->band = band; + + IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); + +} + +void iwl_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + if (band == IEEE80211_BAND_5GHZ) { + ctx->staging.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_post_associate() */ + if (vif && vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; + ctx->staging.flags &= ~RXON_FLG_CCK_MSK; + } +} + +static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, int hw_decrypt) +{ + struct iwl_rxon_cmd *rxon = &ctx->staging; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + +} + +/* validate RXON structure is valid */ +static int iwl_check_rxon_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_rxon_cmd *rxon = &ctx->staging; + u32 errors = 0; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { + IWL_WARN(priv, "check 2.4G: wrong narrow\n"); + errors |= BIT(0); + } + if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { + IWL_WARN(priv, "check 2.4G: wrong radar\n"); + errors |= BIT(1); + } + } else { + if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "check 5.2G: not short slot!\n"); + errors |= BIT(2); + } + if (rxon->flags & RXON_FLG_CCK_MSK) { + IWL_WARN(priv, "check 5.2G: CCK!\n"); + errors |= BIT(3); + } + } + if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { + IWL_WARN(priv, "mac/bssid mcast!\n"); + errors |= BIT(4); + } + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && + (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { + IWL_WARN(priv, "neither 1 nor 6 are basic\n"); + errors |= BIT(5); + } + + if (le16_to_cpu(rxon->assoc_id) > 2007) { + IWL_WARN(priv, "aid > 2007\n"); + errors |= BIT(6); + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "CCK and short slot\n"); + errors |= BIT(7); + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { + IWL_WARN(priv, 
"CCK and auto detect\n"); + errors |= BIT(8); + } + + if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == + RXON_FLG_TGG_PROTECT_MSK) { + IWL_WARN(priv, "TGg but no auto-detect\n"); + errors |= BIT(9); + } + + if (rxon->channel == 0) { + IWL_WARN(priv, "zero channel is invalid\n"); + errors |= BIT(10); + } + + WARN(errors, "Invalid RXON (%#x), channel %d", + errors, le16_to_cpu(rxon->channel)); + + return errors ? -EINVAL : 0; +} + +/** + * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed + * @priv: staging_rxon is compared to active_rxon + * + * If the RXON structure is changing enough to require a new tune, + * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that + * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. + */ +static int iwl_full_rxon_required(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_rxon_cmd *staging = &ctx->staging; + const struct iwl_rxon_cmd *active = &ctx->active; + +#define CHK(cond) \ + if ((cond)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ + return 1; \ + } + +#define CHK_NEQ(c1, c2) \ + if ((c1) != (c2)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " \ + #c1 " != " #c2 " - %d != %d\n", \ + (c1), (c2)); \ + return 1; \ + } + + /* These items are only settable from the full RXON command */ + CHK(!iwl_is_associated_ctx(ctx)); + CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr)); + CHK(!ether_addr_equal(staging->node_addr, active->node_addr)); + CHK(!ether_addr_equal(staging->wlap_bssid_addr, + active->wlap_bssid_addr)); + CHK_NEQ(staging->dev_type, active->dev_type); + CHK_NEQ(staging->channel, active->channel); + CHK_NEQ(staging->air_propagation, active->air_propagation); + CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, + active->ofdm_ht_single_stream_basic_rates); + CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, + active->ofdm_ht_dual_stream_basic_rates); + CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates, + active->ofdm_ht_triple_stream_basic_rates); + CHK_NEQ(staging->assoc_id, active->assoc_id); + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, + active->flags & RXON_FLG_BAND_24G_MSK); + + /* Check if we are switching association toggle */ + CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, + active->filter_flags & RXON_FILTER_ASSOC_MSK); + +#undef CHK +#undef CHK_NEQ + + return 0; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +void iwl_print_rx_config_cmd(struct iwl_priv *priv, + enum iwl_rxon_context_id ctxid) +{ + struct iwl_rxon_context *ctx = &priv->contexts[ctxid]; + struct iwl_rxon_cmd *rxon = &ctx->staging; + + IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); + iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", + le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", + le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", + rxon->cck_basic_rates); + IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); + 
IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); + IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", + le16_to_cpu(rxon->assoc_id)); +} +#endif + +static void iwl_calc_basic_rates(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int lowest_present_ofdm = 100; + int lowest_present_cck = 100; + u8 cck = 0; + u8 ofdm = 0; + + if (ctx->vif) { + struct ieee80211_supported_band *sband; + unsigned long basic = ctx->vif->bss_conf.basic_rates; + int i; + + sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band]; + + for_each_set_bit(i, &basic, BITS_PER_LONG) { + int hw = sband->bitrates[i].hw_value; + if (hw >= IWL_FIRST_OFDM_RATE) { + ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); + if (lowest_present_ofdm > hw) + lowest_present_ofdm = hw; + } else { + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + cck |= BIT(hw); + if (lowest_present_cck > hw) + lowest_present_cck = hw; + } + } + } + + /* + * Now we've got the basic rates as bitmaps in the ofdm and cck + * variables. This isn't sufficient though, as there might not + * be all the right rates in the bitmap. E.g. if the only basic + * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps + * and 6 Mbps because the 802.11-2007 standard says in 9.6: + * + * [...] a STA responding to a received frame shall transmit + * its Control Response frame [...] at the highest rate in the + * BSSBasicRateSet parameter that is less than or equal to the + * rate of the immediately previous frame in the frame exchange + * sequence ([...]) and that is of the same modulation class + * ([...]) as the received frame. If no rate contained in the + * BSSBasicRateSet parameter meets these conditions, then the + * control frame sent in response to a received frame shall be + * transmitted at the highest mandatory rate of the PHY that is + * less than or equal to the rate of the received frame, and + * that is of the same modulation class as the received frame. + * + * As a consequence, we need to add all mandatory rates that are + * lower than all of the basic rates to these bitmaps. + */ + + if (IWL_RATE_24M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE; + if (IWL_RATE_12M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE; + /* 6M already there or needed so always add */ + ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE; + + /* + * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. + * Note, however: + * - if no CCK rates are basic, it must be ERP since there must + * be some basic rates at all, so they're OFDM => ERP PHY + * (or we're in 5 GHz, and the cck bitmap will never be used) + * - if 11M is a basic rate, it must be ERP as well, so add 5.5M + * - if 5.5M is basic, 1M and 2M are mandatory + * - if 2M is basic, 1M is mandatory + * - if 1M is basic, that's the only valid ACK rate. + * As a consequence, it's not as complicated as it sounds, just add + * any lower rates to the ACK rate bitmap. 
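+ *
+ * For example, if the only basic CCK rates are 5.5M and 11M, the loop
+ * above leaves lowest_present_cck at the 5.5M index, so the code below
+ * ORs in the 2M bit (5.5M being basic makes 1M and 2M mandatory) and,
+ * as always, the 1M bit -- the resulting ACK bitmap is {1, 2, 5.5, 11}.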
+ */ + if (IWL_RATE_11M_INDEX < lowest_present_cck) + cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_5M_INDEX < lowest_present_cck) + cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_2M_INDEX < lowest_present_cck) + cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; + /* 1M already there or needed so always add */ + cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE; + + IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n", + cck, ofdm); + + /* "basic_rates" is a misnomer here -- should be called ACK rates */ + ctx->staging.cck_basic_rates = cck; + ctx->staging.ofdm_basic_rates = ofdm; +} + +/** + * iwlagn_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is committed to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. + * + * The connect/disconnect flow should be as the following: + * + * 1. make sure send RXON command with association bit unset if not connect + * this should include the channel and the band for the candidate + * to be connected to + * 2. Add Station before RXON association with the AP + * 3. RXON_timing has to send before RXON for connection + * 4. full RXON command - associated bit set + * 5. use RXON_ASSOC command to update any flags changes + */ +int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_rxon_cmd *active = (void *)&ctx->active; + bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); + int ret; + + lockdep_assert_held(&priv->mutex); + + if (!iwl_is_alive(priv)) + return -EBUSY; + + /* This function hardcodes a bunch of dual-mode assumptions */ + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + + if (!ctx->is_active) + return 0; + + /* always get timestamp with Rx frame */ + ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; + + /* recalculate basic rates */ + iwl_calc_basic_rates(priv, ctx); + + /* + * force CTS-to-self frames protection if RTS-CTS is not preferred + * one aggregation protection method + */ + if (!priv->hw_params.use_rts_for_aggregation) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + + if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || + !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + iwl_print_rx_config_cmd(priv, ctx->ctxid); + ret = iwl_check_rxon_cmd(priv, ctx); + if (ret) { + IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* + * receive commit_rxon request + * abort any previous channel switch if still in process + */ + if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) && + (priv->switch_channel != ctx->staging.channel)) { + IWL_DEBUG_11H(priv, "abort channel switch on %d\n", + le16_to_cpu(priv->switch_channel)); + iwl_chswitch_done(priv, false); + } + + /* + * If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. 
+ */ + if (!iwl_full_rxon_required(priv, ctx)) { + ret = iwlagn_send_rxon_assoc(priv, ctx); + if (ret) { + IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); + return ret; + } + + memcpy(active, &ctx->staging, sizeof(*active)); + /* + * We do not commit tx power settings while channel changing, + * do it now if after settings changed. + */ + iwl_set_tx_power(priv, priv->tx_power_next, false); + + /* make sure we are in the right PS state */ + iwl_power_update_mode(priv, true); + + return 0; + } + + iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto); + + IWL_DEBUG_INFO(priv, + "Going to commit RXON\n" + " * with%s RXON_FILTER_ASSOC_MSK\n" + " * channel = %d\n" + " * bssid = %pM\n", + (new_assoc ? "" : "out"), + le16_to_cpu(ctx->staging.channel), + ctx->staging.bssid_addr); + + /* + * Always clear associated first, but with the correct config. + * This is required as for example station addition for the + * AP station must be done after the BSSID is set to correctly + * set up filters in the device. + */ + ret = iwlagn_rxon_disconn(priv, ctx); + if (ret) + return ret; + + ret = iwlagn_set_pan_params(priv); + if (ret) + return ret; + + if (new_assoc) + return iwlagn_rxon_connect(priv, ctx); + + return 0; +} + +void iwlagn_config_ht40(struct ieee80211_conf *conf, + struct iwl_rxon_context *ctx) +{ + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } +} + +int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_rxon_context *ctx; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = conf->chandef.chan; + int ret = 0; + + IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed); + + mutex_lock(&priv->mutex); + + if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); + goto out; + } + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + goto out; + } + + if (changed & (IEEE80211_CONF_CHANGE_SMPS | + IEEE80211_CONF_CHANGE_CHANNEL)) { + /* mac80211 uses static for non-HT which is what we want */ + priv->current_ht_config.smps = conf->smps_mode; + + /* + * Recalculate chain counts. + * + * If monitor mode is enabled then mac80211 will + * set up the SM PS mode to OFF if an HT channel is + * configured. + */ + for_each_context(priv, ctx) + iwlagn_set_rxon_chain(priv, ctx); + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + for_each_context(priv, ctx) { + /* Configure HT40 channels */ + if (ctx->ht.enabled != conf_is_ht(conf)) + ctx->ht.enabled = conf_is_ht(conf); + + if (ctx->ht.enabled) { + /* if HT40 is used, it should not change + * after associated except channel switch */ + if (!ctx->ht.is_40mhz || + !iwl_is_associated_ctx(ctx)) + iwlagn_config_ht40(conf, ctx); + } else + ctx->ht.is_40mhz = false; + + /* + * Default to no protection. 
Protection mode will + * later be set from BSS config in iwl_ht_conf + */ + ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + /* if we are switching from ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if (le16_to_cpu(ctx->staging.channel) != + channel->hw_value) + ctx->staging.flags = 0; + + iwl_set_rxon_channel(priv, channel, ctx); + iwl_set_rxon_ht(priv, &priv->current_ht_config); + + iwl_set_flags_for_band(priv, ctx, channel->band, + ctx->vif); + } + + iwl_update_bcast_stations(priv); + } + + if (changed & (IEEE80211_CONF_CHANGE_PS | + IEEE80211_CONF_CHANGE_IDLE)) { + ret = iwl_power_update_mode(priv, false); + if (ret) + IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", + priv->tx_power_user_lmt, conf->power_level); + + iwl_set_tx_power(priv, conf->power_level, false); + } + + for_each_context(priv, ctx) { + if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + continue; + iwlagn_commit_rxon(priv, ctx); + } + out: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static void iwlagn_check_needed_chains(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_bss_conf *bss_conf) +{ + struct ieee80211_vif *vif = ctx->vif; + struct iwl_rxon_context *tmp; + struct ieee80211_sta *sta; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct ieee80211_sta_ht_cap *ht_cap; + bool need_multiple; + + lockdep_assert_held(&priv->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (!sta) { + /* + * If at all, this can only happen through a race + * when the AP disconnects us while we're still + * setting up the connection, in that case mac80211 + * will soon tell us about that. + */ + need_multiple = false; + rcu_read_unlock(); + break; + } + + ht_cap = &sta->ht_cap; + + need_multiple = true; + + /* + * If the peer advertises no support for receiving 2 and 3 + * stream MCS rates, it can't be transmitting them either. + */ + if (ht_cap->mcs.rx_mask[1] == 0 && + ht_cap->mcs.rx_mask[2] == 0) { + need_multiple = false; + } else if (!(ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_DEFINED)) { + /* If it can't TX MCS at all ... */ + need_multiple = false; + } else if (ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_RX_DIFF) { + int maxstreams; + + /* + * But if it can receive them, it might still not + * be able to transmit them, which is what we need + * to check here -- so check the number of streams + * it advertises for TX (if different from RX). 
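+ * The MAX_STREAMS field encodes "number of streams - 1", hence the +1
+ * below; a decoded value of 1 means the peer only transmits a single
+ * stream, so a single chain is enough on our side as well.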
+ */ + + maxstreams = (ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK); + maxstreams >>= + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + maxstreams += 1; + + if (maxstreams <= 1) + need_multiple = false; + } + + rcu_read_unlock(); + break; + case NL80211_IFTYPE_ADHOC: + /* currently */ + need_multiple = false; + break; + default: + /* only AP really */ + need_multiple = true; + break; + } + + ctx->ht_need_multiple_chains = need_multiple; + + if (!need_multiple) { + /* check all contexts */ + for_each_context(priv, tmp) { + if (!tmp->vif) + continue; + if (tmp->ht_need_multiple_chains) { + need_multiple = true; + break; + } + } + } + + ht_conf->single_chain_sufficient = !need_multiple; +} + +static void iwlagn_chain_noise_reset(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + int ret; + + if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) + return; + + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && + iwl_is_any_associated(priv)) { + struct iwl_calib_chain_noise_reset_cmd cmd; + + /* clear data for chain noise calibration algorithm */ + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + + memset(&cmd, 0, sizeof(cmd)); + iwl_set_calib_hdr(&cmd.hdr, + priv->phy_calib_chain_noise_reset_cmd); + ret = iwl_dvm_send_cmd_pdu(priv, + REPLY_PHY_CALIBRATION_CMD, + 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, + "Could not send REPLY_PHY_CALIBRATION_CMD\n"); + data->state = IWL_CHAIN_NOISE_ACCUMULATE; + IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); + } +} + +void iwlagn_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + int ret; + bool force = false; + + mutex_lock(&priv->mutex); + + if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { + /* + * If we go idle, then clearly no "passive-no-rx" + * workaround is needed any more, this is a reset. 
+ */ + iwlagn_lift_passive_no_rx(priv); + } + + if (unlikely(!iwl_is_ready(priv))) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + if (unlikely(!ctx->vif)) { + IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n"); + mutex_unlock(&priv->mutex); + return; + } + + if (changes & BSS_CHANGED_BEACON_INT) + force = true; + + if (changes & BSS_CHANGED_QOS) { + ctx->qos_data.qos_active = bss_conf->qos; + iwlagn_update_qos(priv, ctx); + } + + ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (changes & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + priv->timestamp = bss_conf->sync_tsf; + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + } else { + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + if (ctx->ctxid == IWL_RXON_CTX_BSS) + priv->have_rekey_data = false; + } + + iwlagn_bt_coex_rssi_monitor(priv); + } + + if (ctx->ht.enabled) { + ctx->ht.protection = bss_conf->ht_operation_mode & + IEEE80211_HT_OP_MODE_PROTECTION; + ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + iwlagn_check_needed_chains(priv, ctx, bss_conf); + iwl_set_rxon_ht(priv, &priv->current_ht_config); + } + + iwlagn_set_rxon_chain(priv, ctx); + + if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) + ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + + if (bss_conf->use_cts_prot) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + else + ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; + + memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); + + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { + if (vif->bss_conf.enable_beacon) { + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + priv->beacon_ctx = ctx; + } else { + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + priv->beacon_ctx = NULL; + } + } + + /* + * If the ucode decides to do beacon filtering before + * association, it will lose beacons that are needed + * before sending frames out on passive channels. This + * causes association failures on those channels. Enable + * receiving beacons in such cases. + */ + + if (vif->type == NL80211_IFTYPE_STATION) { + if (!bss_conf->assoc) + ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK; + else + ctx->staging.filter_flags &= + ~RXON_FILTER_BCON_AWARE_MSK; + } + + if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + iwlagn_commit_rxon(priv, ctx); + + if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { + /* + * The chain noise calibration will enable PM upon + * completion. If calibration has already been run + * then we need to enable power management here. + */ + if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) + iwl_power_update_mode(priv, false); + + /* Enable RX differential gain and sensitivity calibrations */ + iwlagn_chain_noise_reset(priv); + priv->start_calib = 1; + } + + if (changes & BSS_CHANGED_IBSS) { + ret = iwlagn_manage_ibss_station(priv, vif, + bss_conf->ibss_joined); + if (ret) + IWL_ERR(priv, "failed to %s IBSS station %pM\n", + bss_conf->ibss_joined ? 
"add" : "remove", + bss_conf->bssid); + } + + if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) { + if (iwlagn_update_beacon(priv, vif)) + IWL_ERR(priv, "Error updating beacon\n"); + } + + mutex_unlock(&priv->mutex); +} + +void iwlagn_post_scan(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + + /* + * We do not commit power settings while scan is pending, + * do it now if the settings changed. + */ + iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false); + iwl_set_tx_power(priv, priv->tx_power_next, false); + + /* + * Since setting the RXON may have been deferred while + * performing the scan, fire one off if needed + */ + for_each_context(priv, ctx) + if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + iwlagn_commit_rxon(priv, ctx); + + iwlagn_set_pan_params(priv); +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c new file mode 100644 index 000000000000..648159495bbc --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -0,0 +1,1075 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include +#include +#include +#include + +#include "dev.h" +#include "agn.h" + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. */ +#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (20) + +#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3) +#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2) + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). 
*/ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 +#define MAX_SCAN_CHANNEL 50 + +/* For reset radio, need minimal dwell time only */ +#define IWL_RADIO_RESET_DWELL_TIME 5 + +static int iwl_send_scan_abort(struct iwl_priv *priv) +{ + int ret; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .flags = CMD_WANT_SKB, + }; + __le32 *status; + + /* Exit instantly with error when device is not ready + * to receive scan abort command or it does not perform + * hardware scan currently */ + if (!test_bit(STATUS_READY, &priv->status) || + !test_bit(STATUS_SCAN_HW, &priv->status) || + test_bit(STATUS_FW_ERROR, &priv->status)) + return -EIO; + + ret = iwl_dvm_send_cmd(priv, &cmd); + if (ret) + return ret; + + status = (void *)cmd.resp_pkt->data; + if (*status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. */ + IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", + le32_to_cpu(*status)); + ret = -EIO; + } + + iwl_free_resp(&cmd); + return ret; +} + +static void iwl_complete_scan(struct iwl_priv *priv, bool aborted) +{ + /* check if scan was requested from mac80211 */ + if (priv->scan_request) { + IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n"); + ieee80211_scan_completed(priv->hw, aborted); + } + + priv->scan_type = IWL_SCAN_NORMAL; + priv->scan_vif = NULL; + priv->scan_request = NULL; +} + +static void iwl_process_scan_complete(struct iwl_priv *priv) +{ + bool aborted; + + lockdep_assert_held(&priv->mutex); + + if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->status)) + return; + + IWL_DEBUG_SCAN(priv, "Completed scan.\n"); + + cancel_delayed_work(&priv->scan_check); + + aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status); + if (aborted) + IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n"); + + if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already completed.\n"); + goto out_settings; + } + + if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) { + int err; + + /* Check if mac80211 requested scan during our internal scan */ + if (priv->scan_request == NULL) + goto out_complete; + + /* If so request a new scan */ + err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL, + priv->scan_request->channels[0]->band); + if (err) { + IWL_DEBUG_SCAN(priv, + "failed to initiate pending scan: %d\n", err); + aborted = true; + goto out_complete; + } + + return; + } + +out_complete: + iwl_complete_scan(priv, aborted); + +out_settings: + /* Can we still talk to firmware ? 
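+ * (e.g. RF-kill may have been flipped while the scan ran, in which
+ * case there is nothing left to reconfigure)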
*/ + if (!iwl_is_ready_rf(priv)) + return; + + iwlagn_post_scan(priv); +} + +void iwl_force_scan_end(struct iwl_priv *priv) +{ + lockdep_assert_held(&priv->mutex); + + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n"); + return; + } + + IWL_DEBUG_SCAN(priv, "Forcing scan end\n"); + clear_bit(STATUS_SCANNING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + clear_bit(STATUS_SCAN_COMPLETE, &priv->status); + iwl_complete_scan(priv, true); +} + +static void iwl_do_scan_abort(struct iwl_priv *priv) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n"); + return; + } + + if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan abort in progress\n"); + return; + } + + ret = iwl_send_scan_abort(priv); + if (ret) { + IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret); + iwl_force_scan_end(priv); + } else + IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n"); +} + +/** + * iwl_scan_cancel - Cancel any currently executing HW scan + */ +int iwl_scan_cancel(struct iwl_priv *priv) +{ + IWL_DEBUG_SCAN(priv, "Queuing abort scan\n"); + queue_work(priv->workqueue, &priv->abort_scan); + return 0; +} + +/** + * iwl_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to wait (in milliseconds) for scan to abort + * + */ +void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(ms); + + lockdep_assert_held(&priv->mutex); + + IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n"); + + iwl_do_scan_abort(priv); + + while (time_before_eq(jiffies, timeout)) { + if (!test_bit(STATUS_SCAN_HW, &priv->status)) + goto finished; + msleep(20); + } + + return; + + finished: + /* + * Now STATUS_SCAN_HW is clear. This means that the + * device finished, but the background work is going + * to execute at best as soon as we release the mutex. + * Since we need to be able to issue a new scan right + * after this function returns, run the complete here. + * The STATUS_SCAN_COMPLETE bit will then be cleared + * and prevent the background work from "completing" + * a possible new scan. + */ + iwl_process_scan_complete(priv); +} + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanreq_notification *notif = (void *)pkt->data; + + IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanstart_notification *notif = (void *)pkt->data; + + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN(priv, "Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? 
"bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanresults_notification *notif = (void *)pkt->data; + + IWL_DEBUG_SCAN(priv, "Scan ch.res: " + "%d [802.11%s] " + "probe status: %u:%u " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec\n", + notif->channel, + notif->band ? "bg" : "a", + notif->probe_status, notif->num_probe_not_sent, + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); +#endif +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data; + + IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", + (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", + jiffies_to_msecs(jiffies - priv->scan_start)); + + /* + * When aborting, we run the scan completed background work inline + * and the background work must then do nothing. The SCAN_COMPLETE + * bit helps implement that logic and thus needs to be set before + * queueing the work. Also, since the scan abort waits for SCAN_HW + * to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW + * to avoid a race there. 
+ */ + set_bit(STATUS_SCAN_COMPLETE, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + queue_work(priv->workqueue, &priv->scan_completed); + + if (priv->iw_mode != NL80211_IFTYPE_ADHOC && + iwl_advanced_bt_coexist(priv) && + priv->bt_status != scan_notif->bt_status) { + if (scan_notif->bt_status) { + /* BT on */ + if (!priv->bt_ch_announce) + priv->bt_traffic_load = + IWL_BT_COEX_TRAFFIC_LOAD_HIGH; + /* + * otherwise, no traffic load information provided + * no changes made + */ + } else { + /* BT off */ + priv->bt_traffic_load = + IWL_BT_COEX_TRAFFIC_LOAD_NONE; + } + priv->bt_status = scan_notif->bt_status; + queue_work(priv->workqueue, + &priv->bt_traffic_change_work); + } +} + +void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) +{ + /* scan handlers */ + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_rx_scan_complete_notif; +} + +static u16 iwl_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, u8 n_probes) +{ + if (band == IEEE80211_BAND_5GHZ) + return IWL_ACTIVE_DWELL_TIME_52 + + IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); + else + return IWL_ACTIVE_DWELL_TIME_24 + + IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); +} + +static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) +{ + struct iwl_rxon_context *ctx; + int limits[NUM_IWL_RXON_CTX] = {}; + int n_active = 0; + u16 limit; + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + + /* + * If we're associated, we clamp the dwell time 98% + * of the beacon interval (minus 2 * channel tune time) + * If both contexts are active, we have to restrict to + * 1/2 of the minimum of them, because they might be in + * lock-step with the time inbetween only half of what + * time we'd have in each of them. + */ + for_each_context(priv, ctx) { + switch (ctx->staging.dev_type) { + case RXON_DEV_TYPE_P2P: + /* no timing constraints */ + continue; + case RXON_DEV_TYPE_ESS: + default: + /* timing constraints if associated */ + if (!iwl_is_associated_ctx(ctx)) + continue; + break; + case RXON_DEV_TYPE_CP: + case RXON_DEV_TYPE_2STA: + /* + * These seem to always have timers for TBTT + * active in uCode even when not associated yet. + */ + break; + } + + limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE; + } + + switch (n_active) { + case 0: + return dwell_time; + case 2: + limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + limit /= 2; + dwell_time = min(limit, dwell_time); + /* fall through to limit further */ + case 1: + limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + limit /= n_active; + return min(limit, dwell_time); + default: + WARN_ON_ONCE(1); + return dwell_time; + } +} + +static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band) +{ + u16 passive = (band == IEEE80211_BAND_2GHZ) ? 
+ IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + return iwl_limit_dwell(priv, passive); +} + +/* Return valid, unused, channel for a passive scan to reset the RF */ +static u8 iwl_get_single_channel_number(struct iwl_priv *priv, + enum ieee80211_band band) +{ + struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band]; + struct iwl_rxon_context *ctx; + int i; + + for (i = 0; i < sband->n_channels; i++) { + bool busy = false; + + for_each_context(priv, ctx) { + busy = sband->channels[i].hw_value == + le16_to_cpu(ctx->staging.channel); + if (busy) + break; + } + + if (busy) + continue; + + if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED)) + return sband->channels[i].hw_value; + } + + return 0; +} + +static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum ieee80211_band band, + struct iwl_scan_channel *scan_ch) +{ + const struct ieee80211_supported_band *sband; + u16 channel; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) { + IWL_ERR(priv, "invalid band\n"); + return 0; + } + + channel = iwl_get_single_channel_number(priv, band); + if (channel) { + scan_ch->channel = cpu_to_le16(channel); + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + scan_ch->active_dwell = + cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME); + scan_ch->passive_dwell = + cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME); + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + return 1; + } + + IWL_ERR(priv, "no valid channel found\n"); + return 0; +} + +static int iwl_get_channels_for_scan(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct iwl_scan_channel *scan_ch) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + u16 channel; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) + return 0; + + active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); + passive_dwell = iwl_get_passive_dwell_time(priv, band); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + + if (chan->band != band) + continue; + + channel = chan->hw_value; + scan_ch->channel = cpu_to_le16(channel); + + if (!is_active || (chan->flags & IEEE80211_CHAN_NO_IR)) + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + else + scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; + + if (n_probes) + scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + + IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", + channel, le32_to_cpu(scan_ch->type), + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? + "ACTIVE" : "PASSIVE", + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? 
+ active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); + return added; +} + +/** + * iwl_fill_probe_req - fill in all required fields and IE for probe request + */ + +static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta, + const u8 *ies, int ie_len, const u8 *ssid, + u8 ssid_len, int left) +{ + int len = 0; + u8 *pos = NULL; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + eth_broadcast_addr(frame->da); + memcpy(frame->sa, ta, ETH_ALEN); + eth_broadcast_addr(frame->bssid); + frame->seq_ctrl = 0; + + len += 24; + + /* ...next IE... */ + pos = &frame->u.probe_req.variable[0]; + + /* fill in our SSID IE */ + left -= ssid_len + 2; + if (left < 0) + return 0; + *pos++ = WLAN_EID_SSID; + *pos++ = ssid_len; + if (ssid && ssid_len) { + memcpy(pos, ssid, ssid_len); + pos += ssid_len; + } + + len += ssid_len + 2; + + if (WARN_ON(left < ie_len)) + return len; + + if (ies && ie_len) { + memcpy(pos, ies, ie_len); + len += ie_len; + } + + return (u16)len; +} + +static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = { sizeof(struct iwl_scan_cmd), }, + }; + struct iwl_scan_cmd *scan; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u32 rate_flags = 0; + u16 cmd_len = 0; + u16 rx_chain = 0; + enum ieee80211_band band; + u8 n_probes = 0; + u8 rx_ant = priv->nvm_data->valid_rx_ant; + u8 rate; + bool is_active = false; + int chan_mod; + u8 active_chains; + u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant; + int ret; + int scan_cmd_size = sizeof(struct iwl_scan_cmd) + + MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) + + priv->fw->ucode_capa.max_probe_length; + const u8 *ssid = NULL; + u8 ssid_len = 0; + + if (WARN_ON(priv->scan_type == IWL_SCAN_NORMAL && + (!priv->scan_request || + priv->scan_request->n_channels > MAX_SCAN_CHANNEL))) + return -EINVAL; + + lockdep_assert_held(&priv->mutex); + + if (vif) + ctx = iwl_rxon_ctx_from_vif(vif); + + if (!priv->scan_cmd) { + priv->scan_cmd = kmalloc(scan_cmd_size, GFP_KERNEL); + if (!priv->scan_cmd) { + IWL_DEBUG_SCAN(priv, + "fail to allocate memory for scan\n"); + return -ENOMEM; + } + } + scan = priv->scan_cmd; + memset(scan, 0, scan_cmd_size); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_is_any_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); + switch (priv->scan_type) { + case IWL_SCAN_RADIO_RESET: + interval = 0; + break; + case IWL_SCAN_NORMAL: + interval = vif->bss_conf.beacon_int; + break; + } + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + + extra = (suspend_time / interval) << 22; + scan_suspend_time = (extra | + ((suspend_time % interval) * 1024)); + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + switch (priv->scan_type) { + case IWL_SCAN_RADIO_RESET: + IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); + /* + * Override quiet time as firmware checks that active + * dwell is >= quiet; since we use passive scan it'll + * not 
actually be used. + */ + scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME); + break; + case IWL_SCAN_NORMAL: + if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + /* + * The highest priority SSID is inserted to the + * probe request template. + */ + ssid_len = priv->scan_request->ssids[0].ssid_len; + ssid = priv->scan_request->ssids[0].ssid; + + /* + * Invert the order of ssids, the firmware will invert + * it back. + */ + for (i = priv->scan_request->n_ssids - 1; i >= 1; i--) { + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); + break; + } + + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = ctx->bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + switch (priv->scan_band) { + case IEEE80211_BAND_2GHZ: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + chan_mod = le32_to_cpu( + priv->contexts[IWL_RXON_CTX_BSS].active.flags & + RXON_FLG_CHANNEL_MODE_MSK) + >> RXON_FLG_CHANNEL_MODE_POS; + if ((priv->scan_request && priv->scan_request->no_cck) || + chan_mod == CHANNEL_MODE_PURE_40) { + rate = IWL_RATE_6M_PLCP; + } else { + rate = IWL_RATE_1M_PLCP; + rate_flags = RATE_MCS_CCK_MSK; + } + /* + * Internal scans are passive, so we can indiscriminately set + * the BT ignore flag on 2.4 GHz since it applies to TX only. + */ + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) + scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; + break; + case IEEE80211_BAND_5GHZ: + rate = IWL_RATE_6M_PLCP; + break; + default: + IWL_WARN(priv, "Invalid scan band\n"); + return -EIO; + } + + /* + * If active scanning is requested but a certain channel is + * marked passive, we can do active scanning if we detect + * transmissions. + * + * There is an issue with some firmware versions that triggers + * a sysassert on a "good CRC threshold" of zero (== disabled), + * on a radar channel even though this means that we should NOT + * send probes. + * + * The "good CRC threshold" is the number of frames that we + * need to receive during our dwell time on a channel before + * sending out probes -- setting this to a huge value will + * mean we never reach it, but at the same time work around + * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER + * here instead of IWL_GOOD_CRC_TH_DISABLED. + * + * This was fixed in later versions along with some other + * scan changes, and the threshold behaves as a flag in those + * versions. + */ + if (priv->new_scan_threshold_behaviour) + scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : + IWL_GOOD_CRC_TH_DISABLED; + else + scan->good_CRC_th = is_active ? 
IWL_GOOD_CRC_TH_DEFAULT : + IWL_GOOD_CRC_TH_NEVER; + + band = priv->scan_band; + + if (band == IEEE80211_BAND_2GHZ && + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { + /* transmit 2.4 GHz probes only on first antenna */ + scan_tx_antennas = first_antenna(scan_tx_antennas); + } + + priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, + priv->scan_tx_ant[band], + scan_tx_antennas); + rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); + scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); + + /* + * In power save mode while associated use one chain, + * otherwise use all chains + */ + if (test_bit(STATUS_POWER_PMI, &priv->status) && + !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) { + /* rx_ant has been set to all valid chains previously */ + active_chains = rx_ant & + ((u8)(priv->chain_noise_data.active_chains)); + if (!active_chains) + active_chains = rx_ant; + + IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", + priv->chain_noise_data.active_chains); + + rx_ant = first_antenna(active_chains); + } + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && + priv->bt_full_concurrent) { + /* operated as 1x1 in full concurrency mode */ + rx_ant = first_antenna(rx_ant); + } + + /* MIMO is not used here, but value is required */ + rx_chain |= + priv->nvm_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; + scan->rx_chain = cpu_to_le16(rx_chain); + switch (priv->scan_type) { + case IWL_SCAN_NORMAL: + cmd_len = iwl_fill_probe_req( + (struct ieee80211_mgmt *)scan->data, + vif->addr, + priv->scan_request->ie, + priv->scan_request->ie_len, + ssid, ssid_len, + scan_cmd_size - sizeof(*scan)); + break; + case IWL_SCAN_RADIO_RESET: + /* use bcast addr, will not be transmitted but must be valid */ + cmd_len = iwl_fill_probe_req( + (struct ieee80211_mgmt *)scan->data, + iwl_bcast_addr, NULL, 0, + NULL, 0, + scan_cmd_size - sizeof(*scan)); + break; + default: + BUG(); + } + scan->tx_cmd.len = cpu_to_le16(cmd_len); + + scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | + RXON_FILTER_BCON_AWARE_MSK); + + switch (priv->scan_type) { + case IWL_SCAN_RADIO_RESET: + scan->channel_count = + iwl_get_channel_for_reset_scan(priv, vif, band, + (void *)&scan->data[cmd_len]); + break; + case IWL_SCAN_NORMAL: + scan->channel_count = + iwl_get_channels_for_scan(priv, vif, band, + is_active, n_probes, + (void *)&scan->data[cmd_len]); + break; + } + + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + return -EIO; + } + + cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data[0] = scan; + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + scan->len = cpu_to_le16(cmd.len[0]); + + /* set scan bit here for PAN params */ + set_bit(STATUS_SCAN_HW, &priv->status); + + ret = iwlagn_set_pan_params(priv); + if (ret) { + clear_bit(STATUS_SCAN_HW, &priv->status); + return ret; + } + + ret = iwl_dvm_send_cmd(priv, &cmd); + if (ret) { + clear_bit(STATUS_SCAN_HW, &priv->status); + iwlagn_set_pan_params(priv); + } + + return ret; +} + +void iwl_init_scan_params(struct iwl_priv *priv) +{ + u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1; + if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; + if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) + 
priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; +} + +int __must_check iwl_scan_initiate(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum iwl_scan_type scan_type, + enum ieee80211_band band) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + cancel_delayed_work(&priv->scan_check); + + if (!iwl_is_ready_rf(priv)) { + IWL_WARN(priv, "Request scan called when driver not ready.\n"); + return -EIO; + } + + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_SCAN(priv, + "Multiple concurrent scan requests in parallel.\n"); + return -EBUSY; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n"); + return -EBUSY; + } + + IWL_DEBUG_SCAN(priv, "Starting %sscan...\n", + scan_type == IWL_SCAN_NORMAL ? "" : + "internal short "); + + set_bit(STATUS_SCANNING, &priv->status); + priv->scan_type = scan_type; + priv->scan_start = jiffies; + priv->scan_band = band; + + ret = iwlagn_request_scan(priv, vif); + if (ret) { + clear_bit(STATUS_SCANNING, &priv->status); + priv->scan_type = IWL_SCAN_NORMAL; + return ret; + } + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + return 0; +} + + +/* + * internal short scan, this function should only been called while associated. + * It will reset and tune the radio to prevent possible RF related problem + */ +void iwl_internal_short_hw_scan(struct iwl_priv *priv) +{ + queue_work(priv->workqueue, &priv->start_internal_scan); +} + +static void iwl_bg_start_internal_scan(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, start_internal_scan); + + IWL_DEBUG_SCAN(priv, "Start internal scan\n"); + + mutex_lock(&priv->mutex); + + if (priv->scan_type == IWL_SCAN_RADIO_RESET) { + IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); + goto unlock; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); + goto unlock; + } + + if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band)) + IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n"); + unlock: + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + IWL_DEBUG_SCAN(priv, "Scan check work\n"); + + /* Since we are here firmware does not finish scan and + * most likely is in bad shape, so we don't bother to + * send abort command, just force scan complete to mac80211 */ + mutex_lock(&priv->mutex); + iwl_force_scan_end(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); + + IWL_DEBUG_SCAN(priv, "Abort scan work\n"); + + /* We keep scan_check work queued in case when firmware will not + * report back scan completed notification */ + mutex_lock(&priv->mutex); + iwl_scan_cancel_timeout(priv, 200); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + + mutex_lock(&priv->mutex); + iwl_process_scan_complete(priv); + mutex_unlock(&priv->mutex); +} + +void iwl_setup_scan_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); + INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); + INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); + 
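	/*
	 * scan_check is the scan watchdog: iwl_scan_initiate() (re)queues it
	 * with an IWL_SCAN_CHECK_WATCHDOG delay, and iwl_bg_scan_check()
	 * forces the scan to complete if the firmware never sends a
	 * scan-completed notification (iwl_bg_abort_scan() above deliberately
	 * leaves it queued for the same reason).
	 */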
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); +} + +void iwl_cancel_scan_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->start_internal_scan); + cancel_work_sync(&priv->abort_scan); + cancel_work_sync(&priv->scan_completed); + + if (cancel_delayed_work_sync(&priv->scan_check)) { + mutex_lock(&priv->mutex); + iwl_force_scan_end(priv); + mutex_unlock(&priv->mutex); + } +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c new file mode 100644 index 000000000000..0fa67d3b7235 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c @@ -0,0 +1,1442 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include "iwl-trans.h" +#include "dev.h" +#include "agn.h" + +const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + +static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) +{ + lockdep_assert_held(&priv->sta_lock); + + if (sta_id >= IWLAGN_STATION_COUNT) { + IWL_ERR(priv, "invalid sta_id %u\n", sta_id); + return -EINVAL; + } + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) + IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u " + "addr %pM\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + + if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_ASSOC(priv, + "STA id %u addr %pM already present in uCode " + "(according to driver)\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } else { + priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } + return 0; +} + +static void iwl_process_add_sta_resp(struct iwl_priv *priv, + struct iwl_rx_packet *pkt) +{ + struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data; + + IWL_DEBUG_INFO(priv, "Processing response for adding station\n"); + + spin_lock_bh(&priv->sta_lock); + + switch (add_sta_resp->status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); + break; + case ADD_STA_NO_ROOM_IN_TABLE: + IWL_ERR(priv, "Adding station failed, no room in table.\n"); + break; + case ADD_STA_NO_BLOCK_ACK_RESOURCE: + IWL_ERR(priv, + "Adding station failed, no block ack resource.\n"); + break; + case ADD_STA_MODIFY_NON_EXIST_STA: + IWL_ERR(priv, "Attempting to modify non-existing station\n"); + break; + default: + IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", + add_sta_resp->status); + break; + } + + spin_unlock_bh(&priv->sta_lock); +} + +void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + iwl_process_add_sta_resp(priv, pkt); +} + +int iwl_send_add_sta(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags) +{ + int ret = 0; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .flags = flags, + .data = { sta, }, + .len = { sizeof(*sta), }, + }; + u8 sta_id __maybe_unused = sta->sta.sta_id; + struct iwl_rx_packet *pkt; + struct iwl_add_sta_resp *add_sta_resp; + + IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", + sta_id, sta->sta.addr, flags & CMD_ASYNC ? 
"a" : ""); + + if (!(flags & CMD_ASYNC)) { + cmd.flags |= CMD_WANT_SKB; + might_sleep(); + } + + ret = iwl_dvm_send_cmd(priv, &cmd); + + if (ret || (flags & CMD_ASYNC)) + return ret; + + pkt = cmd.resp_pkt; + add_sta_resp = (void *)pkt->data; + + /* debug messages are printed in the handler */ + if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) { + spin_lock_bh(&priv->sta_lock); + ret = iwl_sta_ucode_activate(priv, sta_id); + spin_unlock_bh(&priv->sta_lock); + } else { + ret = -EIO; + } + + iwl_free_resp(&cmd); + + return ret; +} + +bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta *sta) +{ + if (!ctx->ht.enabled || !ctx->ht.is_40mhz) + return false; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (priv->disable_ht40) + return false; +#endif + + /* special case for RXON */ + if (!sta) + return true; + + return sta->bandwidth >= IEEE80211_STA_RX_BW_40; +} + +static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, + struct ieee80211_sta *sta, + struct iwl_rxon_context *ctx, + __le32 *flags, __le32 *mask) +{ + struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; + + *mask = STA_FLG_RTS_MIMO_PROT_MSK | + STA_FLG_MIMO_DIS_MSK | + STA_FLG_HT40_EN_MSK | + STA_FLG_MAX_AGG_SIZE_MSK | + STA_FLG_AGG_MPDU_DENSITY_MSK; + *flags = 0; + + if (!sta || !sta_ht_inf->ht_supported) + return; + + IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n", + sta->addr, + (sta->smps_mode == IEEE80211_SMPS_STATIC) ? + "static" : + (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? + "dynamic" : "disabled"); + + switch (sta->smps_mode) { + case IEEE80211_SMPS_STATIC: + *flags |= STA_FLG_MIMO_DIS_MSK; + break; + case IEEE80211_SMPS_DYNAMIC: + *flags |= STA_FLG_RTS_MIMO_PROT_MSK; + break; + case IEEE80211_SMPS_OFF: + break; + default: + IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode); + break; + } + + *flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); + + *flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); + + if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) + *flags |= STA_FLG_HT40_EN_MSK; +} + +int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct ieee80211_sta *sta) +{ + u8 sta_id = iwl_sta_id(sta); + __le32 flags, mask; + struct iwl_addsta_cmd cmd; + + if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask); + + spin_lock_bh(&priv->sta_lock); + priv->stations[sta_id].sta.station_flags &= ~mask; + priv->stations[sta_id].sta.station_flags |= flags; + spin_unlock_bh(&priv->sta_lock); + + memset(&cmd, 0, sizeof(cmd)); + cmd.mode = STA_CONTROL_MODIFY_MSK; + cmd.station_flags_msk = mask; + cmd.station_flags = flags; + cmd.sta.sta_id = sta_id; + + return iwl_send_add_sta(priv, &cmd, 0); +} + +static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, + struct ieee80211_sta *sta, + struct iwl_rxon_context *ctx) +{ + __le32 flags, mask; + + iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask); + + lockdep_assert_held(&priv->sta_lock); + priv->stations[index].sta.station_flags &= ~mask; + priv->stations[index].sta.station_flags |= flags; +} + +/** + * iwl_prep_station - Prepare station information for addition + * + * should be called with sta_lock held + */ +u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, struct ieee80211_sta *sta) +{ + struct iwl_station_entry *station; + int i; + u8 sta_id = IWL_INVALID_STATION; + + if (is_ap) + sta_id = 
ctx->ap_sta_id; + else if (is_broadcast_ether_addr(addr)) + sta_id = ctx->bcast_sta_id; + else + for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) { + if (ether_addr_equal(priv->stations[i].sta.sta.addr, + addr)) { + sta_id = i; + break; + } + + if (!priv->stations[i].used && + sta_id == IWL_INVALID_STATION) + sta_id = i; + } + + /* + * These two conditions have the same outcome, but keep them + * separate + */ + if (unlikely(sta_id == IWL_INVALID_STATION)) + return sta_id; + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { + IWL_DEBUG_INFO(priv, "STA %d already in process of being " + "added.\n", sta_id); + return sta_id; + } + + if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && + (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && + ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) { + IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not " + "adding again.\n", sta_id, addr); + return sta_id; + } + + station = &priv->stations[sta_id]; + station->used = IWL_STA_DRIVER_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", + sta_id, addr); + priv->num_stations++; + + /* Set up the REPLY_ADD_STA command to send to device */ + memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = sta_id; + station->sta.station_flags = ctx->station_flags; + station->ctxid = ctx->ctxid; + + if (sta) { + struct iwl_station_priv *sta_priv; + + sta_priv = (void *)sta->drv_priv; + sta_priv->ctx = ctx; + } + + /* + * OK to call unconditionally, since local stations (IBSS BSSID + * STA and broadcast STA) pass in a NULL sta, and mac80211 + * doesn't allow HT IBSS. + */ + iwl_set_ht_add_station(priv, sta_id, sta, ctx); + + return sta_id; + +} + +#define STA_WAIT_TIMEOUT (HZ/2) + +/** + * iwl_add_station_common - + */ +int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r) +{ + int ret = 0; + u8 sta_id; + struct iwl_addsta_cmd sta_cmd; + + *sta_id_r = 0; + spin_lock_bh(&priv->sta_lock); + sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare station %pM for addition\n", + addr); + spin_unlock_bh(&priv->sta_lock); + return -EINVAL; + } + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. 
+ */ + if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { + IWL_DEBUG_INFO(priv, "STA %d already in process of being " + "added.\n", sta_id); + spin_unlock_bh(&priv->sta_lock); + return -EEXIST; + } + + if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && + (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not " + "adding again.\n", sta_id, addr); + spin_unlock_bh(&priv->sta_lock); + return -EEXIST; + } + + priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_addsta_cmd)); + spin_unlock_bh(&priv->sta_lock); + + /* Add station to device's station table */ + ret = iwl_send_add_sta(priv, &sta_cmd, 0); + if (ret) { + spin_lock_bh(&priv->sta_lock); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[sta_id].sta.sta.addr); + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_bh(&priv->sta_lock); + } + *sta_id_r = sta_id; + return ret; +} + +/** + * iwl_sta_ucode_deactivate - deactivate ucode status for a station + */ +static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) +{ + lockdep_assert_held(&priv->sta_lock); + + /* Ucode must be active and driver must be non active */ + if ((priv->stations[sta_id].used & + (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != + IWL_STA_UCODE_ACTIVE) + IWL_ERR(priv, "removed non active STA %u\n", sta_id); + + priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; + + memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); + IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); +} + +static int iwl_send_remove_station(struct iwl_priv *priv, + const u8 *addr, int sta_id, + bool temporary) +{ + struct iwl_rx_packet *pkt; + int ret; + struct iwl_rem_sta_cmd rm_sta_cmd; + struct iwl_rem_sta_resp *rem_sta_resp; + + struct iwl_host_cmd cmd = { + .id = REPLY_REMOVE_STA, + .len = { sizeof(struct iwl_rem_sta_cmd), }, + .data = { &rm_sta_cmd, }, + }; + + memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); + rm_sta_cmd.num_sta = 1; + memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); + + cmd.flags |= CMD_WANT_SKB; + + ret = iwl_dvm_send_cmd(priv, &cmd); + + if (ret) + return ret; + + pkt = cmd.resp_pkt; + rem_sta_resp = (void *)pkt->data; + + switch (rem_sta_resp->status) { + case REM_STA_SUCCESS_MSK: + if (!temporary) { + spin_lock_bh(&priv->sta_lock); + iwl_sta_ucode_deactivate(priv, sta_id); + spin_unlock_bh(&priv->sta_lock); + } + IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); + break; + default: + ret = -EIO; + IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); + break; + } + + iwl_free_resp(&cmd); + + return ret; +} + +/** + * iwl_remove_station - Remove driver's knowledge of station. + */ +int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, + const u8 *addr) +{ + u8 tid; + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Unable to remove station %pM, device not ready.\n", + addr); + /* + * It is typical for stations to be removed when we are + * going down. 
Return success since device will be down + * soon anyway + */ + return 0; + } + + IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", + sta_id, addr); + + if (WARN_ON(sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + spin_lock_bh(&priv->sta_lock); + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", + addr); + goto out_err; + } + + if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", + addr); + goto out_err; + } + + if (priv->stations[sta_id].used & IWL_STA_LOCAL) { + kfree(priv->stations[sta_id].lq); + priv->stations[sta_id].lq = NULL; + } + + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) + memset(&priv->tid_data[sta_id][tid], 0, + sizeof(priv->tid_data[sta_id][tid])); + + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + + priv->num_stations--; + + if (WARN_ON(priv->num_stations < 0)) + priv->num_stations = 0; + + spin_unlock_bh(&priv->sta_lock); + + return iwl_send_remove_station(priv, addr, sta_id, false); +out_err: + spin_unlock_bh(&priv->sta_lock); + return -EINVAL; +} + +void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, + const u8 *addr) +{ + u8 tid; + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Unable to remove station %pM, device not ready.\n", + addr); + return; + } + + IWL_DEBUG_ASSOC(priv, "Deactivating STA: %pM (%d)\n", addr, sta_id); + + if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION)) + return; + + spin_lock_bh(&priv->sta_lock); + + WARN_ON_ONCE(!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)); + + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) + memset(&priv->tid_data[sta_id][tid], 0, + sizeof(priv->tid_data[sta_id][tid])); + + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + + priv->num_stations--; + + if (WARN_ON_ONCE(priv->num_stations < 0)) + priv->num_stations = 0; + + spin_unlock_bh(&priv->sta_lock); +} + +static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + u8 sta_id, struct iwl_link_quality_cmd *link_cmd) +{ + int i, r; + u32 rate_flags = 0; + __le32 rate_n_flags; + + lockdep_assert_held(&priv->mutex); + + memset(link_cmd, 0, sizeof(*link_cmd)); + + /* Set up the rate scaling to start at selected rate, fall back + * all the way down to 1M in IEEE order, and then spin on 1M */ + if (priv->band == IEEE80211_BAND_5GHZ) + r = IWL_RATE_6M_INDEX; + else if (ctx && ctx->vif && ctx->vif->p2p) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= first_antenna(priv->nvm_data->valid_tx_ant) << + RATE_MCS_ANT_POS; + rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + link_cmd->rs_table[i].rate_n_flags = rate_n_flags; + + link_cmd->general_params.single_stream_ant_msk = + first_antenna(priv->nvm_data->valid_tx_ant); + + link_cmd->general_params.dual_stream_ant_msk = + priv->nvm_data->valid_tx_ant & + ~first_antenna(priv->nvm_data->valid_tx_ant); + if (!link_cmd->general_params.dual_stream_ant_msk) { + link_cmd->general_params.dual_stream_ant_msk = ANT_AB; + } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) { + link_cmd->general_params.dual_stream_ant_msk = + priv->nvm_data->valid_tx_ant; + } + + link_cmd->agg_params.agg_dis_start_th = + LINK_QUAL_AGG_DISABLE_START_DEF; + 
link_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + link_cmd->sta_id = sta_id; +} + +/** + * iwl_clear_ucode_stations - clear ucode station table bits + * + * This function clears all the bits in the driver indicating + * which stations are active in the ucode. Call when something + * other than explicit station management would cause this in + * the ucode, e.g. unassociated RXON. + */ +void iwl_clear_ucode_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int i; + bool cleared = false; + + IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); + + spin_lock_bh(&priv->sta_lock); + for (i = 0; i < IWLAGN_STATION_COUNT; i++) { + if (ctx && ctx->ctxid != priv->stations[i].ctxid) + continue; + + if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_INFO(priv, + "Clearing ucode active for station %d\n", i); + priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; + cleared = true; + } + } + spin_unlock_bh(&priv->sta_lock); + + if (!cleared) + IWL_DEBUG_INFO(priv, + "No active stations found to be cleared\n"); +} + +/** + * iwl_restore_stations() - Restore driver known stations to device + * + * All stations considered active by driver, but not present in ucode, is + * restored. + * + * Function sleeps. + */ +void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + struct iwl_addsta_cmd sta_cmd; + static const struct iwl_link_quality_cmd zero_lq = {}; + struct iwl_link_quality_cmd lq; + int i; + bool found = false; + int ret; + bool send_lq; + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Not ready yet, not restoring any stations.\n"); + return; + } + + IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); + spin_lock_bh(&priv->sta_lock); + for (i = 0; i < IWLAGN_STATION_COUNT; i++) { + if (ctx->ctxid != priv->stations[i].ctxid) + continue; + if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && + !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", + priv->stations[i].sta.sta.addr); + priv->stations[i].sta.mode = 0; + priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; + found = true; + } + } + + for (i = 0; i < IWLAGN_STATION_COUNT; i++) { + if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { + memcpy(&sta_cmd, &priv->stations[i].sta, + sizeof(struct iwl_addsta_cmd)); + send_lq = false; + if (priv->stations[i].lq) { + if (priv->wowlan) + iwl_sta_fill_lq(priv, ctx, i, &lq); + else + memcpy(&lq, priv->stations[i].lq, + sizeof(struct iwl_link_quality_cmd)); + + if (memcmp(&lq, &zero_lq, sizeof(lq))) + send_lq = true; + } + spin_unlock_bh(&priv->sta_lock); + ret = iwl_send_add_sta(priv, &sta_cmd, 0); + if (ret) { + spin_lock_bh(&priv->sta_lock); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[i].sta.sta.addr); + priv->stations[i].used &= + ~IWL_STA_DRIVER_ACTIVE; + priv->stations[i].used &= + ~IWL_STA_UCODE_INPROGRESS; + continue; + } + /* + * Rate scaling has already been initialized, send + * current LQ command + */ + if (send_lq) + iwl_send_lq_cmd(priv, ctx, &lq, 0, true); + spin_lock_bh(&priv->sta_lock); + priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; + } + } + + spin_unlock_bh(&priv->sta_lock); + if (!found) + IWL_DEBUG_INFO(priv, "Restoring all known stations .... " + "no stations to be restored.\n"); + else + IWL_DEBUG_INFO(priv, "Restoring all known stations .... 
" + "complete.\n"); +} + +int iwl_get_free_ucode_key_offset(struct iwl_priv *priv) +{ + int i; + + for (i = 0; i < priv->sta_key_max_num; i++) + if (!test_and_set_bit(i, &priv->ucode_key_table)) + return i; + + return WEP_INVALID_OFFSET; +} + +void iwl_dealloc_bcast_stations(struct iwl_priv *priv) +{ + int i; + + spin_lock_bh(&priv->sta_lock); + for (i = 0; i < IWLAGN_STATION_COUNT; i++) { + if (!(priv->stations[i].used & IWL_STA_BCAST)) + continue; + + priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; + priv->num_stations--; + if (WARN_ON(priv->num_stations < 0)) + priv->num_stations = 0; + kfree(priv->stations[i].lq); + priv->stations[i].lq = NULL; + } + spin_unlock_bh(&priv->sta_lock); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static void iwl_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ + int i; + IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); + IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", + lq->general_params.single_stream_ant_msk, + lq->general_params.dual_stream_ant_msk); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", + i, lq->rs_table[i].rate_n_flags); +} +#else +static inline void iwl_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ +} +#endif + +/** + * is_lq_table_valid() - Test one aspect of LQ cmd for validity + * + * It sometimes happens when a HT rate has been in use and we + * loose connectivity with AP then mac80211 will first tell us that the + * current channel is not HT anymore before removing the station. In such a + * scenario the RXON flags will be updated to indicate we are not + * communicating HT anymore, but the LQ command may still contain HT rates. + * Test for this to prevent driver from sending LQ command between the time + * RXON flags are updated and when LQ command is updated. + */ +static bool is_lq_table_valid(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq) +{ + int i; + + if (ctx->ht.enabled) + return true; + + IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", + ctx->active.channel); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & + RATE_MCS_HT_MSK) { + IWL_DEBUG_INFO(priv, + "index %d of LQ expects HT channel\n", + i); + return false; + } + } + return true; +} + +/** + * iwl_send_lq_cmd() - Send link quality command + * @init: This command is sent as part of station initialization right + * after station has been added. + * + * The link quality command is sent as the last step of station creation. + * This is the special case in which init is set and we call a callback in + * this case to clear the state indicating that station creation is in + * progress. 
+ */ +int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq, u8 flags, bool init) +{ + int ret = 0; + struct iwl_host_cmd cmd = { + .id = REPLY_TX_LINK_QUALITY_CMD, + .len = { sizeof(struct iwl_link_quality_cmd), }, + .flags = flags, + .data = { lq, }, + }; + + if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + + spin_lock_bh(&priv->sta_lock); + if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + spin_unlock_bh(&priv->sta_lock); + return -EINVAL; + } + spin_unlock_bh(&priv->sta_lock); + + iwl_dump_lq_cmd(priv, lq); + if (WARN_ON(init && (cmd.flags & CMD_ASYNC))) + return -EINVAL; + + if (is_lq_table_valid(priv, ctx, lq)) + ret = iwl_dvm_send_cmd(priv, &cmd); + else + ret = -EINVAL; + + if (cmd.flags & CMD_ASYNC) + return ret; + + if (init) { + IWL_DEBUG_INFO(priv, "init LQ command complete, " + "clearing sta addition status for sta %d\n", + lq->sta_id); + spin_lock_bh(&priv->sta_lock); + priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_bh(&priv->sta_lock); + } + return ret; +} + + +static struct iwl_link_quality_cmd * +iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + u8 sta_id) +{ + struct iwl_link_quality_cmd *link_cmd; + + link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL); + if (!link_cmd) { + IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n"); + return NULL; + } + + iwl_sta_fill_lq(priv, ctx, sta_id, link_cmd); + + return link_cmd; +} + +/* + * iwlagn_add_bssid_station - Add the special IBSS BSSID station + * + * Function sleeps. + */ +int iwlagn_add_bssid_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r) +{ + int ret; + u8 sta_id; + struct iwl_link_quality_cmd *link_cmd; + + if (sta_id_r) + *sta_id_r = IWL_INVALID_STATION; + + ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM\n", addr); + return ret; + } + + if (sta_id_r) + *sta_id_r = sta_id; + + spin_lock_bh(&priv->sta_lock); + priv->stations[sta_id].used |= IWL_STA_LOCAL; + spin_unlock_bh(&priv->sta_lock); + + /* Set up default rate scaling table in device's station table */ + link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for station %pM.\n", + addr); + return -ENOMEM; + } + + ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true); + if (ret) + IWL_ERR(priv, "Link quality command failed (%d)\n", ret); + + spin_lock_bh(&priv->sta_lock); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_bh(&priv->sta_lock); + + return 0; +} + +/* + * static WEP keys + * + * For each context, the device has a table of 4 static WEP keys + * (one for each key index) that is updated with the following + * commands. 
+ */ + +static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + bool send_if_empty) +{ + int i, not_empty = 0; + u8 buff[sizeof(struct iwl_wep_cmd) + + sizeof(struct iwl_wep_key) * WEP_KEYS_MAX]; + struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; + size_t cmd_size = sizeof(struct iwl_wep_cmd); + struct iwl_host_cmd cmd = { + .id = ctx->wep_key_cmd, + .data = { wep_cmd, }, + }; + + might_sleep(); + + memset(wep_cmd, 0, cmd_size + + (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); + + for (i = 0; i < WEP_KEYS_MAX ; i++) { + wep_cmd->key[i].key_index = i; + if (ctx->wep_keys[i].key_size) { + wep_cmd->key[i].key_offset = i; + not_empty = 1; + } else { + wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; + } + + wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; + memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, + ctx->wep_keys[i].key_size); + } + + wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; + wep_cmd->num_keys = WEP_KEYS_MAX; + + cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; + + cmd.len[0] = cmd_size; + + if (not_empty || send_if_empty) + return iwl_dvm_send_cmd(priv, &cmd); + else + return 0; +} + +int iwl_restore_default_wep_keys(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + lockdep_assert_held(&priv->mutex); + + return iwl_send_static_wepkey_cmd(priv, ctx, false); +} + +int iwl_remove_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", + keyconf->keyidx); + + memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, + "Not sending REPLY_WEPKEY command due to RFKILL.\n"); + /* but keys in device are clear anyway so return success */ + return 0; + } + ret = iwl_send_static_wepkey_cmd(priv, ctx, 1); + IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", + keyconf->keyidx, ret); + + return ret; +} + +int iwl_set_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (keyconf->keylen != WEP_KEY_LEN_128 && + keyconf->keylen != WEP_KEY_LEN_64) { + IWL_DEBUG_WEP(priv, + "Bad WEP key length %d\n", keyconf->keylen); + return -EINVAL; + } + + keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT; + + ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; + memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, + keyconf->keylen); + + ret = iwl_send_static_wepkey_cmd(priv, ctx, false); + IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", + keyconf->keylen, keyconf->keyidx, ret); + + return ret; +} + +/* + * dynamic (per-station) keys + * + * The dynamic keys are a little more complicated. The device has + * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys. + * These are linked to stations by a table that contains an index + * into the key table for each station/key index/{mcast,unicast}, + * i.e. it's basically an array of pointers like this: + * key_offset_t key_mapping[NUM_STATIONS][4][2]; + * (it really works differently, but you can think of it as such) + * + * The key uploading and linking happens in the same command, the + * add station command with STA_MODIFY_KEY_MASK. 
+ */ + +static u8 iwlagn_key_sta_id(struct iwl_priv *priv, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + if (sta) + return iwl_sta_id(sta); + + /* + * The device expects GTKs for station interfaces to be + * installed as GTKs for the AP station. If we have no + * station ID, then use the ap_sta_id in that case. + */ + if (vif->type == NL80211_IFTYPE_STATION && vif_priv->ctx) + return vif_priv->ctx->ap_sta_id; + + return IWL_INVALID_STATION; +} + +static int iwlagn_send_sta_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k, + u32 cmd_flags) +{ + __le16 key_flags; + struct iwl_addsta_cmd sta_cmd; + int i; + + spin_lock_bh(&priv->sta_lock); + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); + spin_unlock_bh(&priv->sta_lock); + + key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags |= STA_KEY_FLG_MAP_KEY_MSK; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + key_flags |= STA_KEY_FLG_CCMP; + memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen); + break; + case WLAN_CIPHER_SUITE_TKIP: + key_flags |= STA_KEY_FLG_TKIP; + sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32; + for (i = 0; i < 5; i++) + sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); + memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen); + break; + case WLAN_CIPHER_SUITE_WEP104: + key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + key_flags |= STA_KEY_FLG_WEP; + memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen); + break; + default: + WARN_ON(1); + return -EINVAL; + } + + if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + key_flags |= STA_KEY_MULTICAST_MSK; + + /* key pointer (offset) */ + sta_cmd.key.key_offset = keyconf->hw_key_idx; + + sta_cmd.key.key_flags = key_flags; + sta_cmd.mode = STA_CONTROL_MODIFY_MSK; + sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; + + return iwl_send_add_sta(priv, &sta_cmd, cmd_flags); +} + +void iwl_update_tkip_key(struct iwl_priv *priv, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) +{ + u8 sta_id = iwlagn_key_sta_id(priv, vif, sta); + + if (sta_id == IWL_INVALID_STATION) + return; + + if (iwl_scan_cancel(priv)) { + /* cancel scan failed, just live w/ bad key and rely + briefly on SW decryption */ + return; + } + + iwlagn_send_sta_key(priv, keyconf, sta_id, + iv32, phase1key, CMD_ASYNC); +} + +int iwl_remove_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta) +{ + struct iwl_addsta_cmd sta_cmd; + u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta); + __le16 key_flags; + + /* if station isn't there, neither is the key */ + if (sta_id == IWL_INVALID_STATION) + return -ENOENT; + + spin_lock_bh(&priv->sta_lock); + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); + if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) + sta_id = IWL_INVALID_STATION; + spin_unlock_bh(&priv->sta_lock); + + if (sta_id == IWL_INVALID_STATION) + return 0; + + lockdep_assert_held(&priv->mutex); + + ctx->key_mapping_keys--; + + IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", + keyconf->keyidx, sta_id); + + if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table)) + IWL_ERR(priv, "offset %d not used in uCode key table.\n", + keyconf->hw_key_idx); + + key_flags = cpu_to_le16(keyconf->keyidx << 
STA_KEY_FLG_KEYID_POS); + key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC | + STA_KEY_FLG_INVALID; + + if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + key_flags |= STA_KEY_MULTICAST_MSK; + + sta_cmd.key.key_flags = key_flags; + sta_cmd.key.key_offset = keyconf->hw_key_idx; + sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; + sta_cmd.mode = STA_CONTROL_MODIFY_MSK; + + return iwl_send_add_sta(priv, &sta_cmd, 0); +} + +int iwl_set_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta) +{ + struct ieee80211_key_seq seq; + u16 p1k[5]; + int ret; + u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta); + const u8 *addr; + + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; + + lockdep_assert_held(&priv->mutex); + + keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv); + if (keyconf->hw_key_idx == WEP_INVALID_OFFSET) + return -ENOSPC; + + ctx->key_mapping_keys++; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + if (sta) + addr = sta->addr; + else /* station mode case only */ + addr = ctx->active.bssid_addr; + + /* pre-fill phase 1 key into device cache */ + ieee80211_get_key_rx_seq(keyconf, 0, &seq); + ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); + ret = iwlagn_send_sta_key(priv, keyconf, sta_id, + seq.tkip.iv32, p1k, 0); + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = iwlagn_send_sta_key(priv, keyconf, sta_id, + 0, NULL, 0); + break; + default: + IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher); + ret = -EINVAL; + } + + if (ret) { + ctx->key_mapping_keys--; + clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table); + } + + IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, + sta ? sta->addr : NULL, ret); + + return ret; +} + +/** + * iwlagn_alloc_bcast_station - add broadcast station into driver's station table. + * + * This adds the broadcast station into the driver's station table + * and marks it driver active, so that it will be restored to the + * device at the next best time. + */ +int iwlagn_alloc_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_link_quality_cmd *link_cmd; + u8 sta_id; + + spin_lock_bh(&priv->sta_lock); + sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare broadcast station\n"); + spin_unlock_bh(&priv->sta_lock); + + return -EINVAL; + } + + priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used |= IWL_STA_BCAST; + spin_unlock_bh(&priv->sta_lock); + + link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_bh(&priv->sta_lock); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_bh(&priv->sta_lock); + + return 0; +} + +/** + * iwl_update_bcast_station - update broadcast station's LQ command + * + * Only used by iwlagn. Placed here to have all bcast station management + * code together. 
+ */ +int iwl_update_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_link_quality_cmd *link_cmd; + u8 sta_id = ctx->bcast_sta_id; + + link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); + if (!link_cmd) { + IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_bh(&priv->sta_lock); + if (priv->stations[sta_id].lq) + kfree(priv->stations[sta_id].lq); + else + IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n"); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_bh(&priv->sta_lock); + + return 0; +} + +int iwl_update_bcast_stations(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int ret = 0; + + for_each_context(priv, ctx) { + ret = iwl_update_bcast_station(priv, ctx); + if (ret) + break; + } + + return ret; +} + +/** + * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table + */ +int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) +{ + struct iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + /* Remove "disable" flag, to enable Tx for this TID */ + spin_lock_bh(&priv->sta_lock); + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; + priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); + spin_unlock_bh(&priv->sta_lock); + + return iwl_send_add_sta(priv, &sta_cmd, 0); +} + +int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid, u16 ssn) +{ + int sta_id; + struct iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + sta_id = iwl_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + spin_lock_bh(&priv->sta_lock); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; + priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); + spin_unlock_bh(&priv->sta_lock); + + return iwl_send_add_sta(priv, &sta_cmd, 0); +} + +int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid) +{ + int sta_id; + struct iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + sta_id = iwl_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_bh(&priv->sta_lock); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; + priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); + spin_unlock_bh(&priv->sta_lock); + + return iwl_send_add_sta(priv, &sta_cmd, 0); +} + + + +void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) +{ + struct iwl_addsta_cmd cmd = { + .mode = STA_CONTROL_MODIFY_MSK, + .station_flags = STA_FLG_PWR_SAVE_MSK, + .station_flags_msk = STA_FLG_PWR_SAVE_MSK, + .sta.sta_id = sta_id, + .sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK, + .sleep_tx_count = cpu_to_le16(cnt), + }; + + iwl_send_add_sta(priv, &cmd, CMD_ASYNC); +} diff --git 
a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c new file mode 100644 index 000000000000..c4736c8834c5 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c @@ -0,0 +1,685 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + + +#include +#include +#include +#include +#include "iwl-io.h" +#include "iwl-modparams.h" +#include "iwl-debug.h" +#include "agn.h" +#include "dev.h" +#include "commands.h" +#include "tt.h" + +/* default Thermal Throttling transaction table + * Current state | Throttling Down | Throttling Up + *============================================================================= + * Condition Nxt State Condition Nxt State Condition Nxt State + *----------------------------------------------------------------------------- + * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A + * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0 + * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1 + * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 + *============================================================================= + */ +static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = { + {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104}, + {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1}, + {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} +}; +static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = { + {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95}, + {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1}, + {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} +}; +static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = { + {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100}, + {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}, + {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} +}; +static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = { + {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD}, + {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}, + {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX} +}; + +/* Advance Thermal Throttling default restriction table */ +static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = { + {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true }, + {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true }, + {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false }, + {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false } +}; + 
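/*
 * Standalone illustration -- not part of the driver sources.  It shows how
 * the transition tables above are meant to be read: for the current
 * throttling state, walk that state's row of (IWL_TI_STATE_MAX - 1) entries
 * and move to the entry whose [tt_low, tt_high] range contains the current
 * temperature, which is the lookup iwl_advance_tt_handler() performs later
 * in this file.  The enum, struct and numeric thresholds below are
 * simplified stand-ins taken from the comment table above (CT kill at 114 C,
 * CT kill exit at 95 C); the driver's real definitions live in tt.h.
 */
#include <stdio.h>

enum tt_state { TI_0, TI_1, TI_2, TI_CT_KILL, TI_STATE_MAX };

struct tt_trans {
	enum tt_state next;	/* state to move to */
	int low, high;		/* inclusive temperature range, degrees C */
};

/* one row per current state, mirroring tt_range_0..tt_range_3 above */
static const struct tt_trans table[TI_STATE_MAX][TI_STATE_MAX - 1] = {
	[TI_0]       = { {TI_0, -273, 104}, {TI_1, 105, 113},
			 {TI_CT_KILL, 114, 999} },
	[TI_1]       = { {TI_0, -273,  95}, {TI_2, 110, 113},
			 {TI_CT_KILL, 114, 999} },
	[TI_2]       = { {TI_1, -273, 100}, {TI_CT_KILL, 114, 999},
			 {TI_CT_KILL, 114, 999} },
	[TI_CT_KILL] = { {TI_0, -273,  95}, {TI_CT_KILL, 96, 999},
			 {TI_CT_KILL, 96, 999} },
};

static enum tt_state next_state(enum tt_state cur, int temp)
{
	int i;

	for (i = 0; i < TI_STATE_MAX - 1; i++)
		if (temp >= table[cur][i].low && temp <= table[cur][i].high)
			return table[cur][i].next;
	return cur;	/* no range matched: keep the current state */
}

int main(void)
{
	/* e.g. TI_1 at 111 C falls in [110, 113] and moves to TI_2 */
	printf("TI_1 at 111 C -> state %d (TI_2 is %d)\n",
	       next_state(TI_1, 111), TI_2);
	return 0;
}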
+bool iwl_tt_is_low_power_state(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + if (tt->state >= IWL_TI_1) + return true; + return false; +} + +u8 iwl_tt_current_power_mode(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + return tt->tt_power_mode; +} + +bool iwl_ht_enabled(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + + if (!priv->thermal_throttle.advanced_tt) + return true; + restriction = tt->restriction + tt->state; + return restriction->is_ht; +} + +static bool iwl_within_ct_kill_margin(struct iwl_priv *priv) +{ + s32 temp = priv->temperature; /* degrees CELSIUS except specified */ + bool within_margin = false; + + if (!priv->thermal_throttle.advanced_tt) + within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >= + CT_KILL_THRESHOLD_LEGACY) ? true : false; + else + within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >= + CT_KILL_THRESHOLD) ? true : false; + return within_margin; +} + +bool iwl_check_for_ct_kill(struct iwl_priv *priv) +{ + bool is_ct_kill = false; + + if (iwl_within_ct_kill_margin(priv)) { + iwl_tt_enter_ct_kill(priv); + is_ct_kill = true; + } + return is_ct_kill; +} + +enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + + if (!priv->thermal_throttle.advanced_tt) + return IWL_ANT_OK_MULTI; + restriction = tt->restriction + tt->state; + return restriction->tx_stream; +} + +enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + + if (!priv->thermal_throttle.advanced_tt) + return IWL_ANT_OK_MULTI; + restriction = tt->restriction + tt->state; + return restriction->rx_stream; +} + +#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */ +#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */ + +/* + * toggle the bit to wake up uCode and check the temperature + * if the temperature is below CT, uCode will stay awake and send card + * state notification with CT_KILL bit clear to inform Thermal Throttling + * Management to change state. 
Otherwise, uCode will go back to sleep + * without doing anything, driver should continue the 5 seconds timer + * to wake up uCode for temperature check until temperature drop below CT + */ +static void iwl_tt_check_exit_ct_kill(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + unsigned long flags; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (tt->state == IWL_TI_CT_KILL) { + if (priv->thermal_throttle.ct_kill_toggle) { + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + priv->thermal_throttle.ct_kill_toggle = false; + } else { + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + priv->thermal_throttle.ct_kill_toggle = true; + } + iwl_read32(priv->trans, CSR_UCODE_DRV_GP1); + if (iwl_trans_grab_nic_access(priv->trans, false, &flags)) + iwl_trans_release_nic_access(priv->trans, &flags); + + /* Reschedule the ct_kill timer to occur in + * CT_KILL_EXIT_DURATION seconds to ensure we get a + * thermal update */ + IWL_DEBUG_TEMP(priv, "schedule ct_kill exit timer\n"); + mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, + jiffies + CT_KILL_EXIT_DURATION * HZ); + } +} + +static void iwl_perform_ct_kill_task(struct iwl_priv *priv, + bool stop) +{ + if (stop) { + IWL_DEBUG_TEMP(priv, "Stop all queues\n"); + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + IWL_DEBUG_TEMP(priv, + "Schedule 5 seconds CT_KILL Timer\n"); + mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, + jiffies + CT_KILL_EXIT_DURATION * HZ); + } else { + IWL_DEBUG_TEMP(priv, "Wake all queues\n"); + if (priv->mac80211_registered) + ieee80211_wake_queues(priv->hw); + } +} + +static void iwl_tt_ready_for_ct_kill(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* temperature timer expired, ready to go into CT_KILL state */ + if (tt->state != IWL_TI_CT_KILL) { + IWL_DEBUG_TEMP(priv, "entering CT_KILL state when " + "temperature timer expired\n"); + tt->state = IWL_TI_CT_KILL; + set_bit(STATUS_CT_KILL, &priv->status); + iwl_perform_ct_kill_task(priv, true); + } +} + +static void iwl_prepare_ct_kill_task(struct iwl_priv *priv) +{ + IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n"); + /* make request to retrieve statistics information */ + iwl_send_statistics_request(priv, 0, false); + /* Reschedule the ct_kill wait timer */ + mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm, + jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION)); +} + +#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY) +#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100) +#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90) + +/* + * Legacy thermal throttling + * 1) Avoid NIC destruction due to high temperatures + * Chip will identify dangerously high temperatures that can + * harm the device and will power down + * 2) Avoid the NIC power down due to high temperature + * Throttle early enough to lower the power consumption before + * drastic steps are needed + */ +static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + enum iwl_tt_state old_state; + +#ifdef CONFIG_IWLWIFI_DEBUG + if ((tt->tt_previous_temp) && + (temp > tt->tt_previous_temp) && + ((temp - tt->tt_previous_temp) > + IWL_TT_INCREASE_MARGIN)) { + 
IWL_DEBUG_TEMP(priv, + "Temperature increase %d degree Celsius\n", + (temp - tt->tt_previous_temp)); + } +#endif + old_state = tt->state; + /* in Celsius */ + if (temp >= IWL_MINIMAL_POWER_THRESHOLD) + tt->state = IWL_TI_CT_KILL; + else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2) + tt->state = IWL_TI_2; + else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1) + tt->state = IWL_TI_1; + else + tt->state = IWL_TI_0; + +#ifdef CONFIG_IWLWIFI_DEBUG + tt->tt_previous_temp = temp; +#endif + /* stop ct_kill_waiting_tm timer */ + del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); + if (tt->state != old_state) { + switch (tt->state) { + case IWL_TI_0: + /* + * When the system is ready to go back to IWL_TI_0 + * we only have to call iwl_power_update_mode() to + * do so. + */ + break; + case IWL_TI_1: + tt->tt_power_mode = IWL_POWER_INDEX_3; + break; + case IWL_TI_2: + tt->tt_power_mode = IWL_POWER_INDEX_4; + break; + default: + tt->tt_power_mode = IWL_POWER_INDEX_5; + break; + } + mutex_lock(&priv->mutex); + if (old_state == IWL_TI_CT_KILL) + clear_bit(STATUS_CT_KILL, &priv->status); + if (tt->state != IWL_TI_CT_KILL && + iwl_power_update_mode(priv, true)) { + /* TT state not updated + * try again during next temperature read + */ + if (old_state == IWL_TI_CT_KILL) + set_bit(STATUS_CT_KILL, &priv->status); + tt->state = old_state; + IWL_ERR(priv, "Cannot update power mode, " + "TT state not updated\n"); + } else { + if (tt->state == IWL_TI_CT_KILL) { + if (force) { + set_bit(STATUS_CT_KILL, &priv->status); + iwl_perform_ct_kill_task(priv, true); + } else { + iwl_prepare_ct_kill_task(priv); + tt->state = old_state; + } + } else if (old_state == IWL_TI_CT_KILL && + tt->state != IWL_TI_CT_KILL) + iwl_perform_ct_kill_task(priv, false); + IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n", + tt->state); + IWL_DEBUG_TEMP(priv, "Power Index change to %u\n", + tt->tt_power_mode); + } + mutex_unlock(&priv->mutex); + } +} + +/* + * Advance thermal throttling + * 1) Avoid NIC destruction due to high temperatures + * Chip will identify dangerously high temperatures that can + * harm the device and will power down + * 2) Avoid the NIC power down due to high temperature + * Throttle early enough to lower the power consumption before + * drastic steps are needed + * Actions include relaxing the power down sleep thresholds and + * decreasing the number of TX streams + * 3) Avoid throughput performance impact as much as possible + * + *============================================================================= + * Condition Nxt State Condition Nxt State Condition Nxt State + *----------------------------------------------------------------------------- + * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A + * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0 + * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1 + * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 + *============================================================================= + */ +static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + int i; + bool changed = false; + enum iwl_tt_state old_state; + struct iwl_tt_trans *transaction; + + old_state = tt->state; + for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) { + /* based on the current TT state, + * find the curresponding transaction table + * each table has (IWL_TI_STATE_MAX - 1) entries + * tt->transaction + ((old_state * (IWL_TI_STATE_MAX - 1)) + * will advance to the correct table. 
+ * then based on the current temperature + * find the next state need to transaction to + * go through all the possible (IWL_TI_STATE_MAX - 1) entries + * in the current table to see if transaction is needed + */ + transaction = tt->transaction + + ((old_state * (IWL_TI_STATE_MAX - 1)) + i); + if (temp >= transaction->tt_low && + temp <= transaction->tt_high) { +#ifdef CONFIG_IWLWIFI_DEBUG + if ((tt->tt_previous_temp) && + (temp > tt->tt_previous_temp) && + ((temp - tt->tt_previous_temp) > + IWL_TT_INCREASE_MARGIN)) { + IWL_DEBUG_TEMP(priv, + "Temperature increase %d " + "degree Celsius\n", + (temp - tt->tt_previous_temp)); + } + tt->tt_previous_temp = temp; +#endif + if (old_state != + transaction->next_state) { + changed = true; + tt->state = + transaction->next_state; + } + break; + } + } + /* stop ct_kill_waiting_tm timer */ + del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); + if (changed) { + if (tt->state >= IWL_TI_1) { + /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */ + tt->tt_power_mode = IWL_POWER_INDEX_5; + + if (!iwl_ht_enabled(priv)) { + struct iwl_rxon_context *ctx; + + for_each_context(priv, ctx) { + struct iwl_rxon_cmd *rxon; + + rxon = &ctx->staging; + + /* disable HT */ + rxon->flags &= ~( + RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | + RXON_FLG_HT40_PROT_MSK | + RXON_FLG_HT_PROT_MSK); + } + } else { + /* check HT capability and set + * according to the system HT capability + * in case get disabled before */ + iwl_set_rxon_ht(priv, &priv->current_ht_config); + } + + } else { + /* + * restore system power setting -- it will be + * recalculated automatically. + */ + + /* check HT capability and set + * according to the system HT capability + * in case get disabled before */ + iwl_set_rxon_ht(priv, &priv->current_ht_config); + } + mutex_lock(&priv->mutex); + if (old_state == IWL_TI_CT_KILL) + clear_bit(STATUS_CT_KILL, &priv->status); + if (tt->state != IWL_TI_CT_KILL && + iwl_power_update_mode(priv, true)) { + /* TT state not updated + * try again during next temperature read + */ + IWL_ERR(priv, "Cannot update power mode, " + "TT state not updated\n"); + if (old_state == IWL_TI_CT_KILL) + set_bit(STATUS_CT_KILL, &priv->status); + tt->state = old_state; + } else { + IWL_DEBUG_TEMP(priv, + "Thermal Throttling to new state: %u\n", + tt->state); + if (old_state != IWL_TI_CT_KILL && + tt->state == IWL_TI_CT_KILL) { + if (force) { + IWL_DEBUG_TEMP(priv, + "Enter IWL_TI_CT_KILL\n"); + set_bit(STATUS_CT_KILL, &priv->status); + iwl_perform_ct_kill_task(priv, true); + } else { + tt->state = old_state; + iwl_prepare_ct_kill_task(priv); + } + } else if (old_state == IWL_TI_CT_KILL && + tt->state != IWL_TI_CT_KILL) { + IWL_DEBUG_TEMP(priv, "Exit IWL_TI_CT_KILL\n"); + iwl_perform_ct_kill_task(priv, false); + } + } + mutex_unlock(&priv->mutex); + } +} + +/* Card State Notification indicated reach critical temperature + * if PSP not enable, no Thermal Throttling function will be performed + * just set the GP1 bit to acknowledge the event + * otherwise, go into IWL_TI_CT_KILL state + * since Card State Notification will not provide any temperature reading + * for Legacy mode + * so just pass the CT_KILL temperature to iwl_legacy_tt_handler() + * for advance mode + * pass CT_KILL_THRESHOLD+1 to make sure move into IWL_TI_CT_KILL state + */ +static void iwl_bg_ct_enter(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter); + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + if 
(test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!iwl_is_ready(priv)) + return; + + if (tt->state != IWL_TI_CT_KILL) { + IWL_ERR(priv, "Device reached critical temperature " + "- ucode going to sleep!\n"); + if (!priv->thermal_throttle.advanced_tt) + iwl_legacy_tt_handler(priv, + IWL_MINIMAL_POWER_THRESHOLD, + true); + else + iwl_advance_tt_handler(priv, + CT_KILL_THRESHOLD + 1, true); + } +} + +/* Card State Notification indicated out of critical temperature + * since Card State Notification will not provide any temperature reading + * so pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature + * to iwl_legacy_tt_handler() to get out of IWL_CT_KILL state + */ +static void iwl_bg_ct_exit(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit); + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!iwl_is_ready(priv)) + return; + + /* stop ct_kill_exit_tm timer */ + del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); + + if (tt->state == IWL_TI_CT_KILL) { + IWL_ERR(priv, + "Device temperature below critical" + "- ucode awake!\n"); + /* + * exit from CT_KILL state + * reset the current temperature reading + */ + priv->temperature = 0; + if (!priv->thermal_throttle.advanced_tt) + iwl_legacy_tt_handler(priv, + IWL_REDUCED_PERFORMANCE_THRESHOLD_2, + true); + else + iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD, + true); + } +} + +void iwl_tt_enter_ct_kill(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n"); + queue_work(priv->workqueue, &priv->ct_enter); +} + +void iwl_tt_exit_ct_kill(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n"); + queue_work(priv->workqueue, &priv->ct_exit); +} + +static void iwl_bg_tt_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); + s32 temp = priv->temperature; /* degrees CELSIUS except specified */ + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!priv->thermal_throttle.advanced_tt) + iwl_legacy_tt_handler(priv, temp, false); + else + iwl_advance_tt_handler(priv, temp, false); +} + +void iwl_tt_handler(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n"); + queue_work(priv->workqueue, &priv->tt_work); +} + +/* Thermal throttling initialization + * For advance thermal throttling: + * Initialize Thermal Index and temperature threshold table + * Initialize thermal throttling restriction table + */ +void iwl_tt_initialize(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1); + struct iwl_tt_trans *transaction; + + IWL_DEBUG_TEMP(priv, "Initialize Thermal Throttling\n"); + + memset(tt, 0, sizeof(struct iwl_tt_mgmt)); + + tt->state = IWL_TI_0; + setup_timer(&priv->thermal_throttle.ct_kill_exit_tm, + iwl_tt_check_exit_ct_kill, (unsigned long)priv); + setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm, + iwl_tt_ready_for_ct_kill, (unsigned long)priv); + /* setup deferred ct kill work */ + INIT_WORK(&priv->tt_work, iwl_bg_tt_work); + INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); + INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); + + if (priv->lib->adv_thermal_throttle) { 
+ IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); + tt->restriction = kcalloc(IWL_TI_STATE_MAX, + sizeof(struct iwl_tt_restriction), + GFP_KERNEL); + tt->transaction = kcalloc(IWL_TI_STATE_MAX * + (IWL_TI_STATE_MAX - 1), + sizeof(struct iwl_tt_trans), + GFP_KERNEL); + if (!tt->restriction || !tt->transaction) { + IWL_ERR(priv, "Fallback to Legacy Throttling\n"); + priv->thermal_throttle.advanced_tt = false; + kfree(tt->restriction); + tt->restriction = NULL; + kfree(tt->transaction); + tt->transaction = NULL; + } else { + transaction = tt->transaction + + (IWL_TI_0 * (IWL_TI_STATE_MAX - 1)); + memcpy(transaction, &tt_range_0[0], size); + transaction = tt->transaction + + (IWL_TI_1 * (IWL_TI_STATE_MAX - 1)); + memcpy(transaction, &tt_range_1[0], size); + transaction = tt->transaction + + (IWL_TI_2 * (IWL_TI_STATE_MAX - 1)); + memcpy(transaction, &tt_range_2[0], size); + transaction = tt->transaction + + (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1)); + memcpy(transaction, &tt_range_3[0], size); + size = sizeof(struct iwl_tt_restriction) * + IWL_TI_STATE_MAX; + memcpy(tt->restriction, + &restriction_range[0], size); + priv->thermal_throttle.advanced_tt = true; + } + } else { + IWL_DEBUG_TEMP(priv, "Legacy Thermal Throttling\n"); + priv->thermal_throttle.advanced_tt = false; + } +} + +/* cleanup thermal throttling management related memory and timer */ +void iwl_tt_exit(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + /* stop ct_kill_exit_tm timer if activated */ + del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); + /* stop ct_kill_waiting_tm timer if activated */ + del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); + cancel_work_sync(&priv->tt_work); + cancel_work_sync(&priv->ct_enter); + cancel_work_sync(&priv->ct_exit); + + if (priv->thermal_throttle.advanced_tt) { + /* free advance thermal throttling memory */ + kfree(tt->restriction); + tt->restriction = NULL; + kfree(tt->transaction); + tt->transaction = NULL; + } +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h new file mode 100644 index 000000000000..507726534b84 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -0,0 +1,128 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#ifndef __iwl_tt_setting_h__ +#define __iwl_tt_setting_h__ + +#include "commands.h" + +#define IWL_ABSOLUTE_ZERO 0 +#define IWL_ABSOLUTE_MAX 0xFFFFFFFF +#define IWL_TT_INCREASE_MARGIN 5 +#define IWL_TT_CT_KILL_MARGIN 3 + +enum iwl_antenna_ok { + IWL_ANT_OK_NONE, + IWL_ANT_OK_SINGLE, + IWL_ANT_OK_MULTI, +}; + +/* Thermal Throttling State Machine states */ +enum iwl_tt_state { + IWL_TI_0, /* normal temperature, system power state */ + IWL_TI_1, /* high temperature detect, low power state */ + IWL_TI_2, /* higher temperature detected, lower power state */ + IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */ + IWL_TI_STATE_MAX +}; + +/** + * struct iwl_tt_restriction - Thermal Throttling restriction table + * @tx_stream: number of tx stream allowed + * @is_ht: ht enable/disable + * @rx_stream: number of rx stream allowed + * + * This table is used by advance thermal throttling management + * based on the current thermal throttling state, and determines + * the number of tx/rx streams and the status of HT operation. + */ +struct iwl_tt_restriction { + enum iwl_antenna_ok tx_stream; + enum iwl_antenna_ok rx_stream; + bool is_ht; +}; + +/** + * struct iwl_tt_trans - Thermal Throttling transaction table + * @next_state: next thermal throttling mode + * @tt_low: low temperature threshold to change state + * @tt_high: high temperature threshold to change state + * + * This is used by the advanced thermal throttling algorithm + * to determine the next thermal state to go based on the + * current temperature. + */ +struct iwl_tt_trans { + enum iwl_tt_state next_state; + u32 tt_low; + u32 tt_high; +}; + +/** + * struct iwl_tt_mgnt - Thermal Throttling Management structure + * @advanced_tt: advanced thermal throttle required + * @state: current Thermal Throttling state + * @tt_power_mode: Thermal Throttling power mode index + * being used to set power level when + * when thermal throttling state != IWL_TI_0 + * the tt_power_mode should set to different + * power mode based on the current tt state + * @tt_previous_temperature: last measured temperature + * @iwl_tt_restriction: ptr to restriction tbl, used by advance + * thermal throttling to determine how many tx/rx streams + * should be used in tt state; and can HT be enabled or not + * @iwl_tt_trans: ptr to adv trans table, used by advance thermal throttling + * state transaction + * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature + * @ct_kill_exit_tm: timer to exit thermal kill + */ +struct iwl_tt_mgmt { + enum iwl_tt_state state; + bool advanced_tt; + u8 tt_power_mode; + bool ct_kill_toggle; +#ifdef CONFIG_IWLWIFI_DEBUG + s32 tt_previous_temp; +#endif + struct iwl_tt_restriction *restriction; + struct iwl_tt_trans *transaction; + struct timer_list ct_kill_exit_tm; + struct timer_list ct_kill_waiting_tm; +}; + +u8 iwl_tt_current_power_mode(struct iwl_priv *priv); +bool iwl_tt_is_low_power_state(struct iwl_priv *priv); +bool iwl_ht_enabled(struct iwl_priv *priv); +enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv); +enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv); +void iwl_tt_enter_ct_kill(struct iwl_priv *priv); +void iwl_tt_exit_ct_kill(struct iwl_priv *priv); +void iwl_tt_handler(struct iwl_priv *priv); +void iwl_tt_initialize(struct iwl_priv *priv); +void iwl_tt_exit(struct iwl_priv *priv); + +#endif /* __iwl_tt_setting_h__ */ diff --git 
a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c new file mode 100644 index 000000000000..bddd19769035 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -0,0 +1,1412 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include "iwl-io.h" +#include "iwl-trans.h" +#include "iwl-agn-hw.h" +#include "dev.h" +#include "agn.h" + +static const u8 tid_to_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO, +}; + +static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags) +{ + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS || + info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT || + info->flags & IEEE80211_TX_CTL_AMPDU) + *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; +} + +/* + * handle build REPLY_TX command notification. 
+ */ +static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, + struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, u8 sta_id) +{ + __le16 fc = hdr->frame_control; + __le32 tx_flags = tx_cmd->tx_flags; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + tx_flags |= TX_CMD_FLG_ACK_MSK; + else + tx_flags &= ~TX_CMD_FLG_ACK_MSK; + + if (ieee80211_is_probe_resp(fc)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + else if (ieee80211_is_back_req(fc)) + tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; + else if (info->band == IEEE80211_BAND_2GHZ && + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && + (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || + ieee80211_is_reassoc_req(fc) || + info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) + tx_flags |= TX_CMD_FLG_IGNORE_BT; + + + tx_cmd->sta_id = sta_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_cmd->tid_tspec = IWL_TID_NON_QOS; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + else + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } + + iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags); + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + __le16 fc) +{ + u32 rate_flags; + int rate_idx; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 rate_plcp; + + if (priv->wowlan) { + rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT; + data_retry_limit = IWLAGN_LOW_RETRY_LIMIT; + } else { + /* Set retry limit on RTS packets */ + rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) { + data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT; + rts_retry_limit = + min(data_retry_limit, rts_retry_limit); + } else if (ieee80211_is_back_req(fc)) + data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT; + else + data_retry_limit = IWLAGN_DEFAULT_TX_RETRY; + } + + tx_cmd->data_retry_limit = data_retry_limit; + tx_cmd->rts_retry_limit = rts_retry_limit; + + /* DATA packets will use the uCode station table for rate/antenna + * selection */ + if (ieee80211_is_data(fc)) { + tx_cmd->initial_rate_index = 0; + tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + return; + } else if (ieee80211_is_back_req(fc)) + tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + + /** + * If the current TX rate stored in mac80211 has the MCS bit set, it's + * not really a TX rate. Thus, we use the lowest supported rate for + * this band. Also use the lowest supported rate if the stored rate + * index is invalid. 
+ */ + rate_idx = info->control.rates[0].idx; + if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || + (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) + rate_idx = rate_lowest_index( + &priv->nvm_data->bands[info->band], sta); + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_rates[rate_idx].plcp; + /* Zero out flags for this packet */ + rate_flags = 0; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + /* Set up antennas */ + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && + priv->bt_full_concurrent) { + /* operated as 1x1 in full concurrency mode */ + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, + first_antenna(priv->nvm_data->valid_tx_ant)); + } else + priv->mgmt_tx_ant = iwl_toggle_tx_ant( + priv, priv->mgmt_tx_ant, + priv->nvm_data->valid_tx_ant); + rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); + + /* Set the rate in the TX cmd */ + tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); +} + +static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; + break; + + case WLAN_CIPHER_SUITE_TKIP: + tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | + (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); + + memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); + + IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " + "with key %d\n", keyconf->keyidx); + break; + + default: + IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher); + break; + } +} + +/** + * iwl_sta_id_or_broadcast - return sta_id or broadcast sta + * @context: the current context + * @sta: mac80211 station + * + * In certain circumstances mac80211 passes a station pointer + * that may be %NULL, for example during TX or key setup. In + * that case, we need to use the broadcast station, so this + * inline wraps that pattern. + */ +static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context, + struct ieee80211_sta *sta) +{ + int sta_id; + + if (!sta) + return context->bcast_sta_id; + + sta_id = iwl_sta_id(sta); + + /* + * mac80211 should not be passing a partially + * initialised station! 
+ */ + WARN_ON(sta_id == IWL_INVALID_STATION); + + return sta_id; +} + +/* + * start REPLY_TX command process + */ +int iwlagn_tx_skb(struct iwl_priv *priv, + struct ieee80211_sta *sta, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_station_priv *sta_priv = NULL; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_device_cmd *dev_cmd; + struct iwl_tx_cmd *tx_cmd; + __le16 fc; + u8 hdr_len; + u16 len, seq_number = 0; + u8 sta_id, tid = IWL_MAX_TID_COUNT; + bool is_agg = false, is_data_qos = false; + int txq_id; + + if (info->control.vif) + ctx = iwl_rxon_ctx_from_vif(info->control.vif); + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); + goto drop_unlock_priv; + } + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); +#endif + + if (unlikely(ieee80211_is_probe_resp(fc))) { + struct iwl_wipan_noa_data *noa_data = + rcu_dereference(priv->noa_data); + + if (noa_data && + pskb_expand_head(skb, 0, noa_data->length, + GFP_ATOMIC) == 0) { + memcpy(skb_put(skb, noa_data->length), + noa_data->data, noa_data->length); + hdr = (struct ieee80211_hdr *)skb->data; + } + } + + hdr_len = ieee80211_hdrlen(fc); + + /* For management frames use broadcast id to do not break aggregation */ + if (!ieee80211_is_data(fc)) + sta_id = ctx->bcast_sta_id; + else { + /* Find index into station table for destination station */ + sta_id = iwl_sta_id_or_broadcast(ctx, sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop_unlock_priv; + } + } + + if (sta) + sta_priv = (void *)sta->drv_priv; + + if (sta_priv && sta_priv->asleep && + (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) { + /* + * This sends an asynchronous command to the device, + * but we can rely on it being processed before the + * next frame is processed -- and the next frame to + * this station is the one that will consume this + * counter. + * For now set the counter to just 1 since we do not + * support uAPSD yet. + * + * FIXME: If we get two non-bufferable frames one + * after the other, we might only send out one of + * them because this is racy. 
+ */ + iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); + } + + dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans); + + if (unlikely(!dev_cmd)) + goto drop_unlock_priv; + + memset(dev_cmd, 0, sizeof(*dev_cmd)); + dev_cmd->hdr.cmd = REPLY_TX; + tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + if (info->control.hw_key) + iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb); + + /* TODO need this for burst mode later on */ + iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); + + iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc); + + memset(&info->status, 0, sizeof(info->status)); + + info->driver_data[0] = ctx; + info->driver_data[1] = dev_cmd; + /* From now on, we cannot access info->control */ + + spin_lock(&priv->sta_lock); + + if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { + u8 *qc = NULL; + struct iwl_tid_data *tid_data; + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + goto drop_unlock_sta; + tid_data = &priv->tid_data[sta_id][tid]; + + /* aggregation is on for this */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + tid_data->agg.state != IWL_AGG_ON) { + IWL_ERR(priv, + "TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n", + info->flags, tid_data->agg.state); + IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n", + sta_id, tid, + IEEE80211_SEQ_TO_SN(tid_data->seq_number)); + goto drop_unlock_sta; + } + + /* We can receive packets from the stack in IWL_AGG_{ON,OFF} + * only. Check this here. + */ + if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON && + tid_data->agg.state != IWL_AGG_OFF, + "Tx while agg.state = %d\n", tid_data->agg.state)) + goto drop_unlock_sta; + + seq_number = tid_data->seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + + if (info->flags & IEEE80211_TX_CTL_AMPDU) + is_agg = true; + is_data_qos = true; + } + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + txq_id = info->hw_queue; + + if (is_agg) + txq_id = priv->tid_data[sta_id][tid].agg.txq_id; + else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + /* + * The microcode will clear the more data + * bit in the last frame it transmits. + */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + + WARN_ON_ONCE(is_agg && + priv->queue_to_mac80211[txq_id] != info->hw_queue); + + IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid, + txq_id, seq_number); + + if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id)) + goto drop_unlock_sta; + + if (is_data_qos && !ieee80211_has_morefrags(fc)) + priv->tid_data[sta_id][tid].seq_number = seq_number; + + spin_unlock(&priv->sta_lock); + + /* + * Avoid atomic ops if it isn't an associated client. + * Also, if this is a packet for aggregation, don't + * increase the counter because the ucode will stop + * aggregation queues when their respective station + * goes to sleep. 
+ */ + if (sta_priv && sta_priv->client && !is_agg) + atomic_inc(&sta_priv->pending_frames); + + return 0; + +drop_unlock_sta: + if (dev_cmd) + iwl_trans_free_tx_cmd(priv->trans, dev_cmd); + spin_unlock(&priv->sta_lock); +drop_unlock_priv: + return -1; +} + +static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq) +{ + int q; + + for (q = IWLAGN_FIRST_AMPDU_QUEUE; + q < priv->cfg->base_params->num_of_queues; q++) { + if (!test_and_set_bit(q, priv->agg_q_alloc)) { + priv->queue_to_mac80211[q] = mq; + return q; + } + } + + return -ENOSPC; +} + +static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q) +{ + clear_bit(q, priv->agg_q_alloc); + priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE; +} + +int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + struct iwl_tid_data *tid_data; + int sta_id, txq_id; + enum iwl_agg_state agg_state; + + sta_id = iwl_sta_id(sta); + + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_bh(&priv->sta_lock); + + tid_data = &priv->tid_data[sta_id][tid]; + txq_id = tid_data->agg.txq_id; + + switch (tid_data->agg.state) { + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* + * This can happen if the peer stops aggregation + * again before we've had a chance to drain the + * queue we selected previously, i.e. before the + * session was really started completely. + */ + IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); + goto turn_off; + case IWL_AGG_STARTING: + /* + * This can happen when the session is stopped before + * we receive ADDBA response + */ + IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n"); + goto turn_off; + case IWL_AGG_ON: + break; + default: + IWL_WARN(priv, + "Stopping AGG while state not ON or starting for %d on %d (%d)\n", + sta_id, tid, tid_data->agg.state); + spin_unlock_bh(&priv->sta_lock); + return 0; + } + + tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); + + /* There are still packets for this RA / TID in the HW */ + if (!test_bit(txq_id, priv->agg_q_alloc)) { + IWL_DEBUG_TX_QUEUES(priv, + "stopping AGG on STA/TID %d/%d but hwq %d not used\n", + sta_id, tid, txq_id); + } else if (tid_data->agg.ssn != tid_data->next_reclaimed) { + IWL_DEBUG_TX_QUEUES(priv, + "Can't proceed: ssn %d, next_recl = %d\n", + tid_data->agg.ssn, + tid_data->next_reclaimed); + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA; + spin_unlock_bh(&priv->sta_lock); + return 0; + } + + IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n", + tid_data->agg.ssn); +turn_off: + agg_state = tid_data->agg.state; + tid_data->agg.state = IWL_AGG_OFF; + + spin_unlock_bh(&priv->sta_lock); + + if (test_bit(txq_id, priv->agg_q_alloc)) { + /* + * If the transport didn't know that we wanted to start + * agreggation, don't tell it that we want to stop them. + * This can happen when we don't get the addBA response on + * time, or we hadn't time to drain the AC queues. 
+ */ + if (agg_state == IWL_AGG_ON) + iwl_trans_txq_disable(priv->trans, txq_id, true); + else + IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", + agg_state); + iwlagn_dealloc_agg_txq(priv, txq_id); + } + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + + return 0; +} + +int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + struct iwl_tid_data *tid_data; + int sta_id, txq_id, ret; + + IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n", + sta->addr, tid); + + sta_id = iwl_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Start AGG on invalid station\n"); + return -ENXIO; + } + if (unlikely(tid >= IWL_MAX_TID_COUNT)) + return -EINVAL; + + if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) { + IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); + return -ENXIO; + } + + txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]); + if (txq_id < 0) { + IWL_DEBUG_TX_QUEUES(priv, + "No free aggregation queue for %pM/%d\n", + sta->addr, tid); + return txq_id; + } + + ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); + if (ret) + return ret; + + spin_lock_bh(&priv->sta_lock); + tid_data = &priv->tid_data[sta_id][tid]; + tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; + + *ssn = tid_data->agg.ssn; + + if (*ssn == tid_data->next_reclaimed) { + IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n", + tid_data->agg.ssn); + tid_data->agg.state = IWL_AGG_STARTING; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + } else { + IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " + "next_reclaimed = %d\n", + tid_data->agg.ssn, + tid_data->next_reclaimed); + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + spin_unlock_bh(&priv->sta_lock); + + return ret; +} + +int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + struct iwl_tid_data *tid_data; + enum iwl_agg_state agg_state; + int sta_id, txq_id; + sta_id = iwl_sta_id(sta); + + /* + * First set the agg state to OFF to avoid calling + * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty. + */ + spin_lock_bh(&priv->sta_lock); + + tid_data = &priv->tid_data[sta_id][tid]; + txq_id = tid_data->agg.txq_id; + agg_state = tid_data->agg.state; + IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n", + sta_id, tid, txq_id, tid_data->agg.state); + + tid_data->agg.state = IWL_AGG_OFF; + + spin_unlock_bh(&priv->sta_lock); + + if (iwlagn_txfifo_flush(priv, BIT(txq_id))) + IWL_ERR(priv, "Couldn't flush the AGG queue\n"); + + if (test_bit(txq_id, priv->agg_q_alloc)) { + /* + * If the transport didn't know that we wanted to start + * agreggation, don't tell it that we want to stop them. + * This can happen when we don't get the addBA response on + * time, or we hadn't time to drain the AC queues. 
+ */ + if (agg_state == IWL_AGG_ON) + iwl_trans_txq_disable(priv->trans, txq_id, true); + else + IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", + agg_state); + iwlagn_dealloc_agg_txq(priv, txq_id); + } + + return 0; +} + +int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u8 buf_size) +{ + struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + int q, fifo; + u16 ssn; + + buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); + + spin_lock_bh(&priv->sta_lock); + ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn; + q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id; + priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON; + spin_unlock_bh(&priv->sta_lock); + + fifo = ctx->ac_to_fifo[tid_to_ac[tid]]; + + iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid, + buf_size, ssn, 0); + + /* + * If the limit is 0, then it wasn't initialised yet, + * use the default. We can do that since we take the + * minimum below, and we don't want to go above our + * default due to hardware restrictions. + */ + if (sta_priv->max_agg_bufsize == 0) + sta_priv->max_agg_bufsize = + LINK_QUAL_AGG_FRAME_LIMIT_DEF; + + /* + * Even though in theory the peer could have different + * aggregation reorder buffer sizes for different sessions, + * our ucode doesn't allow for that and has a global limit + * for each station. Therefore, use the minimum of all the + * aggregation sessions and our default value. + */ + sta_priv->max_agg_bufsize = + min(sta_priv->max_agg_bufsize, buf_size); + + if (priv->hw_params.use_rts_for_aggregation) { + /* + * switch to RTS/CTS if it is the prefer protection + * method for HT traffic + */ + + sta_priv->lq_sta.lq.general_params.flags |= + LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; + } + priv->agg_tids_count++; + IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", + priv->agg_tids_count); + + sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit = + sta_priv->max_agg_bufsize; + + IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n", + sta->addr, tid); + + return iwl_send_lq_cmd(priv, ctx, + &sta_priv->lq_sta.lq, CMD_ASYNC, false); +} + +static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid) +{ + struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid]; + enum iwl_rxon_context_id ctx; + struct ieee80211_vif *vif; + u8 *addr; + + lockdep_assert_held(&priv->sta_lock); + + addr = priv->stations[sta_id].sta.sta.addr; + ctx = priv->stations[sta_id].ctxid; + vif = priv->contexts[ctx].vif; + + switch (priv->tid_data[sta_id][tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_DELBA: + /* There are no packets for this RA / TID in the HW any more */ + if (tid_data->agg.ssn == tid_data->next_reclaimed) { + IWL_DEBUG_TX_QUEUES(priv, + "Can continue DELBA flow ssn = next_recl = %d\n", + tid_data->next_reclaimed); + iwl_trans_txq_disable(priv->trans, + tid_data->agg.txq_id, true); + iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id); + tid_data->agg.state = IWL_AGG_OFF; + ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); + } + break; + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* There are no packets for this RA / TID in the HW any more */ + if (tid_data->agg.ssn == tid_data->next_reclaimed) { + IWL_DEBUG_TX_QUEUES(priv, + "Can continue ADDBA flow ssn = next_recl = %d\n", + tid_data->next_reclaimed); + tid_data->agg.state = IWL_AGG_STARTING; + ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); + } + break; + default: + 
break; + } +} + +static void iwlagn_non_agg_tx_status(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr1) +{ + struct ieee80211_sta *sta; + struct iwl_station_priv *sta_priv; + + rcu_read_lock(); + sta = ieee80211_find_sta(ctx->vif, addr1); + if (sta) { + sta_priv = (void *)sta->drv_priv; + /* avoid atomic ops if this isn't a client */ + if (sta_priv->client && + atomic_dec_return(&sta_priv->pending_frames) == 0) + ieee80211_sta_block_awake(priv->hw, sta, false); + } + rcu_read_unlock(); +} + +/** + * translate ucode response to mac80211 tx status control values + */ +static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *r = &info->status.rates[0]; + + info->status.antenna = + ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); + if (rate_n_flags & RATE_MCS_HT_MSK) + r->flags |= IEEE80211_TX_RC_MCS; + if (rate_n_flags & RATE_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + if (rate_n_flags & RATE_MCS_HT40_MSK) + r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (rate_n_flags & RATE_MCS_DUP_MSK) + r->flags |= IEEE80211_TX_RC_DUP_DATA; + if (rate_n_flags & RATE_MCS_SGI_MSK) + r->flags |= IEEE80211_TX_RC_SHORT_GI; + r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +const char *iwl_get_tx_fail_reason(u32 status) +{ +#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x +#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x + + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_POSTPONE(DELAY); + TX_STATUS_POSTPONE(FEW_BYTES); + TX_STATUS_POSTPONE(BT_PRIO); + TX_STATUS_POSTPONE(QUIET_PERIOD); + TX_STATUS_POSTPONE(CALC_TTAK); + TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); + TX_STATUS_FAIL(SHORT_LIMIT); + TX_STATUS_FAIL(LONG_LIMIT); + TX_STATUS_FAIL(FIFO_UNDERRUN); + TX_STATUS_FAIL(DRAIN_FLOW); + TX_STATUS_FAIL(RFKILL_FLUSH); + TX_STATUS_FAIL(LIFE_EXPIRE); + TX_STATUS_FAIL(DEST_PS); + TX_STATUS_FAIL(HOST_ABORTED); + TX_STATUS_FAIL(BT_RETRY); + TX_STATUS_FAIL(STA_INVALID); + TX_STATUS_FAIL(FRAG_DROPPED); + TX_STATUS_FAIL(TID_DISABLE); + TX_STATUS_FAIL(FIFO_FLUSHED); + TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); + TX_STATUS_FAIL(PASSIVE_NO_RX); + TX_STATUS_FAIL(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; + +#undef TX_STATUS_FAIL +#undef TX_STATUS_POSTPONE +} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status) +{ + status &= AGG_TX_STATUS_MSK; + + switch (status) { + case AGG_TX_STATE_UNDERRUN_MSK: + priv->reply_agg_tx_stats.underrun++; + break; + case AGG_TX_STATE_BT_PRIO_MSK: + priv->reply_agg_tx_stats.bt_prio++; + break; + case AGG_TX_STATE_FEW_BYTES_MSK: + priv->reply_agg_tx_stats.few_bytes++; + break; + case AGG_TX_STATE_ABORT_MSK: + priv->reply_agg_tx_stats.abort++; + break; + case AGG_TX_STATE_LAST_SENT_TTL_MSK: + priv->reply_agg_tx_stats.last_sent_ttl++; + break; + case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK: + priv->reply_agg_tx_stats.last_sent_try++; + break; + case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK: + priv->reply_agg_tx_stats.last_sent_bt_kill++; + break; + case AGG_TX_STATE_SCD_QUERY_MSK: + priv->reply_agg_tx_stats.scd_query++; + break; + case AGG_TX_STATE_TEST_BAD_CRC32_MSK: + priv->reply_agg_tx_stats.bad_crc32++; + break; + case AGG_TX_STATE_RESPONSE_MSK: + priv->reply_agg_tx_stats.response++; + break; + case AGG_TX_STATE_DUMP_TX_MSK: + priv->reply_agg_tx_stats.dump_tx++; + break; + case 
AGG_TX_STATE_DELAY_TX_MSK: + priv->reply_agg_tx_stats.delay_tx++; + break; + default: + priv->reply_agg_tx_stats.unknown++; + break; + } +} + +static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) +{ + return le32_to_cpup((__le32 *)&tx_resp->status + + tx_resp->frame_count) & IEEE80211_MAX_SN; +} + +static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, + struct iwlagn_tx_resp *tx_resp) +{ + struct agg_tx_status *frame_status = &tx_resp->status; + int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> + IWLAGN_TX_RES_TID_POS; + int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> + IWLAGN_TX_RES_RA_POS; + struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg; + u32 status = le16_to_cpu(tx_resp->status.status); + int i; + + WARN_ON(tid == IWL_TID_NON_QOS); + + if (agg->wait_for_ba) + IWL_DEBUG_TX_REPLY(priv, + "got tx response w/o block-ack\n"); + + agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + agg->wait_for_ba = (tx_resp->frame_count > 1); + + /* + * If the BT kill count is non-zero, we'll get this + * notification again. + */ + if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { + IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); + } + + if (tx_resp->frame_count == 1) + return; + + IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n", + agg->txq_id, + le32_to_cpu(tx_resp->rate_n_flags), + iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count); + + /* Construct bit-map of pending frames within Tx window */ + for (i = 0; i < tx_resp->frame_count; i++) { + u16 fstatus = le16_to_cpu(frame_status[i].status); + u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS; + + if (status & AGG_TX_STATUS_MSK) + iwlagn_count_agg_tx_err_status(priv, fstatus); + + if (status & (AGG_TX_STATE_FEW_BYTES_MSK | + AGG_TX_STATE_ABORT_MSK)) + continue; + + if (status & AGG_TX_STATUS_MSK || retry_cnt > 1) + IWL_DEBUG_TX_REPLY(priv, + "%d: status %s (0x%04x), try-count (0x%01x)\n", + i, + iwl_get_agg_tx_fail_reason(fstatus), + fstatus & AGG_TX_STATUS_MSK, + retry_cnt); + } +} + +#ifdef CONFIG_IWLWIFI_DEBUG +#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x + +const char *iwl_get_agg_tx_fail_reason(u16 status) +{ + status &= AGG_TX_STATUS_MSK; + switch (status) { + case AGG_TX_STATE_TRANSMITTED: + return "SUCCESS"; + AGG_TX_STATE_FAIL(UNDERRUN_MSK); + AGG_TX_STATE_FAIL(BT_PRIO_MSK); + AGG_TX_STATE_FAIL(FEW_BYTES_MSK); + AGG_TX_STATE_FAIL(ABORT_MSK); + AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK); + AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK); + AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK); + AGG_TX_STATE_FAIL(SCD_QUERY_MSK); + AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK); + AGG_TX_STATE_FAIL(RESPONSE_MSK); + AGG_TX_STATE_FAIL(DUMP_TX_MSK); + AGG_TX_STATE_FAIL(DELAY_TX_MSK); + } + + return "UNKNOWN"; +} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status) +{ + status &= TX_STATUS_MSK; + + switch (status) { + case TX_STATUS_POSTPONE_DELAY: + priv->reply_tx_stats.pp_delay++; + break; + case TX_STATUS_POSTPONE_FEW_BYTES: + priv->reply_tx_stats.pp_few_bytes++; + break; + case TX_STATUS_POSTPONE_BT_PRIO: + priv->reply_tx_stats.pp_bt_prio++; + break; + case TX_STATUS_POSTPONE_QUIET_PERIOD: + priv->reply_tx_stats.pp_quiet_period++; + break; + case TX_STATUS_POSTPONE_CALC_TTAK: + priv->reply_tx_stats.pp_calc_ttak++; + break; + case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: + priv->reply_tx_stats.int_crossed_retry++; + break; + case 
TX_STATUS_FAIL_SHORT_LIMIT: + priv->reply_tx_stats.short_limit++; + break; + case TX_STATUS_FAIL_LONG_LIMIT: + priv->reply_tx_stats.long_limit++; + break; + case TX_STATUS_FAIL_FIFO_UNDERRUN: + priv->reply_tx_stats.fifo_underrun++; + break; + case TX_STATUS_FAIL_DRAIN_FLOW: + priv->reply_tx_stats.drain_flow++; + break; + case TX_STATUS_FAIL_RFKILL_FLUSH: + priv->reply_tx_stats.rfkill_flush++; + break; + case TX_STATUS_FAIL_LIFE_EXPIRE: + priv->reply_tx_stats.life_expire++; + break; + case TX_STATUS_FAIL_DEST_PS: + priv->reply_tx_stats.dest_ps++; + break; + case TX_STATUS_FAIL_HOST_ABORTED: + priv->reply_tx_stats.host_abort++; + break; + case TX_STATUS_FAIL_BT_RETRY: + priv->reply_tx_stats.bt_retry++; + break; + case TX_STATUS_FAIL_STA_INVALID: + priv->reply_tx_stats.sta_invalid++; + break; + case TX_STATUS_FAIL_FRAG_DROPPED: + priv->reply_tx_stats.frag_drop++; + break; + case TX_STATUS_FAIL_TID_DISABLE: + priv->reply_tx_stats.tid_disable++; + break; + case TX_STATUS_FAIL_FIFO_FLUSHED: + priv->reply_tx_stats.fifo_flush++; + break; + case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL: + priv->reply_tx_stats.insuff_cf_poll++; + break; + case TX_STATUS_FAIL_PASSIVE_NO_RX: + priv->reply_tx_stats.fail_hw_drop++; + break; + case TX_STATUS_FAIL_NO_BEACON_ON_RADAR: + priv->reply_tx_stats.sta_color_mismatch++; + break; + default: + priv->reply_tx_stats.unknown++; + break; + } +} + +static void iwlagn_set_tx_status(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwlagn_tx_resp *tx_resp) +{ + u16 status = le16_to_cpu(tx_resp->status.status); + + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags |= iwl_tx_status_to_mac80211(status); + iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), + info); + if (!iwl_is_tx_success(status)) + iwlagn_count_tx_err_status(priv, status); +} + +static void iwl_check_abort_status(struct iwl_priv *priv, + u8 frame_count, u32 status) +{ + if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { + IWL_ERR(priv, "Tx flush command to flush out all frames\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->tx_flush); + } +} + +void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence); + struct iwlagn_tx_resp *tx_resp = (void *)pkt->data; + struct ieee80211_hdr *hdr; + u32 status = le16_to_cpu(tx_resp->status.status); + u16 ssn = iwlagn_get_scd_ssn(tx_resp); + int tid; + int sta_id; + int freed; + struct ieee80211_tx_info *info; + struct sk_buff_head skbs; + struct sk_buff *skb; + struct iwl_rxon_context *ctx; + bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); + + tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> + IWLAGN_TX_RES_TID_POS; + sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> + IWLAGN_TX_RES_RA_POS; + + spin_lock_bh(&priv->sta_lock); + + if (is_agg) { + WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT || + tid >= IWL_MAX_TID_COUNT); + if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id) + IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id, + priv->tid_data[sta_id][tid].agg.txq_id); + iwl_rx_reply_tx_agg(priv, tx_resp); + } + + __skb_queue_head_init(&skbs); + + if (tx_resp->frame_count == 1) { + u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); + next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10); + + if 
(is_agg) { + /* If this is an aggregation queue, we can rely on the + * ssn since the wifi sequence number corresponds to + * the index in the TFD ring (%256). + * The seq_ctl is the sequence control of the packet + * to which this Tx response relates. But if there is a + * hole in the bitmap of the BA we received, this Tx + * response may allow to reclaim the hole and all the + * subsequent packets that were already acked. + * In that case, seq_ctl != ssn, and the next packet + * to be reclaimed will be ssn and not seq_ctl. + */ + next_reclaimed = ssn; + } + + if (tid != IWL_TID_NON_QOS) { + priv->tid_data[sta_id][tid].next_reclaimed = + next_reclaimed; + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", + next_reclaimed); + } + + iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); + + iwlagn_check_ratid_empty(priv, sta_id, tid); + freed = 0; + + /* process frames */ + skb_queue_walk(&skbs, skb) { + hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + priv->last_seq_ctl = tx_resp->seq_ctl; + + info = IEEE80211_SKB_CB(skb); + ctx = info->driver_data[0]; + iwl_trans_free_tx_cmd(priv->trans, + info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); + + if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && + ctx->vif && + ctx->vif->type == NL80211_IFTYPE_STATION) { + /* block and stop all queues */ + priv->passive_no_rx = true; + IWL_DEBUG_TX_QUEUES(priv, + "stop all queues: passive channel\n"); + ieee80211_stop_queues(priv->hw); + + IWL_DEBUG_TX_REPLY(priv, + "TXQ %d status %s (0x%08x) " + "rate_n_flags 0x%x retries %d\n", + txq_id, + iwl_get_tx_fail_reason(status), + status, + le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame); + + IWL_DEBUG_TX_REPLY(priv, + "FrameCnt = %d, idx=%d\n", + tx_resp->frame_count, cmd_index); + } + + /* check if BAR is needed */ + if (is_agg && !iwl_is_tx_success(status)) + info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb), + tx_resp); + if (!is_agg) + iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); + + freed++; + } + + if (tid != IWL_TID_NON_QOS) { + priv->tid_data[sta_id][tid].next_reclaimed = + next_reclaimed; + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", + next_reclaimed); + } + + if (!is_agg && freed != 1) + IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed); + + IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id, + iwl_get_tx_fail_reason(status), status); + + IWL_DEBUG_TX_REPLY(priv, + "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n", + le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame, + SEQ_TO_INDEX(sequence), ssn, + le16_to_cpu(tx_resp->seq_ctl)); + } + + iwl_check_abort_status(priv, tx_resp->frame_count, status); + spin_unlock_bh(&priv->sta_lock); + + while (!skb_queue_empty(&skbs)) { + skb = __skb_dequeue(&skbs); + ieee80211_tx_status(priv->hw, skb); + } +} + +/** + * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA + * + * Handles block-acknowledge notification from device, which reports success + * of frames sent via aggregation. 
+ */ +void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; + struct iwl_ht_agg *agg; + struct sk_buff_head reclaimed_skbs; + struct sk_buff *skb; + int sta_id; + int tid; + int freed; + + /* "flow" corresponds to Tx queue */ + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + + /* "ssn" is start of block-ack Tx window, corresponds to index + * (in Tx queue's circular buffer) of first TFD/frame in window */ + u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); + + if (scd_flow >= priv->cfg->base_params->num_of_queues) { + IWL_ERR(priv, + "BUG_ON scd_flow is bigger than number of queues\n"); + return; + } + + sta_id = ba_resp->sta_id; + tid = ba_resp->tid; + agg = &priv->tid_data[sta_id][tid].agg; + + spin_lock_bh(&priv->sta_lock); + + if (unlikely(!agg->wait_for_ba)) { + if (unlikely(ba_resp->bitmap)) + IWL_ERR(priv, "Received BA when not expected\n"); + spin_unlock_bh(&priv->sta_lock); + return; + } + + if (unlikely(scd_flow != agg->txq_id)) { + /* + * FIXME: this is a uCode bug which need to be addressed, + * log the information and return for now. + * Since it is can possibly happen very often and in order + * not to fill the syslog, don't use IWL_ERR or IWL_WARN + */ + IWL_DEBUG_TX_QUEUES(priv, + "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n", + scd_flow, sta_id, tid, agg->txq_id); + spin_unlock_bh(&priv->sta_lock); + return; + } + + __skb_queue_head_init(&reclaimed_skbs); + + /* Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). */ + iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn, + &reclaimed_skbs); + + IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " + "sta_id = %d\n", + agg->wait_for_ba, + (u8 *) &ba_resp->sta_addr_lo32, + ba_resp->sta_id); + IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " + "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", + ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl), + (unsigned long long)le64_to_cpu(ba_resp->bitmap), + scd_flow, ba_resp_scd_ssn, ba_resp->txed, + ba_resp->txed_2_done); + + /* Mark that the expected block-ack response arrived */ + agg->wait_for_ba = false; + + /* Sanity check values reported by uCode */ + if (ba_resp->txed_2_done > ba_resp->txed) { + IWL_DEBUG_TX_REPLY(priv, + "bogus sent(%d) and ack(%d) count\n", + ba_resp->txed, ba_resp->txed_2_done); + /* + * set txed_2_done = txed, + * so it won't impact rate scale + */ + ba_resp->txed = ba_resp->txed_2_done; + } + + priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn; + + iwlagn_check_ratid_empty(priv, sta_id, tid); + freed = 0; + + skb_queue_walk(&reclaimed_skbs, skb) { + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (ieee80211_is_data_qos(hdr->frame_control)) + freed++; + else + WARN_ON_ONCE(1); + + iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); + /* Packet was transmitted successfully, failures come as single + * frames because before failing a frame the firmware transmits + * it without aggregation at least once. 
+ */ + info->flags |= IEEE80211_TX_STAT_ACK; + + if (freed == 1) { + /* this is the first skb we deliver in this batch */ + /* put the rate scaling data there */ + info = IEEE80211_SKB_CB(skb); + memset(&info->status, 0, sizeof(info->status)); + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_len = ba_resp->txed_2_done; + info->status.ampdu_len = ba_resp->txed; + iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, + info); + } + } + + spin_unlock_bh(&priv->sta_lock); + + while (!skb_queue_empty(&reclaimed_skbs)) { + skb = __skb_dequeue(&reclaimed_skbs); + ieee80211_tx_status(priv->hw, skb); + } +} diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c new file mode 100644 index 000000000000..931a8e4269ef --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c @@ -0,0 +1,452 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include + +#include "iwl-io.h" +#include "iwl-agn-hw.h" +#include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-op-mode.h" + +#include "dev.h" +#include "agn.h" +#include "calib.h" + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static inline const struct fw_img * +iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type) +{ + if (ucode_type >= IWL_UCODE_TYPE_MAX) + return NULL; + + return &priv->fw->img[ucode_type]; +} + +/* + * Calibration + */ +static int iwl_set_Xtal_calib(struct iwl_priv *priv) +{ + struct iwl_calib_xtal_freq_cmd cmd; + __le16 *xtal_calib = priv->nvm_data->xtal_calib; + + iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); + cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); + cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); + return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd)); +} + +static int iwl_set_temperature_offset_calib(struct iwl_priv *priv) +{ + struct iwl_calib_temperature_offset_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); + cmd.radio_sensor_offset = priv->nvm_data->raw_temperature; + if (!(cmd.radio_sensor_offset)) + cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; + + IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n", + le16_to_cpu(cmd.radio_sensor_offset)); + return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd)); +} + +static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv) +{ + struct iwl_calib_temperature_offset_v2_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); + cmd.radio_sensor_offset_high = priv->nvm_data->kelvin_temperature; + cmd.radio_sensor_offset_low = priv->nvm_data->raw_temperature; + if (!cmd.radio_sensor_offset_low) { + IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n"); + cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET; + cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET; + } + cmd.burntVoltageRef = priv->nvm_data->calib_voltage; + + IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n", + le16_to_cpu(cmd.radio_sensor_offset_high)); + IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n", + le16_to_cpu(cmd.radio_sensor_offset_low)); + IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n", + le16_to_cpu(cmd.burntVoltageRef)); + + return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd)); +} + +static int iwl_send_calib_cfg(struct iwl_priv *priv) +{ + struct iwl_calib_cfg_cmd calib_cfg_cmd; + struct iwl_host_cmd cmd = { + .id = CALIBRATION_CFG_CMD, + .len = { sizeof(struct iwl_calib_cfg_cmd), }, + .data = { &calib_cfg_cmd, }, + }; + + memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); + calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; + calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL; + calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL; + calib_cfg_cmd.ucd_calib_cfg.flags = + IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK; + + return iwl_dvm_send_cmd(priv, &cmd); +} + +int iwl_init_alive_start(struct iwl_priv *priv) +{ + int ret; + + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { + /* + * Tell uCode we are ready to perform calibration + * need to perform this before any calibration + * no need to close the 
envlope since we are going + * to load the runtime uCode later. + */ + ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, + BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); + if (ret) + return ret; + + } + + ret = iwl_send_calib_cfg(priv); + if (ret) + return ret; + + /** + * temperature offset calibration is only needed for runtime ucode, + * so prepare the value now. + */ + if (priv->lib->need_temp_offset_calib) { + if (priv->lib->temp_offset_v2) + return iwl_set_temperature_offset_calib_v2(priv); + else + return iwl_set_temperature_offset_calib(priv); + } + + return 0; +} + +static int iwl_send_wimax_coex(struct iwl_priv *priv) +{ + struct iwl_wimax_coex_cmd coex_cmd; + + /* coexistence is disabled */ + memset(&coex_cmd, 0, sizeof(coex_cmd)); + + return iwl_dvm_send_cmd_pdu(priv, + COEX_PRIORITY_TABLE_CMD, 0, + sizeof(coex_cmd), &coex_cmd); +} + +static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { + ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + ((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + ((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | + (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), + 0, 0, 0, 0, 0, 0, 0 +}; + +void iwl_send_prio_tbl(struct iwl_priv *priv) +{ + struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd; + + memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl, + sizeof(iwl_bt_prio_tbl)); + if (iwl_dvm_send_cmd_pdu(priv, + REPLY_BT_COEX_PRIO_TABLE, 0, + sizeof(prio_tbl_cmd), &prio_tbl_cmd)) + IWL_ERR(priv, "failed to send BT prio tbl command\n"); +} + +int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) +{ + struct iwl_bt_coex_prot_env_cmd env_cmd; + int ret; + + env_cmd.action = action; + env_cmd.type = type; + ret = iwl_dvm_send_cmd_pdu(priv, + REPLY_BT_COEX_PROT_ENV, 0, + sizeof(env_cmd), &env_cmd); + if (ret) + IWL_ERR(priv, "failed to send BT env command\n"); + return ret; +} + +static const u8 iwlagn_default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, +}; + +static const u8 iwlagn_ipan_queue_to_tx_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, + IWL_TX_FIFO_BK_IPAN, + IWL_TX_FIFO_BE_IPAN, + IWL_TX_FIFO_VI_IPAN, + IWL_TX_FIFO_VO_IPAN, + IWL_TX_FIFO_BE_IPAN, + IWL_TX_FIFO_UNUSED, + IWL_TX_FIFO_AUX, +}; + +static int iwl_alive_notify(struct iwl_priv *priv) +{ + const u8 *queue_to_txf; + u8 n_queues; + int ret; + int i; + + iwl_trans_fw_alive(priv->trans, 0); + + if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN && + priv->nvm_data->sku_cap_ipan_enable) { + n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo); + queue_to_txf = iwlagn_ipan_queue_to_tx_fifo; + } else { + n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); 
+ queue_to_txf = iwlagn_default_queue_to_tx_fifo; + } + + for (i = 0; i < n_queues; i++) + if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED) + iwl_trans_ac_txq_enable(priv->trans, i, + queue_to_txf[i], 0); + + priv->passive_no_rx = false; + priv->transport_queue_stop = 0; + + ret = iwl_send_wimax_coex(priv); + if (ret) + return ret; + + if (!priv->lib->no_xtal_calib) { + ret = iwl_set_Xtal_calib(priv); + if (ret) + return ret; + } + + return iwl_send_calib_results(priv); +} + +struct iwl_alive_data { + bool valid; + u8 subtype; +}; + +static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_priv *priv = + container_of(notif_wait, struct iwl_priv, notif_wait); + struct iwl_alive_data *alive_data = data; + struct iwl_alive_resp *palive; + + palive = (void *)pkt->data; + + IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + priv->device_pointers.error_event_table = + le32_to_cpu(palive->error_event_table_ptr); + priv->device_pointers.log_event_table = + le32_to_cpu(palive->log_event_table_ptr); + + alive_data->subtype = palive->ver_subtype; + alive_data->valid = palive->is_valid == UCODE_VALID_OK; + + return true; +} + +#define UCODE_ALIVE_TIMEOUT HZ +#define UCODE_CALIB_TIMEOUT (2*HZ) + +int iwl_load_ucode_wait_alive(struct iwl_priv *priv, + enum iwl_ucode_type ucode_type) +{ + struct iwl_notification_wait alive_wait; + struct iwl_alive_data alive_data; + const struct fw_img *fw; + int ret; + enum iwl_ucode_type old_type; + static const u16 alive_cmd[] = { REPLY_ALIVE }; + + fw = iwl_get_ucode_image(priv, ucode_type); + if (WARN_ON(!fw)) + return -EINVAL; + + old_type = priv->cur_ucode; + priv->cur_ucode = ucode_type; + priv->ucode_loaded = false; + + iwl_init_notification_wait(&priv->notif_wait, &alive_wait, + alive_cmd, ARRAY_SIZE(alive_cmd), + iwl_alive_fn, &alive_data); + + ret = iwl_trans_start_fw(priv->trans, fw, false); + if (ret) { + priv->cur_ucode = old_type; + iwl_remove_notification(&priv->notif_wait, &alive_wait); + return ret; + } + + /* + * Some things may run in the background now, but we + * just wait for the ALIVE notification here. 
+ */ + ret = iwl_wait_notification(&priv->notif_wait, &alive_wait, + UCODE_ALIVE_TIMEOUT); + if (ret) { + priv->cur_ucode = old_type; + return ret; + } + + if (!alive_data.valid) { + IWL_ERR(priv, "Loaded ucode is not valid!\n"); + priv->cur_ucode = old_type; + return -EIO; + } + + priv->ucode_loaded = true; + + if (ucode_type != IWL_UCODE_WOWLAN) { + /* delay a bit to give rfkill time to run */ + msleep(5); + } + + ret = iwl_alive_notify(priv); + if (ret) { + IWL_WARN(priv, + "Could not complete ALIVE transition: %d\n", ret); + priv->cur_ucode = old_type; + return ret; + } + + return 0; +} + +static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_priv *priv = data; + struct iwl_calib_hdr *hdr; + + if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) { + WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION); + return true; + } + + hdr = (struct iwl_calib_hdr *)pkt->data; + + if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt))) + IWL_ERR(priv, "Failed to record calibration data %d\n", + hdr->op_code); + + return false; +} + +int iwl_run_init_ucode(struct iwl_priv *priv) +{ + struct iwl_notification_wait calib_wait; + static const u16 calib_complete[] = { + CALIBRATION_RES_NOTIFICATION, + CALIBRATION_COMPLETE_NOTIFICATION + }; + int ret; + + lockdep_assert_held(&priv->mutex); + + /* No init ucode required? Curious, but maybe ok */ + if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) + return 0; + + iwl_init_notification_wait(&priv->notif_wait, &calib_wait, + calib_complete, ARRAY_SIZE(calib_complete), + iwlagn_wait_calib, priv); + + /* Will also start the device */ + ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT); + if (ret) + goto error; + + ret = iwl_init_alive_start(priv); + if (ret) + goto error; + + /* + * Some things may run in the background now, but we + * just wait for the calibration complete notification. + */ + ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, + UCODE_CALIB_TIMEOUT); + + goto out; + + error: + iwl_remove_notification(&priv->notif_wait, &calib_wait); + out: + /* Whatever happened, stop the device */ + iwl_trans_stop_device(priv->trans); + priv->ucode_loaded = false; + + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c new file mode 100644 index 000000000000..06f6cc08f451 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c @@ -0,0 +1,140 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include "iwl-config.h" +#include "iwl-csr.h" +#include "iwl-agn-hw.h" + +/* Highest firmware API version supported */ +#define IWL1000_UCODE_API_MAX 5 +#define IWL100_UCODE_API_MAX 5 + +/* Oldest version we won't warn about */ +#define IWL1000_UCODE_API_OK 5 +#define IWL100_UCODE_API_OK 5 + +/* Lowest firmware API version supported */ +#define IWL1000_UCODE_API_MIN 1 +#define IWL100_UCODE_API_MIN 5 + +/* EEPROM version */ +#define EEPROM_1000_TX_POWER_VERSION (4) +#define EEPROM_1000_EEPROM_VERSION (0x15C) + +#define IWL1000_FW_PRE "iwlwifi-1000-" +#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode" + +#define IWL100_FW_PRE "iwlwifi-100-" +#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode" + + +static const struct iwl_base_params iwl1000_base_params = { + .num_of_queues = IWLAGN_NUM_QUEUES, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .max_ll_items = OTP_MAX_LL_ITEMS_1000, + .shadow_ram_support = false, + .led_compensation = 51, + .wd_timeout = IWL_WATCHDOG_DISABLED, + .max_event_log_size = 128, + .scd_chain_ext_wa = true, +}; + +static const struct iwl_ht_params iwl1000_ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(IEEE80211_BAND_2GHZ), +}; + +static const struct iwl_eeprom_params iwl1000_eeprom_params = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_REG_BAND_24_HT40_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + } +}; + +#define IWL_DEVICE_1000 \ + .fw_name_pre = IWL1000_FW_PRE, \ + .ucode_api_max = IWL1000_UCODE_API_MAX, \ + .ucode_api_ok = IWL1000_UCODE_API_OK, \ + .ucode_api_min = IWL1000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_1000, \ + .max_inst_size = IWLAGN_RTC_INST_SIZE, \ + .max_data_size = IWLAGN_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ + .base_params = &iwl1000_base_params, \ + .eeprom_params = &iwl1000_eeprom_params, \ + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl1000_bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", + IWL_DEVICE_1000, + .ht_params = &iwl1000_ht_params, +}; + +const struct iwl_cfg iwl1000_bg_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 1000 BG", + IWL_DEVICE_1000, +}; + +#define IWL_DEVICE_100 \ + .fw_name_pre = IWL100_FW_PRE, \ + .ucode_api_max = IWL100_UCODE_API_MAX, \ + .ucode_api_ok = IWL100_UCODE_API_OK, \ + .ucode_api_min = IWL100_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_100, \ + .max_inst_size = IWLAGN_RTC_INST_SIZE, \ + .max_data_size = IWLAGN_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ + .base_params = &iwl1000_base_params, \ + .eeprom_params = &iwl1000_eeprom_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl100_bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", + IWL_DEVICE_100, + .ht_params = &iwl1000_ht_params, +}; + +const struct iwl_cfg iwl100_bg_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 100 BG", + 
IWL_DEVICE_100, +}; + +MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c new file mode 100644 index 000000000000..890b95f497d6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c @@ -0,0 +1,216 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include "iwl-config.h" +#include "iwl-agn-hw.h" +#include "dvm/commands.h" /* needed for BT for now */ + +/* Highest firmware API version supported */ +#define IWL2030_UCODE_API_MAX 6 +#define IWL2000_UCODE_API_MAX 6 +#define IWL105_UCODE_API_MAX 6 +#define IWL135_UCODE_API_MAX 6 + +/* Oldest version we won't warn about */ +#define IWL2030_UCODE_API_OK 6 +#define IWL2000_UCODE_API_OK 6 +#define IWL105_UCODE_API_OK 6 +#define IWL135_UCODE_API_OK 6 + +/* Lowest firmware API version supported */ +#define IWL2030_UCODE_API_MIN 5 +#define IWL2000_UCODE_API_MIN 5 +#define IWL105_UCODE_API_MIN 5 +#define IWL135_UCODE_API_MIN 5 + +/* EEPROM version */ +#define EEPROM_2000_TX_POWER_VERSION (6) +#define EEPROM_2000_EEPROM_VERSION (0x805) + + +#define IWL2030_FW_PRE "iwlwifi-2030-" +#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode" + +#define IWL2000_FW_PRE "iwlwifi-2000-" +#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode" + +#define IWL105_FW_PRE "iwlwifi-105-" +#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode" + +#define IWL135_FW_PRE "iwlwifi-135-" +#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode" + +static const struct iwl_base_params iwl2000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .pll_cfg_val = 0, + .max_ll_items = OTP_MAX_LL_ITEMS_2x00, + .shadow_ram_support = true, + .led_compensation = 51, + .wd_timeout = IWL_DEF_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ + .scd_chain_ext_wa = true, +}; + + +static const struct iwl_base_params iwl2030_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .pll_cfg_val = 0, + .max_ll_items = OTP_MAX_LL_ITEMS_2x00, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = false, 
/* TODO: fix bugs using this feature */ + .scd_chain_ext_wa = true, +}; + +static const struct iwl_ht_params iwl2000_ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(IEEE80211_BAND_2GHZ), +}; + +static const struct iwl_eeprom_params iwl20x0_eeprom_params = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_6000_REG_BAND_24_HT40_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + .enhanced_txpower = true, +}; + +#define IWL_DEVICE_2000 \ + .fw_name_pre = IWL2000_FW_PRE, \ + .ucode_api_max = IWL2000_UCODE_API_MAX, \ + .ucode_api_ok = IWL2000_UCODE_API_OK, \ + .ucode_api_min = IWL2000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_2000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .base_params = &iwl2000_base_params, \ + .eeprom_params = &iwl20x0_eeprom_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + + +const struct iwl_cfg iwl2000_2bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", + IWL_DEVICE_2000, + .ht_params = &iwl2000_ht_params, +}; + +const struct iwl_cfg iwl2000_2bgn_d_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN", + IWL_DEVICE_2000, + .ht_params = &iwl2000_ht_params, +}; + +#define IWL_DEVICE_2030 \ + .fw_name_pre = IWL2030_FW_PRE, \ + .ucode_api_max = IWL2030_UCODE_API_MAX, \ + .ucode_api_ok = IWL2030_UCODE_API_OK, \ + .ucode_api_min = IWL2030_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_2030, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .base_params = &iwl2030_base_params, \ + .eeprom_params = &iwl20x0_eeprom_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl2030_2bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", + IWL_DEVICE_2030, + .ht_params = &iwl2000_ht_params, +}; + +#define IWL_DEVICE_105 \ + .fw_name_pre = IWL105_FW_PRE, \ + .ucode_api_max = IWL105_UCODE_API_MAX, \ + .ucode_api_ok = IWL105_UCODE_API_OK, \ + .ucode_api_min = IWL105_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_105, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .base_params = &iwl2000_base_params, \ + .eeprom_params = &iwl20x0_eeprom_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl105_bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", + IWL_DEVICE_105, + .ht_params = &iwl2000_ht_params, +}; + +const struct iwl_cfg iwl105_bgn_d_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 105D BGN", + IWL_DEVICE_105, + .ht_params = &iwl2000_ht_params, +}; + +#define IWL_DEVICE_135 \ + .fw_name_pre = IWL135_FW_PRE, \ + .ucode_api_max = IWL135_UCODE_API_MAX, \ + .ucode_api_ok = IWL135_UCODE_API_OK, \ + .ucode_api_min = IWL135_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_135, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .nvm_ver = 
EEPROM_2000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .base_params = &iwl2030_base_params, \ + .eeprom_params = &iwl20x0_eeprom_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl135_bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", + IWL_DEVICE_135, + .ht_params = &iwl2000_ht_params, +}; + +MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK)); +MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK)); +MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c new file mode 100644 index 000000000000..724194e23414 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c @@ -0,0 +1,178 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include "iwl-config.h" +#include "iwl-agn-hw.h" +#include "iwl-csr.h" + +/* Highest firmware API version supported */ +#define IWL5000_UCODE_API_MAX 5 +#define IWL5150_UCODE_API_MAX 2 + +/* Oldest version we won't warn about */ +#define IWL5000_UCODE_API_OK 5 +#define IWL5150_UCODE_API_OK 2 + +/* Lowest firmware API version supported */ +#define IWL5000_UCODE_API_MIN 1 +#define IWL5150_UCODE_API_MIN 1 + +/* EEPROM versions */ +#define EEPROM_5000_TX_POWER_VERSION (4) +#define EEPROM_5000_EEPROM_VERSION (0x11A) +#define EEPROM_5050_TX_POWER_VERSION (4) +#define EEPROM_5050_EEPROM_VERSION (0x21E) + +#define IWL5000_FW_PRE "iwlwifi-5000-" +#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode" + +#define IWL5150_FW_PRE "iwlwifi-5150-" +#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode" + +static const struct iwl_base_params iwl5000_base_params = { + .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .led_compensation = 51, + .wd_timeout = IWL_WATCHDOG_DISABLED, + .max_event_log_size = 512, + .scd_chain_ext_wa = true, +}; + +static const struct iwl_ht_params iwl5000_ht_params = { + .ht_greenfield_support = true, + .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), +}; + +static const struct iwl_eeprom_params iwl5000_eeprom_params = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_REG_BAND_24_HT40_CHANNELS, + EEPROM_REG_BAND_52_HT40_CHANNELS + }, +}; + +#define IWL_DEVICE_5000 \ + .fw_name_pre = IWL5000_FW_PRE, \ + .ucode_api_max = IWL5000_UCODE_API_MAX, \ + .ucode_api_ok = IWL5000_UCODE_API_OK, \ + .ucode_api_min = IWL5000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_5000, \ + .max_inst_size = IWLAGN_RTC_INST_SIZE, \ + .max_data_size = IWLAGN_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_5000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ + .base_params = &iwl5000_base_params, \ + .eeprom_params = &iwl5000_eeprom_params, \ + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl5300_agn_cfg = { + .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", + IWL_DEVICE_5000, + /* at least EEPROM 0x11A has wrong info */ + .valid_tx_ant = ANT_ABC, /* .cfg overwrite */ + .valid_rx_ant = ANT_ABC, /* .cfg overwrite */ + .ht_params = &iwl5000_ht_params, +}; + +const struct iwl_cfg iwl5100_bgn_cfg = { + .name = "Intel(R) WiFi Link 5100 BGN", + IWL_DEVICE_5000, + .valid_tx_ant = ANT_B, /* .cfg overwrite */ + .valid_rx_ant = ANT_AB, /* .cfg overwrite */ + .ht_params = &iwl5000_ht_params, +}; + +const struct iwl_cfg iwl5100_abg_cfg = { + .name = "Intel(R) WiFi Link 5100 ABG", + IWL_DEVICE_5000, + .valid_tx_ant = ANT_B, /* .cfg overwrite */ + .valid_rx_ant = ANT_AB, /* .cfg overwrite */ +}; + +const struct iwl_cfg iwl5100_agn_cfg = { + .name = "Intel(R) WiFi Link 5100 AGN", + IWL_DEVICE_5000, + .valid_tx_ant = ANT_B, /* .cfg overwrite */ + .valid_rx_ant = ANT_AB, /* .cfg overwrite */ + .ht_params = &iwl5000_ht_params, +}; + +const struct iwl_cfg iwl5350_agn_cfg = { + .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", + .fw_name_pre = IWL5000_FW_PRE, + .ucode_api_max = IWL5000_UCODE_API_MAX, + .ucode_api_ok = 
IWL5000_UCODE_API_OK, + .ucode_api_min = IWL5000_UCODE_API_MIN, + .device_family = IWL_DEVICE_FAMILY_5000, + .max_inst_size = IWLAGN_RTC_INST_SIZE, + .max_data_size = IWLAGN_RTC_DATA_SIZE, + .nvm_ver = EEPROM_5050_EEPROM_VERSION, + .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, + .base_params = &iwl5000_base_params, + .eeprom_params = &iwl5000_eeprom_params, + .ht_params = &iwl5000_ht_params, + .led_mode = IWL_LED_BLINK, + .internal_wimax_coex = true, +}; + +#define IWL_DEVICE_5150 \ + .fw_name_pre = IWL5150_FW_PRE, \ + .ucode_api_max = IWL5150_UCODE_API_MAX, \ + .ucode_api_ok = IWL5150_UCODE_API_OK, \ + .ucode_api_min = IWL5150_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_5150, \ + .max_inst_size = IWLAGN_RTC_INST_SIZE, \ + .max_data_size = IWLAGN_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_5050_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ + .base_params = &iwl5000_base_params, \ + .eeprom_params = &iwl5000_eeprom_params, \ + .led_mode = IWL_LED_BLINK, \ + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl5150_agn_cfg = { + .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", + IWL_DEVICE_5150, + .ht_params = &iwl5000_ht_params, + +}; + +const struct iwl_cfg iwl5150_abg_cfg = { + .name = "Intel(R) WiMAX/WiFi Link 5150 ABG", + IWL_DEVICE_5150, +}; + +MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c new file mode 100644 index 000000000000..21b2630763dc --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c @@ -0,0 +1,389 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include "iwl-config.h" +#include "iwl-agn-hw.h" +#include "dvm/commands.h" /* needed for BT for now */ + +/* Highest firmware API version supported */ +#define IWL6000_UCODE_API_MAX 6 +#define IWL6050_UCODE_API_MAX 5 +#define IWL6000G2_UCODE_API_MAX 6 +#define IWL6035_UCODE_API_MAX 6 + +/* Oldest version we won't warn about */ +#define IWL6000_UCODE_API_OK 4 +#define IWL6000G2_UCODE_API_OK 5 +#define IWL6050_UCODE_API_OK 5 +#define IWL6000G2B_UCODE_API_OK 6 +#define IWL6035_UCODE_API_OK 6 + +/* Lowest firmware API version supported */ +#define IWL6000_UCODE_API_MIN 4 +#define IWL6050_UCODE_API_MIN 4 +#define IWL6000G2_UCODE_API_MIN 5 +#define IWL6035_UCODE_API_MIN 6 + +/* EEPROM versions */ +#define EEPROM_6000_TX_POWER_VERSION (4) +#define EEPROM_6000_EEPROM_VERSION (0x423) +#define EEPROM_6050_TX_POWER_VERSION (4) +#define EEPROM_6050_EEPROM_VERSION (0x532) +#define EEPROM_6150_TX_POWER_VERSION (6) +#define EEPROM_6150_EEPROM_VERSION (0x553) +#define EEPROM_6005_TX_POWER_VERSION (6) +#define EEPROM_6005_EEPROM_VERSION (0x709) +#define EEPROM_6030_TX_POWER_VERSION (6) +#define EEPROM_6030_EEPROM_VERSION (0x709) +#define EEPROM_6035_TX_POWER_VERSION (6) +#define EEPROM_6035_EEPROM_VERSION (0x753) + +#define IWL6000_FW_PRE "iwlwifi-6000-" +#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode" + +#define IWL6050_FW_PRE "iwlwifi-6050-" +#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode" + +#define IWL6005_FW_PRE "iwlwifi-6000g2a-" +#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode" + +#define IWL6030_FW_PRE "iwlwifi-6000g2b-" +#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode" + +static const struct iwl_base_params iwl6000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .pll_cfg_val = 0, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .led_compensation = 51, + .wd_timeout = IWL_DEF_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ + .scd_chain_ext_wa = true, +}; + +static const struct iwl_base_params iwl6050_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .pll_cfg_val = 0, + .max_ll_items = OTP_MAX_LL_ITEMS_6x50, + .shadow_ram_support = true, + .led_compensation = 51, + .wd_timeout = IWL_DEF_WD_TIMEOUT, + .max_event_log_size = 1024, + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ + .scd_chain_ext_wa = true, +}; + +static const struct iwl_base_params iwl6000_g2_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .pll_cfg_val = 0, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ + .scd_chain_ext_wa = true, +}; + +static const struct iwl_ht_params iwl6000_ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), +}; + +static const struct iwl_eeprom_params iwl6000_eeprom_params = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + 
EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_6000_REG_BAND_24_HT40_CHANNELS, + EEPROM_REG_BAND_52_HT40_CHANNELS + }, + .enhanced_txpower = true, +}; + +#define IWL_DEVICE_6005 \ + .fw_name_pre = IWL6005_FW_PRE, \ + .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ + .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ + .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6005, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_6005_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ + .base_params = &iwl6000_g2_base_params, \ + .eeprom_params = &iwl6000_eeprom_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl6005_2agn_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", + IWL_DEVICE_6005, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6005_2abg_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG", + IWL_DEVICE_6005, +}; + +const struct iwl_cfg iwl6005_2bg_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6205 BG", + IWL_DEVICE_6005, +}; + +const struct iwl_cfg iwl6005_2agn_sff_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN", + IWL_DEVICE_6005, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6005_2agn_d_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN", + IWL_DEVICE_6005, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6005_2agn_mow1_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN", + IWL_DEVICE_6005, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6005_2agn_mow2_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN", + IWL_DEVICE_6005, + .ht_params = &iwl6000_ht_params, +}; + +#define IWL_DEVICE_6030 \ + .fw_name_pre = IWL6030_FW_PRE, \ + .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ + .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \ + .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6030, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ + .base_params = &iwl6000_g2_base_params, \ + .eeprom_params = &iwl6000_eeprom_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl6030_2agn_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", + IWL_DEVICE_6030, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6030_2abg_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG", + IWL_DEVICE_6030, +}; + +const struct iwl_cfg iwl6030_2bgn_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN", + IWL_DEVICE_6030, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6030_2bg_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6230 BG", + IWL_DEVICE_6030, +}; + +#define IWL_DEVICE_6035 \ + .fw_name_pre = IWL6030_FW_PRE, \ + .ucode_api_max = IWL6035_UCODE_API_MAX, \ + .ucode_api_ok = IWL6035_UCODE_API_OK, \ + .ucode_api_min = IWL6035_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6030, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ + .base_params = &iwl6000_g2_base_params, \ + .eeprom_params = &iwl6000_eeprom_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = 
IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl6035_2agn_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", + IWL_DEVICE_6035, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6035_2agn_sff_cfg = { + .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN", + IWL_DEVICE_6035, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl1030_bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", + IWL_DEVICE_6030, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl1030_bg_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 1030 BG", + IWL_DEVICE_6030, +}; + +const struct iwl_cfg iwl130_bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 130 BGN", + IWL_DEVICE_6030, + .ht_params = &iwl6000_ht_params, + .rx_with_siso_diversity = true, +}; + +const struct iwl_cfg iwl130_bg_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N 130 BG", + IWL_DEVICE_6030, + .rx_with_siso_diversity = true, +}; + +/* + * "i": Internal configuration, use internal Power Amplifier + */ +#define IWL_DEVICE_6000i \ + .fw_name_pre = IWL6000_FW_PRE, \ + .ucode_api_max = IWL6000_UCODE_API_MAX, \ + .ucode_api_ok = IWL6000_UCODE_API_OK, \ + .ucode_api_min = IWL6000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6000i, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ + .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ + .nvm_ver = EEPROM_6000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ + .base_params = &iwl6000_base_params, \ + .eeprom_params = &iwl6000_eeprom_params, \ + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl6000i_2agn_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", + IWL_DEVICE_6000i, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6000i_2abg_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG", + IWL_DEVICE_6000i, +}; + +const struct iwl_cfg iwl6000i_2bg_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N 6200 BG", + IWL_DEVICE_6000i, +}; + +#define IWL_DEVICE_6050 \ + .fw_name_pre = IWL6050_FW_PRE, \ + .ucode_api_max = IWL6050_UCODE_API_MAX, \ + .ucode_api_min = IWL6050_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6050, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ + .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ + .nvm_ver = EEPROM_6050_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ + .base_params = &iwl6050_base_params, \ + .eeprom_params = &iwl6000_eeprom_params, \ + .led_mode = IWL_LED_BLINK, \ + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl6050_2agn_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", + IWL_DEVICE_6050, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6050_2abg_cfg = { + .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG", + IWL_DEVICE_6050, +}; + +#define IWL_DEVICE_6150 \ + .fw_name_pre = IWL6050_FW_PRE, \ + .ucode_api_max = IWL6050_UCODE_API_MAX, \ + .ucode_api_min = IWL6050_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6150, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .nvm_ver = EEPROM_6150_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ + .base_params = &iwl6050_base_params, \ + .eeprom_params = &iwl6000_eeprom_params, 
\ + .led_mode = IWL_LED_BLINK, \ + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + +const struct iwl_cfg iwl6150_bgn_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", + IWL_DEVICE_6150, + .ht_params = &iwl6000_ht_params, +}; + +const struct iwl_cfg iwl6150_bg_cfg = { + .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG", + IWL_DEVICE_6150, +}; + +const struct iwl_cfg iwl6000_3agn_cfg = { + .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", + .fw_name_pre = IWL6000_FW_PRE, + .ucode_api_max = IWL6000_UCODE_API_MAX, + .ucode_api_ok = IWL6000_UCODE_API_OK, + .ucode_api_min = IWL6000_UCODE_API_MIN, + .device_family = IWL_DEVICE_FAMILY_6000, + .max_inst_size = IWL60_RTC_INST_SIZE, + .max_data_size = IWL60_RTC_DATA_SIZE, + .nvm_ver = EEPROM_6000_EEPROM_VERSION, + .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, + .base_params = &iwl6000_base_params, + .eeprom_params = &iwl6000_eeprom_params, + .ht_params = &iwl6000_ht_params, + .led_mode = IWL_LED_BLINK, +}; + +MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK)); +MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK)); +MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c new file mode 100644 index 000000000000..1a73c7a1da77 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -0,0 +1,346 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include "iwl-config.h" +#include "iwl-agn-hw.h" + +/* Highest firmware API version supported */ +#define IWL7260_UCODE_API_MAX 17 + +/* Oldest version we won't warn about */ +#define IWL7260_UCODE_API_OK 13 + +/* Lowest firmware API version supported */ +#define IWL7260_UCODE_API_MIN 13 + +/* NVM versions */ +#define IWL7260_NVM_VERSION 0x0a1d +#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL3160_NVM_VERSION 0x709 +#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL3165_NVM_VERSION 0x709 +#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL7265_NVM_VERSION 0x0a1d +#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL7265D_NVM_VERSION 0x0c11 +#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ + +/* DCCM offsets and lengths */ +#define IWL7000_DCCM_OFFSET 0x800000 +#define IWL7260_DCCM_LEN 0x14000 +#define IWL3160_DCCM_LEN 0x10000 +#define IWL7265_DCCM_LEN 0x17A00 + +#define IWL7260_FW_PRE "iwlwifi-7260-" +#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" + +#define IWL3160_FW_PRE "iwlwifi-3160-" +#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" + +#define IWL7265_FW_PRE "iwlwifi-7265-" +#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" + +#define IWL7265D_FW_PRE "iwlwifi-7265D-" +#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" + +#define NVM_HW_SECTION_NUM_FAMILY_7000 0 + +static const struct iwl_base_params iwl7000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000, + .num_of_queues = 31, + .pll_cfg_val = 0, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, + .apmg_wake_up_wa = true, +}; + +static const struct iwl_tt_params iwl7000_high_temp_tt_params = { + .ct_kill_entry = 118, + .ct_kill_exit = 96, + .ct_kill_duration = 5, + .dynamic_smps_entry = 114, + .dynamic_smps_exit = 110, + .tx_protection_entry = 114, + .tx_protection_exit = 108, + .tx_backoff = { + {.temperature = 112, .backoff = 300}, + {.temperature = 113, .backoff = 800}, + {.temperature = 114, .backoff = 1500}, + {.temperature = 115, .backoff = 3000}, + {.temperature = 116, .backoff = 5000}, + {.temperature = 117, .backoff = 10000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + 
.support_tx_protection = true, + .support_tx_backoff = true, +}; + +static const struct iwl_ht_params iwl7000_ht_params = { + .stbc = true, + .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), +}; + +#define IWL_DEVICE_7000 \ + .ucode_api_max = IWL7260_UCODE_API_MAX, \ + .ucode_api_ok = IWL7260_UCODE_API_OK, \ + .ucode_api_min = IWL7260_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_7000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .base_params = &iwl7000_base_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ + .non_shared_ant = ANT_A, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .dccm_offset = IWL7000_DCCM_OFFSET + +const struct iwl_cfg iwl7260_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, + .lp_xtal_workaround = true, + .dccm_len = IWL7260_DCCM_LEN, +}; + +const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { + .name = "Intel(R) Dual Band Wireless AC 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .high_temp = true, + .host_interrupt_operation_mode = true, + .lp_xtal_workaround = true, + .dccm_len = IWL7260_DCCM_LEN, + .thermal_params = &iwl7000_high_temp_tt_params, +}; + +const struct iwl_cfg iwl7260_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, + .lp_xtal_workaround = true, + .dccm_len = IWL7260_DCCM_LEN, +}; + +const struct iwl_cfg iwl7260_n_cfg = { + .name = "Intel(R) Wireless N 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, + .lp_xtal_workaround = true, + .dccm_len = IWL7260_DCCM_LEN, +}; + +const struct iwl_cfg iwl3160_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 3160", + .fw_name_pre = IWL3160_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3160_NVM_VERSION, + .nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, + .dccm_len = IWL3160_DCCM_LEN, +}; + +const struct iwl_cfg iwl3160_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 3160", + .fw_name_pre = IWL3160_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3160_NVM_VERSION, + .nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, + .dccm_len = IWL3160_DCCM_LEN, +}; + +const struct iwl_cfg iwl3160_n_cfg = { + .name = "Intel(R) Wireless N 3160", + .fw_name_pre = IWL3160_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3160_NVM_VERSION, + .nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, + .dccm_len = IWL3160_DCCM_LEN, +}; + +static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { + {.pwr = 1600, .backoff = 0}, + {.pwr = 1300, .backoff = 467}, + {.pwr = 900, .backoff = 1900}, + {.pwr = 800, .backoff = 2630}, + {.pwr = 700, .backoff = 3720}, + {.pwr = 600, .backoff = 5550}, + 
{.pwr = 500, .backoff = 9350}, + {0}, +}; + +static const struct iwl_ht_params iwl7265_ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), +}; + +const struct iwl_cfg iwl3165_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 3165", + .fw_name_pre = IWL7265D_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3165_NVM_VERSION, + .nvm_calib_ver = IWL3165_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, + .dccm_len = IWL7265_DCCM_LEN, +}; + +const struct iwl_cfg iwl7265_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 7265", + .fw_name_pre = IWL7265_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, + .dccm_len = IWL7265_DCCM_LEN, +}; + +const struct iwl_cfg iwl7265_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 7265", + .fw_name_pre = IWL7265_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, + .dccm_len = IWL7265_DCCM_LEN, +}; + +const struct iwl_cfg iwl7265_n_cfg = { + .name = "Intel(R) Wireless N 7265", + .fw_name_pre = IWL7265_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, + .dccm_len = IWL7265_DCCM_LEN, +}; + +const struct iwl_cfg iwl7265d_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 7265", + .fw_name_pre = IWL7265D_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265D_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, + .dccm_len = IWL7265_DCCM_LEN, +}; + +const struct iwl_cfg iwl7265d_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 7265", + .fw_name_pre = IWL7265D_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265D_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, + .dccm_len = IWL7265_DCCM_LEN, +}; + +const struct iwl_cfg iwl7265d_n_cfg = { + .name = "Intel(R) Wireless N 7265", + .fw_name_pre = IWL7265D_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265D_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, + .dccm_len = IWL7265_DCCM_LEN, +}; + +MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); +MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); +MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); +MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c new file mode 100644 index 000000000000..0116e5a4c393 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -0,0 +1,229 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include +#include +#include "iwl-config.h" +#include "iwl-agn-hw.h" + +/* Highest firmware API version supported */ +#define IWL8000_UCODE_API_MAX 17 + +/* Oldest version we won't warn about */ +#define IWL8000_UCODE_API_OK 13 + +/* Lowest firmware API version supported */ +#define IWL8000_UCODE_API_MIN 13 + +/* NVM versions */ +#define IWL8000_NVM_VERSION 0x0a1d +#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */ + +/* Memory offsets and lengths */ +#define IWL8260_DCCM_OFFSET 0x800000 +#define IWL8260_DCCM_LEN 0x18000 +#define IWL8260_DCCM2_OFFSET 0x880000 +#define IWL8260_DCCM2_LEN 0x8000 +#define IWL8260_SMEM_OFFSET 0x400000 +#define IWL8260_SMEM_LEN 0x68000 + +#define IWL8000_FW_PRE "iwlwifi-8000" +#define IWL8000_MODULE_FIRMWARE(api) \ + IWL8000_FW_PRE "-" __stringify(api) ".ucode" + +#define NVM_HW_SECTION_NUM_FAMILY_8000 10 +#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B" +#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" + +/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */ +#define MAX_RX_AGG_SIZE_8260_SDIO 21 +#define MAX_TX_AGG_SIZE_8260_SDIO 40 + +/* Max A-MPDU exponent for HT and VHT */ +#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K +#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO IEEE80211_VHT_MAX_AMPDU_32K + +static const struct iwl_base_params iwl8000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, + .num_of_queues = 31, + .pll_cfg_val = 0, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +static const struct iwl_ht_params iwl8000_ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), +}; + +static const struct iwl_tt_params iwl8000_tt_params = { + .ct_kill_entry = 115, + .ct_kill_exit = 93, + .ct_kill_duration = 5, + .dynamic_smps_entry = 111, + .dynamic_smps_exit = 107, + .tx_protection_entry = 112, + .tx_protection_exit = 105, + .tx_backoff = { + {.temperature = 110, .backoff = 200}, + {.temperature = 111, .backoff = 600}, + {.temperature = 112, .backoff = 1200}, + {.temperature = 113, .backoff = 2000}, + {.temperature = 114, .backoff = 4000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + +#define IWL_DEVICE_8000 \ + .ucode_api_max = IWL8000_UCODE_API_MAX, \ + .ucode_api_ok = IWL8000_UCODE_API_OK, \ + .ucode_api_min = IWL8000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_8000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .base_params = &iwl8000_base_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ + .d0i3 = true, \ + .features = NETIF_F_RXCSUM, \ + .non_shared_ant = ANT_A, \ + .dccm_offset = IWL8260_DCCM_OFFSET, \ + .dccm_len = IWL8260_DCCM_LEN, \ + .dccm2_offset = IWL8260_DCCM2_OFFSET, \ + .dccm2_len = IWL8260_DCCM2_LEN, \ + .smem_offset = IWL8260_SMEM_OFFSET, \ + .smem_len = IWL8260_SMEM_LEN, \ + .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \ + .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ + .thermal_params = &iwl8000_tt_params, \ + .apmg_not_supported = true + +const struct iwl_cfg iwl8260_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 8260", + .fw_name_pre = IWL8000_FW_PRE, + IWL_DEVICE_8000, + 
.ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl8260_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 8260", + .fw_name_pre = IWL8000_FW_PRE, + IWL_DEVICE_8000, + .ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl4165_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 4165", + .fw_name_pre = IWL8000_FW_PRE, + IWL_DEVICE_8000, + .ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl8260_2ac_sdio_cfg = { + .name = "Intel(R) Dual Band Wireless-AC 8260", + .fw_name_pre = IWL8000_FW_PRE, + IWL_DEVICE_8000, + .ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, + .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, + .disable_dummy_notification = true, + .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, + .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, +}; + +const struct iwl_cfg iwl4165_2ac_sdio_cfg = { + .name = "Intel(R) Dual Band Wireless-AC 4165", + .fw_name_pre = IWL8000_FW_PRE, + IWL_DEVICE_8000, + .ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, + .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, + .bt_shared_single_ant = true, + .disable_dummy_notification = true, + .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, + .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, +}; + +MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h new file mode 100644 index 000000000000..04a483d38659 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h @@ -0,0 +1,117 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-agn-hw.h) only for hardware-related definitions. + */ + +#ifndef __iwl_agn_hw_h__ +#define __iwl_agn_hw_h__ + +#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000) +#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000) + +#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000) +#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000) + +#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \ + IWLAGN_RTC_INST_LOWER_BOUND) +#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \ + IWLAGN_RTC_DATA_LOWER_BOUND) + +#define IWL60_RTC_INST_LOWER_BOUND (0x000000) +#define IWL60_RTC_INST_UPPER_BOUND (0x040000) +#define IWL60_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL60_RTC_DATA_UPPER_BOUND (0x814000) +#define IWL60_RTC_INST_SIZE \ + (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND) +#define IWL60_RTC_DATA_SIZE \ + (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND) + +/* RSSI to dBm */ +#define IWLAGN_RSSI_OFFSET 44 + +#define IWLAGN_DEFAULT_TX_RETRY 15 +#define IWLAGN_MGMT_DFAULT_RETRY_LIMIT 3 +#define IWLAGN_RTS_DFAULT_RETRY_LIMIT 60 +#define IWLAGN_BAR_DFAULT_RETRY_LIMIT 60 +#define IWLAGN_LOW_RETRY_LIMIT 7 + +/* Limit range of txpower output target to be between these values */ +#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ +#define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */ + +/* EEPROM */ +#define IWLAGN_EEPROM_IMG_SIZE 2048 + +/* high blocks contain PAPD data */ +#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */ +#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */ +#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ +#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ +#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ +#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */ + + +#define IWLAGN_NUM_QUEUES 20 + +#endif /* __iwl_agn_hw_h__ */ diff --git 
a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h new file mode 100644 index 000000000000..910970858f98 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -0,0 +1,437 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef __IWL_CONFIG_H__ +#define __IWL_CONFIG_H__ + +#include +#include + + +enum iwl_device_family { + IWL_DEVICE_FAMILY_UNDEFINED, + IWL_DEVICE_FAMILY_1000, + IWL_DEVICE_FAMILY_100, + IWL_DEVICE_FAMILY_2000, + IWL_DEVICE_FAMILY_2030, + IWL_DEVICE_FAMILY_105, + IWL_DEVICE_FAMILY_135, + IWL_DEVICE_FAMILY_5000, + IWL_DEVICE_FAMILY_5150, + IWL_DEVICE_FAMILY_6000, + IWL_DEVICE_FAMILY_6000i, + IWL_DEVICE_FAMILY_6005, + IWL_DEVICE_FAMILY_6030, + IWL_DEVICE_FAMILY_6050, + IWL_DEVICE_FAMILY_6150, + IWL_DEVICE_FAMILY_7000, + IWL_DEVICE_FAMILY_8000, +}; + +static inline bool iwl_has_secure_boot(u32 hw_rev, + enum iwl_device_family family) +{ + /* return 1 only for family 8000 B0 */ + if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC)) + return true; + + return false; +} + +/* + * LED mode + * IWL_LED_DEFAULT: use device default + * IWL_LED_RF_STATE: turn LED on/off based on RF state + * LED ON = RF ON + * LED OFF = RF OFF + * IWL_LED_BLINK: adjust led blink rate based on blink table + * IWL_LED_DISABLE: led disabled + */ +enum iwl_led_mode { + IWL_LED_DEFAULT, + IWL_LED_RF_STATE, + IWL_LED_BLINK, + IWL_LED_DISABLE, +}; + +/* + * This is the threshold value of plcp error rate per 100mSecs. It is + * used to set and check for the validity of plcp_delta. + */ +#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1 +#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50 +#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100 +#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200 +#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255 +#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0 + +/* TX queue watchdog timeouts in mSecs */ +#define IWL_WATCHDOG_DISABLED 0 +#define IWL_DEF_WD_TIMEOUT 2500 +#define IWL_LONG_WD_TIMEOUT 10000 +#define IWL_MAX_WD_TIMEOUT 120000 + +#define IWL_DEFAULT_MAX_TX_POWER 22 + +/* Antenna presence definitions */ +#define ANT_NONE 0x0 +#define ANT_A BIT(0) +#define ANT_B BIT(1) +#define ANT_C BIT(2) +#define ANT_AB (ANT_A | ANT_B) +#define ANT_AC (ANT_A | ANT_C) +#define ANT_BC (ANT_B | ANT_C) +#define ANT_ABC (ANT_A | ANT_B | ANT_C) + +static inline u8 num_of_ant(u8 mask) +{ + return !!((mask) & ANT_A) + + !!((mask) & ANT_B) + + !!((mask) & ANT_C); +} + +/* + * @max_ll_items: max number of OTP blocks + * @shadow_ram_support: shadow support for OTP memory + * @led_compensation: compensate on the led on/off time per HW according + * to the deviation to achieve the desired led frequency. + * The detail algorithm is described in iwl-led.c + * @wd_timeout: TX queues watchdog timeout + * @max_event_log_size: size of event log buffer size for ucode event logging + * @shadow_reg_enable: HW shadow register support + * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command + * is in flight. This is due to a HW bug in 7260, 3160 and 7265. + * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. 
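+ *
+ * A family-wide instance such as iwl8000_base_params in iwl-8000.c only
+ * initializes the members it needs (the rest stay zero/false) and is then
+ * shared by every iwl_cfg of that family through the base_params pointer
+ * set in the IWL_DEVICE_8000 macro.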
+ */ +struct iwl_base_params { + int eeprom_size; + int num_of_queues; /* def: HW dependent */ + /* for iwl_pcie_apm_init() */ + u32 pll_cfg_val; + + const u16 max_ll_items; + const bool shadow_ram_support; + u16 led_compensation; + unsigned int wd_timeout; + u32 max_event_log_size; + const bool shadow_reg_enable; + const bool pcie_l1_allowed; + const bool apmg_wake_up_wa; + const bool scd_chain_ext_wa; +}; + +/* + * @stbc: support Tx STBC and 1*SS Rx STBC + * @ldpc: support Tx/Rx with LDPC + * @use_rts_for_aggregation: use rts/cts protection for HT traffic + * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40 + */ +struct iwl_ht_params { + enum ieee80211_smps_mode smps_mode; + const bool ht_greenfield_support; /* if used set to true */ + const bool stbc; + const bool ldpc; + bool use_rts_for_aggregation; + u8 ht40_bands; +}; + +/* + * Tx-backoff threshold + * @temperature: The threshold in Celsius + * @backoff: The tx-backoff in uSec + */ +struct iwl_tt_tx_backoff { + s32 temperature; + u32 backoff; +}; + +#define TT_TX_BACKOFF_SIZE 6 + +/** + * struct iwl_tt_params - thermal throttling parameters + * @ct_kill_entry: CT Kill entry threshold + * @ct_kill_exit: CT Kill exit threshold + * @ct_kill_duration: The time intervals (in uSec) in which the driver needs + * to checks whether to exit CT Kill. + * @dynamic_smps_entry: Dynamic SMPS entry threshold + * @dynamic_smps_exit: Dynamic SMPS exit threshold + * @tx_protection_entry: TX protection entry threshold + * @tx_protection_exit: TX protection exit threshold + * @tx_backoff: Array of thresholds for tx-backoff , in ascending order. + * @support_ct_kill: Support CT Kill? + * @support_dynamic_smps: Support dynamic SMPS? + * @support_tx_protection: Support tx protection? + * @support_tx_backoff: Support tx-backoff? + */ +struct iwl_tt_params { + u32 ct_kill_entry; + u32 ct_kill_exit; + u32 ct_kill_duration; + u32 dynamic_smps_entry; + u32 dynamic_smps_exit; + u32 tx_protection_entry; + u32 tx_protection_exit; + struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; + bool support_ct_kill; + bool support_dynamic_smps; + bool support_tx_protection; + bool support_tx_backoff; +}; + +/* + * information on how to parse the EEPROM + */ +#define EEPROM_REG_BAND_1_CHANNELS 0x08 +#define EEPROM_REG_BAND_2_CHANNELS 0x26 +#define EEPROM_REG_BAND_3_CHANNELS 0x42 +#define EEPROM_REG_BAND_4_CHANNELS 0x5C +#define EEPROM_REG_BAND_5_CHANNELS 0x74 +#define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82 +#define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92 +#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80 +#define EEPROM_REGULATORY_BAND_NO_HT40 0 + +/* lower blocks contain EEPROM image and calibration data */ +#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ +#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */ +#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */ + +struct iwl_eeprom_params { + const u8 regulatory_bands[7]; + bool enhanced_txpower; +}; + +/* Tx-backoff power threshold + * @pwr: The power limit in mw + * @backoff: The tx-backoff in uSec + */ +struct iwl_pwr_tx_backoff { + u32 pwr; + u32 backoff; +}; + +/** + * struct iwl_cfg + * @name: Official name of the device + * @fw_name_pre: Firmware filename prefix. The api version and extension + * (.ucode) will be added to filename before loading from disk. The + * filename is constructed as fw_name_pre.ucode. + * @ucode_api_max: Highest version of uCode API supported by driver. 
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load + * without a warning, for use in transitions + * @ucode_api_min: Lowest version of uCode API supported by driver. + * @max_inst_size: The maximal length of the fw inst section + * @max_data_size: The maximal length of the fw data section + * @valid_tx_ant: valid transmit antenna + * @valid_rx_ant: valid receive antenna + * @non_shared_ant: the antenna that is for WiFi only + * @nvm_ver: NVM version + * @nvm_calib_ver: NVM calibration version + * @lib: pointer to the lib ops + * @base_params: pointer to basic parameters + * @ht_params: point to ht parameters + * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) + * @rx_with_siso_diversity: 1x1 device with rx antenna diversity + * @internal_wimax_coex: internal wifi/wimax combo device + * @high_temp: Is this NIC is designated to be in high temperature. + * @host_interrupt_operation_mode: device needs host interrupt operation + * mode set + * @d0i3: device uses d0i3 instead of d3 + * @nvm_hw_section_num: the ID of the HW NVM section + * @features: hw features, any combination of feature_whitelist + * @pwr_tx_backoffs: translation table between power limits and backoffs + * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response + * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response + * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the + * station can receive in HT + * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the + * station can receive in VHT + * @dccm_offset: offset from which DCCM begins + * @dccm_len: length of DCCM (including runtime stack CCM) + * @dccm2_offset: offset from which the second DCCM begins + * @dccm2_len: length of the second DCCM + * @smem_offset: offset from which the SMEM begins + * @smem_len: the length of SMEM + * + * We enable the driver to be backward compatible wrt. hardware features. + * API differences in uCode shouldn't be handled here but through TLVs + * and/or the uCode API version instead. 
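+ *
+ * For example, the 8000-family entries in iwl-8000.c fill the family-wide
+ * members through the IWL_DEVICE_8000 macro and then set only the
+ * per-device fields (@name, @ht_params, @nvm_ver, ...), so adding a new
+ * SKU needs little more than another small struct iwl_cfg.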
+ */ +struct iwl_cfg { + /* params specific to an individual device within a device family */ + const char *name; + const char *fw_name_pre; + const unsigned int ucode_api_max; + const unsigned int ucode_api_ok; + const unsigned int ucode_api_min; + const enum iwl_device_family device_family; + const u32 max_data_size; + const u32 max_inst_size; + u8 valid_tx_ant; + u8 valid_rx_ant; + u8 non_shared_ant; + bool bt_shared_single_ant; + u16 nvm_ver; + u16 nvm_calib_ver; + /* params not likely to change within a device family */ + const struct iwl_base_params *base_params; + /* params likely to change within a device family */ + const struct iwl_ht_params *ht_params; + const struct iwl_eeprom_params *eeprom_params; + enum iwl_led_mode led_mode; + const bool rx_with_siso_diversity; + const bool internal_wimax_coex; + const bool host_interrupt_operation_mode; + bool high_temp; + bool d0i3; + u8 nvm_hw_section_num; + bool lp_xtal_workaround; + const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; + bool no_power_up_nic_in_init; + const char *default_nvm_file_B_step; + const char *default_nvm_file_C_step; + netdev_features_t features; + unsigned int max_rx_agg_size; + bool disable_dummy_notification; + unsigned int max_tx_agg_size; + unsigned int max_ht_ampdu_exponent; + unsigned int max_vht_ampdu_exponent; + const u32 dccm_offset; + const u32 dccm_len; + const u32 dccm2_offset; + const u32 dccm2_len; + const u32 smem_offset; + const u32 smem_len; + const struct iwl_tt_params *thermal_params; + bool apmg_not_supported; +}; + +/* + * This list declares the config structures for all devices. + */ +#if IS_ENABLED(CONFIG_IWLDVM) +extern const struct iwl_cfg iwl5300_agn_cfg; +extern const struct iwl_cfg iwl5100_agn_cfg; +extern const struct iwl_cfg iwl5350_agn_cfg; +extern const struct iwl_cfg iwl5100_bgn_cfg; +extern const struct iwl_cfg iwl5100_abg_cfg; +extern const struct iwl_cfg iwl5150_agn_cfg; +extern const struct iwl_cfg iwl5150_abg_cfg; +extern const struct iwl_cfg iwl6005_2agn_cfg; +extern const struct iwl_cfg iwl6005_2abg_cfg; +extern const struct iwl_cfg iwl6005_2bg_cfg; +extern const struct iwl_cfg iwl6005_2agn_sff_cfg; +extern const struct iwl_cfg iwl6005_2agn_d_cfg; +extern const struct iwl_cfg iwl6005_2agn_mow1_cfg; +extern const struct iwl_cfg iwl6005_2agn_mow2_cfg; +extern const struct iwl_cfg iwl1030_bgn_cfg; +extern const struct iwl_cfg iwl1030_bg_cfg; +extern const struct iwl_cfg iwl6030_2agn_cfg; +extern const struct iwl_cfg iwl6030_2abg_cfg; +extern const struct iwl_cfg iwl6030_2bgn_cfg; +extern const struct iwl_cfg iwl6030_2bg_cfg; +extern const struct iwl_cfg iwl6000i_2agn_cfg; +extern const struct iwl_cfg iwl6000i_2abg_cfg; +extern const struct iwl_cfg iwl6000i_2bg_cfg; +extern const struct iwl_cfg iwl6000_3agn_cfg; +extern const struct iwl_cfg iwl6050_2agn_cfg; +extern const struct iwl_cfg iwl6050_2abg_cfg; +extern const struct iwl_cfg iwl6150_bgn_cfg; +extern const struct iwl_cfg iwl6150_bg_cfg; +extern const struct iwl_cfg iwl1000_bgn_cfg; +extern const struct iwl_cfg iwl1000_bg_cfg; +extern const struct iwl_cfg iwl100_bgn_cfg; +extern const struct iwl_cfg iwl100_bg_cfg; +extern const struct iwl_cfg iwl130_bgn_cfg; +extern const struct iwl_cfg iwl130_bg_cfg; +extern const struct iwl_cfg iwl2000_2bgn_cfg; +extern const struct iwl_cfg iwl2000_2bgn_d_cfg; +extern const struct iwl_cfg iwl2030_2bgn_cfg; +extern const struct iwl_cfg iwl6035_2agn_cfg; +extern const struct iwl_cfg iwl6035_2agn_sff_cfg; +extern const struct iwl_cfg iwl105_bgn_cfg; +extern const struct iwl_cfg 
iwl105_bgn_d_cfg; +extern const struct iwl_cfg iwl135_bgn_cfg; +#endif /* CONFIG_IWLDVM */ +#if IS_ENABLED(CONFIG_IWLMVM) +extern const struct iwl_cfg iwl7260_2ac_cfg; +extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp; +extern const struct iwl_cfg iwl7260_2n_cfg; +extern const struct iwl_cfg iwl7260_n_cfg; +extern const struct iwl_cfg iwl3160_2ac_cfg; +extern const struct iwl_cfg iwl3160_2n_cfg; +extern const struct iwl_cfg iwl3160_n_cfg; +extern const struct iwl_cfg iwl3165_2ac_cfg; +extern const struct iwl_cfg iwl7265_2ac_cfg; +extern const struct iwl_cfg iwl7265_2n_cfg; +extern const struct iwl_cfg iwl7265_n_cfg; +extern const struct iwl_cfg iwl7265d_2ac_cfg; +extern const struct iwl_cfg iwl7265d_2n_cfg; +extern const struct iwl_cfg iwl7265d_n_cfg; +extern const struct iwl_cfg iwl8260_2n_cfg; +extern const struct iwl_cfg iwl8260_2ac_cfg; +extern const struct iwl_cfg iwl4165_2ac_cfg; +extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; +extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; +#endif /* CONFIG_IWLMVM */ + +#endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h new file mode 100644 index 000000000000..543abeaffcf0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -0,0 +1,552 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_csr_h__ +#define __iwl_csr_h__ +/* + * CSR (control and status registers) + * + * CSR registers are mapped directly into PCI bus space, and are accessible + * whenever platform supplies power to device, even when device is in + * low power states due to driver-invoked device resets + * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. + * + * Use iwl_write32() and iwl_read32() family to access these registers; + * these provide simple PCI bus access, without waking up the MAC. + * Do not use iwl_write_direct32() family for these registers; + * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. + * The MAC (uCode processor, etc.) does not need to be powered up for accessing + * the CSR registers. + * + * NOTE: Device does need to be awake in order to read this memory + * via CSR_EEPROM and CSR_OTP registers + */ +#define CSR_BASE (0x000) + +#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ +#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ +#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ +#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ +#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ +#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ +#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ +#define CSR_GP_CNTRL (CSR_BASE+0x024) + +/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ +#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) + +/* + * Hardware revision info + * Bit fields: + * 31-16: Reserved + * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions + * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D + * 1-0: "Dash" (-) value, as in A-1, etc. + */ +#define CSR_HW_REV (CSR_BASE+0x028) + +/* + * EEPROM and OTP (one-time-programmable) memory reads + * + * NOTE: Device must be awake, initialized via apm_ops.init(), + * in order to read. + */ +#define CSR_EEPROM_REG (CSR_BASE+0x02c) +#define CSR_EEPROM_GP (CSR_BASE+0x030) +#define CSR_OTP_GP_REG (CSR_BASE+0x034) + +#define CSR_GIO_REG (CSR_BASE+0x03C) +#define CSR_GP_UCODE_REG (CSR_BASE+0x048) +#define CSR_GP_DRIVER_REG (CSR_BASE+0x050) + +/* + * UCODE-DRIVER GP (general purpose) mailbox registers. + * SET/CLR registers set/clear bit(s) if "1" is written. 
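+ *
+ * For example, a bit such as CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED can be
+ * asserted and later de-asserted with the same mask (sketch, using the
+ * iwl_write32() helper described above, trans being the iwl_trans handle):
+ *
+ *	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
+ *		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ *	...
+ *	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ *		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);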
+ */ +#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) +#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) +#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) +#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) + +#define CSR_MBOX_SET_REG (CSR_BASE + 0x88) + +#define CSR_LED_REG (CSR_BASE+0x094) +#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) +#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */ + + +/* GIO Chicken Bits (PCI Express bus link power management) */ +#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) + +/* Analog phase-lock-loop configuration */ +#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) + +/* + * CSR HW resources monitor registers + */ +#define CSR_MONITOR_CFG_REG (CSR_BASE+0x214) +#define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228) +#define CSR_MONITOR_XTAL_RESOURCES (0x00000010) + +/* + * CSR Hardware Revision Workaround Register. Indicates hardware rev; + * "step" determines CCK backoff for txpower calculation. Used for 4965 only. + * See also CSR_HW_REV register. + * Bit fields: + * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step + * 1-0: "Dash" (-) value, as in C-1, etc. + */ +#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) + +#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) +#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) + +/* Bits for CSR_HW_IF_CONFIG_REG */ +#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003) +#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C) +#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0) +#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) +#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) +#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00) +#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000) +#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000) + +#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0) +#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2) +#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6) +#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10) +#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12) +#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14) + +#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) +#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ +#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ +#define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000) +#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */ + +#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5) + +#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ +#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ + +/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), + * acknowledged (reset) by host writing "1" to flagged bits. 
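+ *
+ * For example, a handler typically latches the pending flags and then
+ * acknowledges exactly those bits by writing the same value back (sketch):
+ *
+ *	u32 inta = iwl_read32(trans, CSR_INT);
+ *	iwl_write32(trans, CSR_INT, inta);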
*/ +#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ +#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ +#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ +#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ +#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ +#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */ +#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ +#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ +#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */ +#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ +#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ + +#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ + CSR_INT_BIT_HW_ERR | \ + CSR_INT_BIT_FH_TX | \ + CSR_INT_BIT_SW_ERR | \ + CSR_INT_BIT_PAGING | \ + CSR_INT_BIT_RF_KILL | \ + CSR_INT_BIT_SW_RX | \ + CSR_INT_BIT_WAKEUP | \ + CSR_INT_BIT_ALIVE | \ + CSR_INT_BIT_RX_PERIODIC) + +/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ +#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ +#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ +#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ +#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ +#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ +#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ + +#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + +#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0) + +/* GPIO */ +#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) +#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) +#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200) + +/* RESET */ +#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) +#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) +#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) +#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) +#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) +#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) + +/* + * GP (general purpose) CONTROL REGISTER + * Bit fields: + * 27: HW_RF_KILL_SW + * Indicates state of (platform's) hardware RF-Kill switch + * 26-24: POWER_SAVE_TYPE + * Indicates current power-saving mode: + * 000 -- No power saving + * 001 -- MAC power-down + * 010 -- PHY (radio) power-down + * 011 -- Error + * 10: XTAL ON request + * 9-6: SYS_CONFIG + * Indicates current system configuration, reflecting pins on chip + * as forced high/low by device circuit board. + * 4: GOING_TO_SLEEP + * Indicates MAC is entering a power-saving sleep power-down. + * Not a good time to access device-internal resources. + * 3: MAC_ACCESS_REQ + * Host sets this to request and maintain MAC wakeup, to allow host + * access to device-internal resources. Host must wait for + * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR + * device registers. + * 2: INIT_DONE + * Host sets this to put device into fully operational D0 power mode. + * Host resets this after SW_RESET to put device into low power mode. + * 0: MAC_CLOCK_READY + * Indicates MAC (ucode processor, etc.) is powered up and can run. + * Internal resources are accessible. + * NOTE: This does not indicate that the processor is actually running. 
+ * NOTE: This does not indicate that device has completed + * init or post-power-down restore of internal SRAM memory. + * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that + * SRAM is restored and uCode is in normal operation mode. + * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and + * do not need to save/restore it. + * NOTE: After device reset, this bit remains "0" until host sets + * INIT_DONE + */ +#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) +#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) +#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) +#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) +#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400) + +#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) + +#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) +#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) +#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) + + +/* HW REV */ +#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0) +#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2) + + +/** + * hw_rev values + */ +enum { + SILICON_A_STEP = 0, + SILICON_B_STEP, + SILICON_C_STEP, +}; + + +#define CSR_HW_REV_TYPE_MSK (0x000FFF0) +#define CSR_HW_REV_TYPE_5300 (0x0000020) +#define CSR_HW_REV_TYPE_5350 (0x0000030) +#define CSR_HW_REV_TYPE_5100 (0x0000050) +#define CSR_HW_REV_TYPE_5150 (0x0000040) +#define CSR_HW_REV_TYPE_1000 (0x0000060) +#define CSR_HW_REV_TYPE_6x00 (0x0000070) +#define CSR_HW_REV_TYPE_6x50 (0x0000080) +#define CSR_HW_REV_TYPE_6150 (0x0000084) +#define CSR_HW_REV_TYPE_6x05 (0x00000B0) +#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 +#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 +#define CSR_HW_REV_TYPE_2x30 (0x00000C0) +#define CSR_HW_REV_TYPE_2x00 (0x0000100) +#define CSR_HW_REV_TYPE_105 (0x0000110) +#define CSR_HW_REV_TYPE_135 (0x0000120) +#define CSR_HW_REV_TYPE_7265D (0x0000210) +#define CSR_HW_REV_TYPE_NONE (0x00001F0) + +/* EEPROM REG */ +#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) +#define CSR_EEPROM_REG_BIT_CMD (0x00000002) +#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC) +#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) + +/* EEPROM GP */ +#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ +#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) +#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000) +#define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001) +#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) +#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) + +/* One-time-programmable memory general purpose reg */ +#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */ +#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ +#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ +#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ + +/* GP REG */ +#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ +#define CSR_GP_REG_NO_POWER_SAVE (0x00000000) +#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) +#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) +#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) + + +/* CSR GIO */ +#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) + +/* + * UCODE-DRIVER GP (general purpose) mailbox register 1 + * Host driver and uCode write and/or read this register to communicate with + * each other. 
+ * Bit fields: + * 4: UCODE_DISABLE + * Host sets this to request permanent halt of uCode, same as + * sending CARD_STATE command with "halt" bit set. + * 3: CT_KILL_EXIT + * Host sets this to request exit from CT_KILL state, i.e. host thinks + * device temperature is low enough to continue normal operation. + * 2: CMD_BLOCKED + * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL) + * to release uCode to clear all Tx and command queues, enter + * unassociated mode, and power down. + * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit. + * 1: SW_BIT_RFKILL + * Host sets this when issuing CARD_STATE command to request + * device sleep. + * 0: MAC_SLEEP + * uCode sets this when preparing a power-saving power-down. + * uCode resets this when power-up is complete and SRAM is sane. + * NOTE: device saves internal SRAM data to host when powering down, + * and must restore this data after powering back up. + * MAC_SLEEP is the best indication that restore is complete. + * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and + * do not need to save/restore it. + */ +#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) +#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) +#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) +#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) +#define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020) + +/* GP Driver */ +#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003) +#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000) +#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001) +#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) +#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) +#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008) + +#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080) + +/* GIO Chicken Bits (PCI Express bus link power management) */ +#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) +#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) + +/* LED */ +#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) +#define CSR_LED_REG_TURN_ON (0x60) +#define CSR_LED_REG_TURN_OFF (0x20) + +/* ANA_PLL */ +#define CSR50_ANA_PLL_CFG_VAL (0x00880300) + +/* HPET MEM debug */ +#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) + +/* DRAM INT TABLE */ +#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) +#define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28) +#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) + +/* + * SHR target access (Shared block memory space) + * + * Shared internal registers can be accessed directly from PCI bus through SHR + * arbiter without need for the MAC HW to be powered up. This is possible due to + * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and + * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers. + * + * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW + * need not be powered up so no "grab inc access" is required. + */ + +/* + * Registers for accessing shared registers (e.g. SHR_APMG_GP1, + * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC), + * first, write to the control register: + * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) + * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access) + * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0]. 
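+ *
+ * Expressed with the iwl_write32()/iwl_read32() helpers, the read sequence
+ * is roughly (trans being the iwl_trans handle, val receiving SHR_APMG_GP1):
+ *
+ *	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ *		    (2 << 28) | 0x1DC);
+ *	val = iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);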
+ * + * To write the register, first, write to the data register + * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then: + * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) + * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access) + */ +#define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec) +#define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4) + +/* + * HBUS (Host-side Bus) + * + * HBUS registers are mapped directly into PCI bus space, but are used + * to indirectly access device's internal memory or registers that + * may be powered-down. + * + * Use iwl_write_direct32()/iwl_read_direct32() family for these registers; + * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ + * to make sure the MAC (uCode processor, etc.) is powered up for accessing + * internal resources. + * + * Do not use iwl_write32()/iwl_read32() family to access these registers; + * these provide only simple PCI bus access, without waking up the MAC. + */ +#define HBUS_BASE (0x400) + +/* + * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM + * structures, error log, event log, verifying uCode load). + * First write to address register, then read from or write to data register + * to complete the job. Once the address register is set up, accesses to + * data registers auto-increment the address by one dword. + * Bit usage for address registers (read or write): + * 0-31: memory address within device + */ +#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) +#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) +#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) +#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) + +/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */ +#define HBUS_TARG_MBX_C (HBUS_BASE+0x030) +#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) + +/* + * Registers for accessing device's internal peripheral registers + * (e.g. SCD, BSM, etc.). First write to address register, + * then read from or write to data register to complete the job. + * Bit usage for address registers (read or write): + * 0-15: register address (offset) within device + * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword) + */ +#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) +#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) +#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) +#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) + +/* Used to enable DBGM */ +#define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c) + +/* + * Per-Tx-queue write pointer (index, really!) + * Indicates index to next TFD that driver will fill (1 past latest filled). 
+ * Bit usage: + * 0-7: queue write index + * 11-8: queue selector + */ +#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) + +/********************************************************** + * CSR values + **********************************************************/ + /* + * host interrupt timeout value + * used with setting interrupt coalescing timer + * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit + * + * default interrupt coalescing timer is 64 x 32 = 2048 usecs + */ +#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_TIMEOUT_DEF (0x40) +#define IWL_HOST_INT_TIMEOUT_MIN (0x0) +#define IWL_HOST_INT_OPER_MODE BIT(31) + +/***************************************************************************** + * 7000/3000 series SHR DTS addresses * + *****************************************************************************/ + +/* Diode Results Register Structure: */ +enum dtd_diode_reg { + DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */ + DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */ + DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */ + DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */ + DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */ + DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */ +/* Those are the masks INSIDE the flags bit-field: */ + DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0, + DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */ + DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7, + DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ +}; + +#endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c new file mode 100644 index 000000000000..09feff4fa226 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c @@ -0,0 +1,136 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include +#include "iwl-drv.h" +#include "iwl-debug.h" +#include "iwl-devtrace.h" + +#define __iwl_fn(fn) \ +void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ +{ \ + struct va_format vaf = { \ + .fmt = fmt, \ + }; \ + va_list args; \ + \ + va_start(args, fmt); \ + vaf.va = &args; \ + dev_ ##fn(dev, "%pV", &vaf); \ + trace_iwlwifi_ ##fn(&vaf); \ + va_end(args); \ +} + +__iwl_fn(warn) +IWL_EXPORT_SYMBOL(__iwl_warn); +__iwl_fn(info) +IWL_EXPORT_SYMBOL(__iwl_info); +__iwl_fn(crit) +IWL_EXPORT_SYMBOL(__iwl_crit); + +void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only, + const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + if (!trace_only) { + if (rfkill_prefix) + dev_err(dev, "(RFKILL) %pV", &vaf); + else + dev_err(dev, "%pV", &vaf); + } + trace_iwlwifi_err(&vaf); + va_end(args); +} +IWL_EXPORT_SYMBOL(__iwl_err); + +#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) +void __iwl_dbg(struct device *dev, + u32 level, bool limit, const char *function, + const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_have_debug_level(level) && + (!limit || net_ratelimit())) + dev_printk(KERN_DEBUG, dev, "%c %s %pV", + in_interrupt() ? 'I' : 'U', function, &vaf); +#endif + trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf); + va_end(args); +} +IWL_EXPORT_SYMBOL(__iwl_dbg); +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h new file mode 100644 index 000000000000..9bb36d79c2bd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -0,0 +1,225 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_debug_h__ +#define __iwl_debug_h__ + +#include "iwl-modparams.h" + + +static inline bool iwl_have_debug_level(u32 level) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + return iwlwifi_mod_params.debug_level & level; +#else + return false; +#endif +} + +void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, + const char *fmt, ...) __printf(4, 5); +void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3); +void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3); +void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3); + +/* not all compilers can evaluate strlen() at compile time, so use sizeof() */ +#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n') + +/* No matter what is m (priv, bus, trans), this will work */ +#define IWL_ERR_DEV(d, f, a...) \ + do { \ + CHECK_FOR_NEWLINE(f); \ + __iwl_err((d), false, false, f, ## a); \ + } while (0) +#define IWL_ERR(m, f, a...) \ + IWL_ERR_DEV((m)->dev, f, ## a) +#define IWL_WARN(m, f, a...) \ + do { \ + CHECK_FOR_NEWLINE(f); \ + __iwl_warn((m)->dev, f, ## a); \ + } while (0) +#define IWL_INFO(m, f, a...) \ + do { \ + CHECK_FOR_NEWLINE(f); \ + __iwl_info((m)->dev, f, ## a); \ + } while (0) +#define IWL_CRIT(m, f, a...) \ + do { \ + CHECK_FOR_NEWLINE(f); \ + __iwl_crit((m)->dev, f, ## a); \ + } while (0) + +#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) +void __iwl_dbg(struct device *dev, + u32 level, bool limit, const char *function, + const char *fmt, ...) __printf(5, 6); +#else +__printf(5, 6) static inline void +__iwl_dbg(struct device *dev, + u32 level, bool limit, const char *function, + const char *fmt, ...) +{} +#endif + +#define iwl_print_hex_error(m, p, len) \ +do { \ + print_hex_dump(KERN_ERR, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...) \ + do { \ + CHECK_FOR_NEWLINE(fmt); \ + __iwl_dbg(dev, level, limit, __func__, fmt, ##args); \ + } while (0) +#define IWL_DEBUG(m, level, fmt, args...) \ + __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args) +#define IWL_DEBUG_DEV(dev, level, fmt, args...) \ + __IWL_DEBUG_DEV(dev, level, false, fmt, ##args) +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) 
\ + __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args) + +#ifdef CONFIG_IWLWIFI_DEBUG +#define iwl_print_hex_dump(m, level, p, len) \ +do { \ + if (iwl_have_debug_level(level)) \ + print_hex_dump(KERN_DEBUG, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) +#else +#define iwl_print_hex_dump(m, level, p, len) +#endif /* CONFIG_IWLWIFI_DEBUG */ + +/* + * To use the debug system: + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of + * + * #define IWL_DL_xxxx VALUE + * + * where xxxx should be the name of the classification (for example, WEP). + * + * You then need to either add a IWL_xxxx_DEBUG() macro definition for your + * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * The active debug levels can be accessed via files + * + * /sys/module/iwlwifi/parameters/debug + * when CONFIG_IWLWIFI_DEBUG=y. + * + * /sys/kernel/debug/phy0/iwlwifi/debug/debug_level + * when CONFIG_IWLWIFI_DEBUGFS=y. + * + */ + +/* 0x0000000F - 0x00000001 */ +#define IWL_DL_INFO 0x00000001 +#define IWL_DL_MAC80211 0x00000002 +#define IWL_DL_HCMD 0x00000004 +#define IWL_DL_TDLS 0x00000008 +/* 0x000000F0 - 0x00000010 */ +#define IWL_DL_QUOTA 0x00000010 +#define IWL_DL_TE 0x00000020 +#define IWL_DL_EEPROM 0x00000040 +#define IWL_DL_RADIO 0x00000080 +/* 0x00000F00 - 0x00000100 */ +#define IWL_DL_POWER 0x00000100 +#define IWL_DL_TEMP 0x00000200 +#define IWL_DL_RPM 0x00000400 +#define IWL_DL_SCAN 0x00000800 +/* 0x0000F000 - 0x00001000 */ +#define IWL_DL_ASSOC 0x00001000 +#define IWL_DL_DROP 0x00002000 +#define IWL_DL_LAR 0x00004000 +#define IWL_DL_COEX 0x00008000 +/* 0x000F0000 - 0x00010000 */ +#define IWL_DL_FW 0x00010000 +#define IWL_DL_RF_KILL 0x00020000 +#define IWL_DL_FW_ERRORS 0x00040000 +#define IWL_DL_LED 0x00080000 +/* 0x00F00000 - 0x00100000 */ +#define IWL_DL_RATE 0x00100000 +#define IWL_DL_CALIB 0x00200000 +#define IWL_DL_WEP 0x00400000 +#define IWL_DL_TX 0x00800000 +/* 0x0F000000 - 0x01000000 */ +#define IWL_DL_RX 0x01000000 +#define IWL_DL_ISR 0x02000000 +#define IWL_DL_HT 0x04000000 +#define IWL_DL_EXTERNAL 0x08000000 +/* 0xF0000000 - 0x10000000 */ +#define IWL_DL_11H 0x10000000 +#define IWL_DL_STATS 0x20000000 +#define IWL_DL_TX_REPLY 0x40000000 +#define IWL_DL_TX_QUEUES 0x80000000 + +#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a) +#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a) +#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) +#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) +#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) +#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) +#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) +#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) +#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) +#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_QUOTA(p, f, a...) IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a) +#define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a) +#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a) +#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) +#define IWL_DEBUG_FW(p, f, a...) 
IWL_DEBUG(p, IWL_DL_FW, f, ## a) +#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) +#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a) +#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_COEX(p, f, a...) IWL_DEBUG(p, IWL_DL_COEX, f, ## a) +#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_ASSOC(p, f, a...) \ + IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a) +#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a) +#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) +#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) +#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) +#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a) +#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a) + +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h new file mode 100644 index 000000000000..71a78cede9b0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h @@ -0,0 +1,80 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_DEVICE_TRACE_DATA + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_data + +TRACE_EVENT(iwlwifi_dev_tx_data, + TP_PROTO(const struct device *dev, + struct sk_buff *skb, + u8 hdr_len, size_t data_len), + TP_ARGS(dev, skb, hdr_len, data_len), + TP_STRUCT__entry( + DEV_ENTRY + + __dynamic_array(u8, data, iwl_trace_data(skb) ? 
data_len : 0) + ), + TP_fast_assign( + DEV_ASSIGN; + if (iwl_trace_data(skb)) + skb_copy_bits(skb, hdr_len, + __get_dynamic_array(data), data_len); + ), + TP_printk("[%s] TX frame data", __get_str(dev)) +); + +TRACE_EVENT(iwlwifi_dev_rx_data, + TP_PROTO(const struct device *dev, + const struct iwl_trans *trans, + void *rxbuf, size_t len), + TP_ARGS(dev, trans, rxbuf, len), + TP_STRUCT__entry( + DEV_ENTRY + + __dynamic_array(u8, data, + len - iwl_rx_trace_len(trans, rxbuf, len)) + ), + TP_fast_assign( + size_t offs = iwl_rx_trace_len(trans, rxbuf, len); + DEV_ASSIGN; + if (offs < len) + memcpy(__get_dynamic_array(data), + ((u8 *)rxbuf) + offs, len - offs); + ), + TP_printk("[%s] RX frame data", __get_str(dev)) +); +#endif /* __IWLWIFI_DEVICE_TRACE_DATA */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace-data +#include diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h new file mode 100644 index 000000000000..f62c54485852 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h @@ -0,0 +1,155 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_DEVICE_TRACE_IO) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_DEVICE_TRACE_IO + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_io + +TRACE_EVENT(iwlwifi_dev_ioread32, + TP_PROTO(const struct device *dev, u32 offs, u32 val), + TP_ARGS(dev, offs, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] read io[%#x] = %#x", + __get_str(dev), __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_dev_iowrite8, + TP_PROTO(const struct device *dev, u32 offs, u8 val), + TP_ARGS(dev, offs, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, offs) + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] write io[%#x] = %#x)", + __get_str(dev), __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_dev_iowrite32, + TP_PROTO(const struct device *dev, u32 offs, u32 val), + TP_ARGS(dev, offs, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] write io[%#x] = %#x)", + __get_str(dev), __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_dev_iowrite_prph32, + TP_PROTO(const struct device *dev, u32 offs, u32 val), + TP_ARGS(dev, offs, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] write PRPH[%#x] = %#x)", + __get_str(dev), __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_dev_ioread_prph32, + TP_PROTO(const struct device *dev, u32 offs, u32 val), + TP_ARGS(dev, offs, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] read PRPH[%#x] = %#x", + __get_str(dev), __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_dev_irq, + TP_PROTO(const struct device *dev), + TP_ARGS(dev), + TP_STRUCT__entry( + DEV_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + ), + /* TP_printk("") doesn't compile */ + TP_printk("%d", 0) +); + +TRACE_EVENT(iwlwifi_dev_ict_read, + TP_PROTO(const struct device *dev, u32 index, u32 value), + TP_ARGS(dev, index, value), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, index) + __field(u32, value) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->index = index; + __entry->value = value; + ), + TP_printk("[%s] read ict[%d] = %#.8x", + __get_str(dev), __entry->index, __entry->value) +); +#endif /* __IWLWIFI_DEVICE_TRACE_IO */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace-io +#include diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h new file mode 100644 index 000000000000..eb4b99a1c8cd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -0,0 +1,209 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_DEVICE_TRACE_IWLWIFI + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi + +TRACE_EVENT(iwlwifi_dev_hcmd, + TP_PROTO(const struct device *dev, + struct iwl_host_cmd *cmd, u16 total_size, + struct iwl_cmd_header_wide *hdr), + TP_ARGS(dev, cmd, total_size, hdr), + TP_STRUCT__entry( + DEV_ENTRY + __dynamic_array(u8, hcmd, total_size) + __field(u32, flags) + ), + TP_fast_assign( + int i, offset = sizeof(struct iwl_cmd_header); + + if (hdr->group_id) + offset = sizeof(struct iwl_cmd_header_wide); + + DEV_ASSIGN; + __entry->flags = cmd->flags; + memcpy(__get_dynamic_array(hcmd), hdr, offset); + + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + if (!cmd->len[i]) + continue; + memcpy((u8 *)__get_dynamic_array(hcmd) + offset, + cmd->data[i], cmd->len[i]); + offset += cmd->len[i]; + } + ), + TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)", + __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1], + ((u8 *)__get_dynamic_array(hcmd))[0], + __entry->flags & CMD_ASYNC ? "a" : "") +); + +TRACE_EVENT(iwlwifi_dev_rx, + TP_PROTO(const struct device *dev, const struct iwl_trans *trans, + struct iwl_rx_packet *pkt, size_t len), + TP_ARGS(dev, trans, pkt, len), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, cmd) + __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len)) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->cmd = pkt->hdr.cmd; + memcpy(__get_dynamic_array(rxbuf), pkt, + iwl_rx_trace_len(trans, pkt, len)); + ), + TP_printk("[%s] RX cmd %#.2x", + __get_str(dev), __entry->cmd) +); + +TRACE_EVENT(iwlwifi_dev_tx, + TP_PROTO(const struct device *dev, struct sk_buff *skb, + void *tfd, size_t tfdlen, + void *buf0, size_t buf0_len, + void *buf1, size_t buf1_len), + TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), + TP_STRUCT__entry( + DEV_ENTRY + + __field(size_t, framelen) + __dynamic_array(u8, tfd, tfdlen) + + /* + * Do not insert between or below these items, + * we want to keep the frame together (except + * for the possible padding). + */ + __dynamic_array(u8, buf0, buf0_len) + __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 
0 : buf1_len) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->framelen = buf0_len + buf1_len; + memcpy(__get_dynamic_array(tfd), tfd, tfdlen); + memcpy(__get_dynamic_array(buf0), buf0, buf0_len); + if (!iwl_trace_data(skb)) + memcpy(__get_dynamic_array(buf1), buf1, buf1_len); + ), + TP_printk("[%s] TX %.2x (%zu bytes)", + __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0], + __entry->framelen) +); + +TRACE_EVENT(iwlwifi_dev_ucode_error, + TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low, + u32 data1, u32 data2, u32 line, u32 blink1, + u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, + u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver, + u32 brd_ver), + TP_ARGS(dev, desc, tsf_low, data1, data2, line, + blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2, + gp3, major, minor, hw_ver, brd_ver), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, desc) + __field(u32, tsf_low) + __field(u32, data1) + __field(u32, data2) + __field(u32, line) + __field(u32, blink1) + __field(u32, blink2) + __field(u32, ilink1) + __field(u32, ilink2) + __field(u32, bcon_time) + __field(u32, gp1) + __field(u32, gp2) + __field(u32, gp3) + __field(u32, major) + __field(u32, minor) + __field(u32, hw_ver) + __field(u32, brd_ver) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->desc = desc; + __entry->tsf_low = tsf_low; + __entry->data1 = data1; + __entry->data2 = data2; + __entry->line = line; + __entry->blink1 = blink1; + __entry->blink2 = blink2; + __entry->ilink1 = ilink1; + __entry->ilink2 = ilink2; + __entry->bcon_time = bcon_time; + __entry->gp1 = gp1; + __entry->gp2 = gp2; + __entry->gp3 = gp3; + __entry->major = major; + __entry->minor = minor; + __entry->hw_ver = hw_ver; + __entry->brd_ver = brd_ver; + ), + TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, " + "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X " + "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X " + "minor 0x%08X hw 0x%08X brd 0x%08X", + __get_str(dev), __entry->desc, __entry->tsf_low, + __entry->data1, + __entry->data2, __entry->line, __entry->blink1, + __entry->blink2, __entry->ilink1, __entry->ilink2, + __entry->bcon_time, __entry->gp1, __entry->gp2, + __entry->gp3, __entry->major, __entry->minor, + __entry->hw_ver, __entry->brd_ver) +); + +TRACE_EVENT(iwlwifi_dev_ucode_event, + TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), + TP_ARGS(dev, time, data, ev), + TP_STRUCT__entry( + DEV_ENTRY + + __field(u32, time) + __field(u32, data) + __field(u32, ev) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->time = time; + __entry->data = data; + __entry->ev = ev; + ), + TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u", + __get_str(dev), __entry->time, __entry->data, __entry->ev) +); +#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace-iwlwifi +#include diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h new file mode 100644 index 000000000000..a3b3c2465f89 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h @@ -0,0 +1,97 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_DEVICE_TRACE_MSG) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_DEVICE_TRACE_MSG + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_msg + +#define MAX_MSG_LEN 110 + +DECLARE_EVENT_CLASS(iwlwifi_msg_event, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf), + TP_STRUCT__entry( + __dynamic_array(char, msg, MAX_MSG_LEN) + ), + TP_fast_assign( + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + MAX_MSG_LEN, vaf->fmt, + *vaf->va) >= MAX_MSG_LEN); + ), + TP_printk("%s", __get_str(msg)) +); + +DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +TRACE_EVENT(iwlwifi_dbg, + TP_PROTO(u32 level, bool in_interrupt, const char *function, + struct va_format *vaf), + TP_ARGS(level, in_interrupt, function, vaf), + TP_STRUCT__entry( + __field(u32, level) + __field(u8, in_interrupt) + __string(function, function) + __dynamic_array(char, msg, MAX_MSG_LEN) + ), + TP_fast_assign( + __entry->level = level; + __entry->in_interrupt = in_interrupt; + __assign_str(function, function); + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + MAX_MSG_LEN, vaf->fmt, + *vaf->va) >= MAX_MSG_LEN); + ), + TP_printk("%s", __get_str(msg)) +); +#endif /* __IWLWIFI_DEVICE_TRACE_MSG */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace-msg +#include diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h new file mode 100644 index 000000000000..10839fae9cd9 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h @@ -0,0 +1,81 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_DEVICE_TRACE_UCODE) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_DEVICE_TRACE_UCODE + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_ucode + +TRACE_EVENT(iwlwifi_dev_ucode_cont_event, + TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), + TP_ARGS(dev, time, data, ev), + TP_STRUCT__entry( + DEV_ENTRY + + __field(u32, time) + __field(u32, data) + __field(u32, ev) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->time = time; + __entry->data = data; + __entry->ev = ev; + ), + TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u", + __get_str(dev), __entry->time, __entry->data, __entry->ev) +); + +TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, + TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry), + TP_ARGS(dev, wraps, n_entry, p_entry), + TP_STRUCT__entry( + DEV_ENTRY + + __field(u32, wraps) + __field(u32, n_entry) + __field(u32, p_entry) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->wraps = wraps; + __entry->n_entry = n_entry; + __entry->p_entry = p_entry; + ), + TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X", + __get_str(dev), __entry->wraps, __entry->n_entry, + __entry->p_entry) +); +#endif /* __IWLWIFI_DEVICE_TRACE_UCODE */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace-ucode +#include diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c new file mode 100644 index 000000000000..90987d6f348e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c @@ -0,0 +1,43 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include + +/* sparse doesn't like tracepoint macros */ +#ifndef __CHECKER__ +#include "iwl-trans.h" + +#define CREATE_TRACE_POINTS +#include "iwl-devtrace.h" + +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h new file mode 100644 index 000000000000..b87acd6a229b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -0,0 +1,89 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __IWLWIFI_DEVICE_TRACE +#include +#include +#include +#include "iwl-trans.h" +#if !defined(__IWLWIFI_DEVICE_TRACE) +static inline bool iwl_trace_data(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (!ieee80211_is_data(hdr->frame_control)) + return false; + return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO); +} + +static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, + void *rxbuf, size_t len) +{ + struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32)); + struct ieee80211_hdr *hdr; + + if (cmd->cmd != trans->rx_mpdu_cmd) + return len; + + hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) + + trans->rx_mpdu_cmd_hdr_size); + if (!ieee80211_is_data(hdr->frame_control)) + return len; + /* maybe try to identify EAPOL frames? */ + return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size + + ieee80211_hdrlen(hdr->frame_control); +} +#endif + +#define __IWLWIFI_DEVICE_TRACE + +#include +#include +#include "iwl-trans.h" + + +#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) +#undef DEFINE_EVENT +#define DEFINE_EVENT(evt_class, name, proto, ...) 
\ +static inline void trace_ ## name(proto) {} +#endif + +#define DEV_ENTRY __string(dev, dev_name(dev)) +#define DEV_ASSIGN __assign_str(dev, dev_name(dev)) + +#include "iwl-devtrace-io.h" +#include "iwl-devtrace-ucode.h" +#include "iwl-devtrace-msg.h" +#include "iwl-devtrace-data.h" +#include "iwl-devtrace-iwlwifi.h" + +#endif /* __IWLWIFI_DEVICE_TRACE */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c new file mode 100644 index 000000000000..463cadfbfccb --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -0,0 +1,1706 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include +#include +#include +#include + +#include "iwl-drv.h" +#include "iwl-csr.h" +#include "iwl-debug.h" +#include "iwl-trans.h" +#include "iwl-op-mode.h" +#include "iwl-agn-hw.h" +#include "iwl-fw.h" +#include "iwl-config.h" +#include "iwl-modparams.h" + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux" +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_IWLWIFI_DEBUGFS +static struct dentry *iwl_dbgfs_root; +#endif + +/** + * struct iwl_drv - drv common data + * @list: list of drv structures using this opmode + * @fw: the iwl_fw structure + * @op_mode: the running op_mode + * @trans: transport layer + * @dev: for debug prints only + * @cfg: configuration struct + * @fw_index: firmware revision to try loading + * @firmware_name: composite filename of ucode file to load + * @request_firmware_complete: the firmware has been obtained from user space + */ +struct iwl_drv { + struct list_head list; + struct iwl_fw fw; + + struct iwl_op_mode *op_mode; + struct iwl_trans *trans; + struct device *dev; + const struct iwl_cfg *cfg; + + int fw_index; /* firmware we're trying to load */ + char firmware_name[32]; /* name of firmware file to load */ + + struct completion request_firmware_complete; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct dentry *dbgfs_drv; + struct dentry *dbgfs_trans; + struct dentry *dbgfs_op_mode; +#endif +}; + +enum { + DVM_OP_MODE = 0, + MVM_OP_MODE = 1, +}; + +/* Protects the table contents, i.e. the ops pointer & drv list */ +static struct mutex iwlwifi_opmode_table_mtx; +static struct iwlwifi_opmode_table { + const char *name; /* name: iwldvm, iwlmvm, etc */ + const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ + struct list_head drv; /* list of devices using this op_mode */ +} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */ + [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL }, + [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL }, +}; + +#define IWL_DEFAULT_SCAN_CHANNELS 40 + +/* + * struct fw_sec: Just for the image parsing process. + * For the fw storage we are using struct fw_desc. 
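+ * (During parsing this only borrows pointers into the firmware file;
+ * iwl_alloc_fw_desc() below copies each section into a vmalloc'ed
+ * fw_desc buffer that the driver then keeps in drv->fw.img[].)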
+ */ +struct fw_sec { + const void *data; /* the sec data */ + size_t size; /* section size */ + u32 offset; /* offset of writing in the device */ +}; + +static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) +{ + vfree(desc->data); + desc->data = NULL; + desc->len = 0; +} + +static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img) +{ + int i; + for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) + iwl_free_fw_desc(drv, &img->sec[i]); +} + +static void iwl_dealloc_ucode(struct iwl_drv *drv) +{ + int i; + + kfree(drv->fw.dbg_dest_tlv); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) + kfree(drv->fw.dbg_conf_tlv[i]); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) + kfree(drv->fw.dbg_trigger_tlv[i]); + + for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) + iwl_free_fw_img(drv, drv->fw.img + i); +} + +static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, + struct fw_sec *sec) +{ + void *data; + + desc->data = NULL; + + if (!sec || !sec->size) + return -EINVAL; + + data = vmalloc(sec->size); + if (!data) + return -ENOMEM; + + desc->len = sec->size; + desc->offset = sec->offset; + memcpy(data, sec->data, desc->len); + desc->data = data; + + return 0; +} + +static void iwl_req_fw_callback(const struct firmware *ucode_raw, + void *context); + +#define UCODE_EXPERIMENTAL_INDEX 100 +#define UCODE_EXPERIMENTAL_TAG "exp" + +static int iwl_request_firmware(struct iwl_drv *drv, bool first) +{ + const char *name_pre = drv->cfg->fw_name_pre; + char tag[8]; + + if (first) { +#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE + drv->fw_index = UCODE_EXPERIMENTAL_INDEX; + strcpy(tag, UCODE_EXPERIMENTAL_TAG); + } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) { +#endif + drv->fw_index = drv->cfg->ucode_api_max; + sprintf(tag, "%d", drv->fw_index); + } else { + drv->fw_index--; + sprintf(tag, "%d", drv->fw_index); + } + + if (drv->fw_index < drv->cfg->ucode_api_min) { + IWL_ERR(drv, "no suitable firmware found!\n"); + return -ENOENT; + } + + snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", + name_pre, tag); + + /* + * Starting 8000B - FW name format has changed. This overwrites the + * previous name and uses the new format. + */ + if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); + + snprintf(drv->firmware_name, sizeof(drv->firmware_name), + "%s%c-%s.ucode", name_pre, rev_step, tag); + } + + IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", + (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) + ? "EXPERIMENTAL " : "", + drv->firmware_name); + + return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, + drv->trans->dev, + GFP_KERNEL, drv, iwl_req_fw_callback); +} + +struct fw_img_parsing { + struct fw_sec sec[IWL_UCODE_SECTION_MAX]; + int sec_counter; +}; + +/* + * struct fw_sec_parsing: to extract fw section and it's offset from tlv + */ +struct fw_sec_parsing { + __le32 offset; + const u8 data[]; +} __packed; + +/** + * struct iwl_tlv_calib_data - parse the default calib data from TLV + * + * @ucode_type: the uCode to which the following default calib relates. + * @calib: default calibrations. 
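+ *
+ * The TLV parser only accepts this structure when the TLV length is
+ * exactly sizeof(struct iwl_tlv_calib_data); see IWL_UCODE_TLV_DEF_CALIB
+ * in iwl_parse_tlv_firmware() below.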
+ */ +struct iwl_tlv_calib_data { + __le32 ucode_type; + struct iwl_tlv_calib_ctrl calib; +} __packed; + +struct iwl_firmware_pieces { + struct fw_img_parsing img[IWL_UCODE_TYPE_MAX]; + + u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; + u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; + + /* FW debug data parsed for driver usage */ + struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; + size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; + struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; + size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; +}; + +/* + * These functions are just to extract uCode section data from the pieces + * structure. + */ +static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces, + enum iwl_ucode_type type, + int sec) +{ + return &pieces->img[type].sec[sec]; +} + +static void set_sec_data(struct iwl_firmware_pieces *pieces, + enum iwl_ucode_type type, + int sec, + const void *data) +{ + pieces->img[type].sec[sec].data = data; +} + +static void set_sec_size(struct iwl_firmware_pieces *pieces, + enum iwl_ucode_type type, + int sec, + size_t size) +{ + pieces->img[type].sec[sec].size = size; +} + +static size_t get_sec_size(struct iwl_firmware_pieces *pieces, + enum iwl_ucode_type type, + int sec) +{ + return pieces->img[type].sec[sec].size; +} + +static void set_sec_offset(struct iwl_firmware_pieces *pieces, + enum iwl_ucode_type type, + int sec, + u32 offset) +{ + pieces->img[type].sec[sec].offset = offset; +} + +static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) +{ + int i, j; + struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data; + struct iwl_fw_cipher_scheme *fwcs; + struct ieee80211_cipher_scheme *cs; + u32 cipher; + + if (len < sizeof(*l) || + len < sizeof(l->size) + l->size * sizeof(l->cs[0])) + return -EINVAL; + + for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) { + fwcs = &l->cs[j]; + cipher = le32_to_cpu(fwcs->cipher); + + /* we skip schemes with zero cipher suite selector */ + if (!cipher) + continue; + + cs = &fw->cs[j++]; + cs->cipher = cipher; + cs->iftype = BIT(NL80211_IFTYPE_STATION); + cs->hdr_len = fwcs->hdr_len; + cs->pn_len = fwcs->pn_len; + cs->pn_off = fwcs->pn_off; + cs->key_idx_off = fwcs->key_idx_off; + cs->key_idx_mask = fwcs->key_idx_mask; + cs->key_idx_shift = fwcs->key_idx_shift; + cs->mic_len = fwcs->mic_len; + } + + return 0; +} + +static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, + const u32 len) +{ + struct iwl_fw_gscan_capabilities *fw_capa = (void *)data; + struct iwl_gscan_capabilities *capa = &fw->gscan_capa; + + if (len < sizeof(*fw_capa)) + return -EINVAL; + + capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size); + capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets); + capa->max_ap_cache_per_scan = + le32_to_cpu(fw_capa->max_ap_cache_per_scan); + capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size); + capa->max_scan_reporting_threshold = + le32_to_cpu(fw_capa->max_scan_reporting_threshold); + capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps); + capa->max_significant_change_aps = + le32_to_cpu(fw_capa->max_significant_change_aps); + capa->max_bssid_history_entries = + le32_to_cpu(fw_capa->max_bssid_history_entries); + return 0; +} + +/* + * Gets uCode section from tlv. 
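+ * The TLV payload is a struct fw_sec_parsing: a little-endian 32-bit
+ * destination offset followed by the raw section data, so the stored
+ * section size is the TLV size minus sizeof(offset).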
+ */ +static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, + const void *data, enum iwl_ucode_type type, + int size) +{ + struct fw_img_parsing *img; + struct fw_sec *sec; + struct fw_sec_parsing *sec_parse; + + if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX)) + return -1; + + sec_parse = (struct fw_sec_parsing *)data; + + img = &pieces->img[type]; + sec = &img->sec[img->sec_counter]; + + sec->offset = le32_to_cpu(sec_parse->offset); + sec->data = sec_parse->data; + sec->size = size - sizeof(sec_parse->offset); + + ++img->sec_counter; + + return 0; +} + +static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) +{ + struct iwl_tlv_calib_data *def_calib = + (struct iwl_tlv_calib_data *)data; + u32 ucode_type = le32_to_cpu(def_calib->ucode_type); + if (ucode_type >= IWL_UCODE_TYPE_MAX) { + IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n", + ucode_type); + return -EINVAL; + } + drv->fw.default_calib[ucode_type].flow_trigger = + def_calib->calib.flow_trigger; + drv->fw.default_calib[ucode_type].event_trigger = + def_calib->calib.event_trigger; + + return 0; +} + +static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, + struct iwl_ucode_capabilities *capa) +{ + const struct iwl_ucode_api *ucode_api = (void *)data; + u32 api_index = le32_to_cpu(ucode_api->api_index); + u32 api_flags = le32_to_cpu(ucode_api->api_flags); + int i; + + if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) { + IWL_ERR(drv, "api_index larger than supported by driver\n"); + /* don't return an error so we can load FW that has more bits */ + return 0; + } + + for (i = 0; i < 32; i++) { + if (api_flags & BIT(i)) + __set_bit(i + 32 * api_index, capa->_api); + } + + return 0; +} + +static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, + struct iwl_ucode_capabilities *capa) +{ + const struct iwl_ucode_capa *ucode_capa = (void *)data; + u32 api_index = le32_to_cpu(ucode_capa->api_index); + u32 api_flags = le32_to_cpu(ucode_capa->api_capa); + int i; + + if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) { + IWL_ERR(drv, "api_index larger than supported by driver\n"); + /* don't return an error so we can load FW that has more bits */ + return 0; + } + + for (i = 0; i < 32; i++) { + if (api_flags & BIT(i)) + __set_bit(i + 32 * api_index, capa->_capa); + } + + return 0; +} + +static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, + const struct firmware *ucode_raw, + struct iwl_firmware_pieces *pieces) +{ + struct iwl_ucode_header *ucode = (void *)ucode_raw->data; + u32 api_ver, hdr_size, build; + char buildstr[25]; + const u8 *src; + + drv->fw.ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(drv->fw.ucode_ver); + + switch (api_ver) { + default: + hdr_size = 28; + if (ucode_raw->size < hdr_size) { + IWL_ERR(drv, "File size too small!\n"); + return -EINVAL; + } + build = le32_to_cpu(ucode->u.v2.build); + set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, + le32_to_cpu(ucode->u.v2.inst_size)); + set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, + le32_to_cpu(ucode->u.v2.data_size)); + set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, + le32_to_cpu(ucode->u.v2.init_size)); + set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, + le32_to_cpu(ucode->u.v2.init_data_size)); + src = ucode->u.v2.data; + break; + case 0: + case 1: + case 2: + hdr_size = 24; + if (ucode_raw->size < hdr_size) { + IWL_ERR(drv, "File size too small!\n"); + return -EINVAL; + } + build = 0; + 
set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, + le32_to_cpu(ucode->u.v1.inst_size)); + set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, + le32_to_cpu(ucode->u.v1.data_size)); + set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, + le32_to_cpu(ucode->u.v1.init_size)); + set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, + le32_to_cpu(ucode->u.v1.init_data_size)); + src = ucode->u.v1.data; + break; + } + + if (build) + sprintf(buildstr, " build %u%s", build, + (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) + ? " (EXP)" : ""); + else + buildstr[0] = '\0'; + + snprintf(drv->fw.fw_version, + sizeof(drv->fw.fw_version), + "%u.%u.%u.%u%s", + IWL_UCODE_MAJOR(drv->fw.ucode_ver), + IWL_UCODE_MINOR(drv->fw.ucode_ver), + IWL_UCODE_API(drv->fw.ucode_ver), + IWL_UCODE_SERIAL(drv->fw.ucode_ver), + buildstr); + + /* Verify size of file vs. image size info in file's header */ + + if (ucode_raw->size != hdr_size + + get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) + + get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) + + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) + + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) { + + IWL_ERR(drv, + "uCode file size %d does not match expected size\n", + (int)ucode_raw->size); + return -EINVAL; + } + + + set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src); + src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST); + set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, + IWLAGN_RTC_INST_LOWER_BOUND); + set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src); + src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA); + set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, + IWLAGN_RTC_DATA_LOWER_BOUND); + set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src); + src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST); + set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, + IWLAGN_RTC_INST_LOWER_BOUND); + set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src); + src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA); + set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, + IWLAGN_RTC_DATA_LOWER_BOUND); + return 0; +} + +static int iwl_parse_tlv_firmware(struct iwl_drv *drv, + const struct firmware *ucode_raw, + struct iwl_firmware_pieces *pieces, + struct iwl_ucode_capabilities *capa) +{ + struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data; + struct iwl_ucode_tlv *tlv; + size_t len = ucode_raw->size; + const u8 *data; + u32 tlv_len; + u32 usniffer_img; + enum iwl_ucode_tlv_type tlv_type; + const u8 *tlv_data; + char buildstr[25]; + u32 build, paging_mem_size; + int num_of_cpus; + bool usniffer_images = false; + bool usniffer_req = false; + bool gscan_capa = false; + + if (len < sizeof(*ucode)) { + IWL_ERR(drv, "uCode has invalid length: %zd\n", len); + return -EINVAL; + } + + if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) { + IWL_ERR(drv, "invalid uCode magic: 0X%x\n", + le32_to_cpu(ucode->magic)); + return -EINVAL; + } + + drv->fw.ucode_ver = le32_to_cpu(ucode->ver); + memcpy(drv->fw.human_readable, ucode->human_readable, + sizeof(drv->fw.human_readable)); + build = le32_to_cpu(ucode->build); + + if (build) + sprintf(buildstr, " build %u%s", build, + (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) + ? 
" (EXP)" : ""); + else + buildstr[0] = '\0'; + + snprintf(drv->fw.fw_version, + sizeof(drv->fw.fw_version), + "%u.%u.%u.%u%s", + IWL_UCODE_MAJOR(drv->fw.ucode_ver), + IWL_UCODE_MINOR(drv->fw.ucode_ver), + IWL_UCODE_API(drv->fw.ucode_ver), + IWL_UCODE_SERIAL(drv->fw.ucode_ver), + buildstr); + + data = ucode->data; + + len -= sizeof(*ucode); + + while (len >= sizeof(*tlv)) { + len -= sizeof(*tlv); + tlv = (void *)data; + + tlv_len = le32_to_cpu(tlv->length); + tlv_type = le32_to_cpu(tlv->type); + tlv_data = tlv->data; + + if (len < tlv_len) { + IWL_ERR(drv, "invalid TLV len: %zd/%u\n", + len, tlv_len); + return -EINVAL; + } + len -= ALIGN(tlv_len, 4); + data += sizeof(*tlv) + ALIGN(tlv_len, 4); + + switch (tlv_type) { + case IWL_UCODE_TLV_INST: + set_sec_data(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_INST, tlv_data); + set_sec_size(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_INST, tlv_len); + set_sec_offset(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_INST, + IWLAGN_RTC_INST_LOWER_BOUND); + break; + case IWL_UCODE_TLV_DATA: + set_sec_data(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_DATA, tlv_data); + set_sec_size(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_DATA, tlv_len); + set_sec_offset(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_DATA, + IWLAGN_RTC_DATA_LOWER_BOUND); + break; + case IWL_UCODE_TLV_INIT: + set_sec_data(pieces, IWL_UCODE_INIT, + IWL_UCODE_SECTION_INST, tlv_data); + set_sec_size(pieces, IWL_UCODE_INIT, + IWL_UCODE_SECTION_INST, tlv_len); + set_sec_offset(pieces, IWL_UCODE_INIT, + IWL_UCODE_SECTION_INST, + IWLAGN_RTC_INST_LOWER_BOUND); + break; + case IWL_UCODE_TLV_INIT_DATA: + set_sec_data(pieces, IWL_UCODE_INIT, + IWL_UCODE_SECTION_DATA, tlv_data); + set_sec_size(pieces, IWL_UCODE_INIT, + IWL_UCODE_SECTION_DATA, tlv_len); + set_sec_offset(pieces, IWL_UCODE_INIT, + IWL_UCODE_SECTION_DATA, + IWLAGN_RTC_DATA_LOWER_BOUND); + break; + case IWL_UCODE_TLV_BOOT: + IWL_ERR(drv, "Found unexpected BOOT ucode\n"); + break; + case IWL_UCODE_TLV_PROBE_MAX_LEN: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + capa->max_probe_length = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_PAN: + if (tlv_len) + goto invalid_tlv_len; + capa->flags |= IWL_UCODE_TLV_FLAGS_PAN; + break; + case IWL_UCODE_TLV_FLAGS: + /* must be at least one u32 */ + if (tlv_len < sizeof(u32)) + goto invalid_tlv_len; + /* and a proper number of u32s */ + if (tlv_len % sizeof(u32)) + goto invalid_tlv_len; + /* + * This driver only reads the first u32 as + * right now no more features are defined, + * if that changes then either the driver + * will not work with the new firmware, or + * it'll not take advantage of new features. 
+ */ + capa->flags = le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_API_CHANGES_SET: + if (tlv_len != sizeof(struct iwl_ucode_api)) + goto invalid_tlv_len; + if (iwl_set_ucode_api_flags(drv, tlv_data, capa)) + goto tlv_error; + break; + case IWL_UCODE_TLV_ENABLED_CAPABILITIES: + if (tlv_len != sizeof(struct iwl_ucode_capa)) + goto invalid_tlv_len; + if (iwl_set_ucode_capabilities(drv, tlv_data, capa)) + goto tlv_error; + break; + case IWL_UCODE_TLV_INIT_EVTLOG_PTR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->init_evtlog_ptr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_INIT_EVTLOG_SIZE: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->init_evtlog_size = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_INIT_ERRLOG_PTR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->init_errlog_ptr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_RUNT_EVTLOG_PTR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->inst_evtlog_ptr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->inst_evtlog_size = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_RUNT_ERRLOG_PTR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->inst_errlog_ptr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_ENHANCE_SENS_TBL: + if (tlv_len) + goto invalid_tlv_len; + drv->fw.enhance_sensitivity_table = true; + break; + case IWL_UCODE_TLV_WOWLAN_INST: + set_sec_data(pieces, IWL_UCODE_WOWLAN, + IWL_UCODE_SECTION_INST, tlv_data); + set_sec_size(pieces, IWL_UCODE_WOWLAN, + IWL_UCODE_SECTION_INST, tlv_len); + set_sec_offset(pieces, IWL_UCODE_WOWLAN, + IWL_UCODE_SECTION_INST, + IWLAGN_RTC_INST_LOWER_BOUND); + break; + case IWL_UCODE_TLV_WOWLAN_DATA: + set_sec_data(pieces, IWL_UCODE_WOWLAN, + IWL_UCODE_SECTION_DATA, tlv_data); + set_sec_size(pieces, IWL_UCODE_WOWLAN, + IWL_UCODE_SECTION_DATA, tlv_len); + set_sec_offset(pieces, IWL_UCODE_WOWLAN, + IWL_UCODE_SECTION_DATA, + IWLAGN_RTC_DATA_LOWER_BOUND); + break; + case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + capa->standard_phy_calibration_size = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_SEC_RT: + iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, + tlv_len); + drv->fw.mvm_fw = true; + break; + case IWL_UCODE_TLV_SEC_INIT: + iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, + tlv_len); + drv->fw.mvm_fw = true; + break; + case IWL_UCODE_TLV_SEC_WOWLAN: + iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, + tlv_len); + drv->fw.mvm_fw = true; + break; + case IWL_UCODE_TLV_DEF_CALIB: + if (tlv_len != sizeof(struct iwl_tlv_calib_data)) + goto invalid_tlv_len; + if (iwl_set_default_calib(drv, tlv_data)) + goto tlv_error; + break; + case IWL_UCODE_TLV_PHY_SKU: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data); + drv->fw.valid_tx_ant = (drv->fw.phy_config & + FW_PHY_CFG_TX_CHAIN) >> + FW_PHY_CFG_TX_CHAIN_POS; + drv->fw.valid_rx_ant = (drv->fw.phy_config & + FW_PHY_CFG_RX_CHAIN) >> + FW_PHY_CFG_RX_CHAIN_POS; + break; + case IWL_UCODE_TLV_SECURE_SEC_RT: + iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, + tlv_len); + drv->fw.mvm_fw = true; + break; + case IWL_UCODE_TLV_SECURE_SEC_INIT: + iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, + tlv_len); + drv->fw.mvm_fw = 
true; + break; + case IWL_UCODE_TLV_SECURE_SEC_WOWLAN: + iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, + tlv_len); + drv->fw.mvm_fw = true; + break; + case IWL_UCODE_TLV_NUM_OF_CPU: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + num_of_cpus = + le32_to_cpup((__le32 *)tlv_data); + + if (num_of_cpus == 2) { + drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus = + true; + drv->fw.img[IWL_UCODE_INIT].is_dual_cpus = + true; + drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus = + true; + } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) { + IWL_ERR(drv, "Driver support upto 2 CPUs\n"); + return -EINVAL; + } + break; + case IWL_UCODE_TLV_CSCHEME: + if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len)) + goto invalid_tlv_len; + break; + case IWL_UCODE_TLV_N_SCAN_CHANNELS: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + capa->n_scan_channels = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_FW_VERSION: { + __le32 *ptr = (void *)tlv_data; + u32 major, minor; + u8 local_comp; + + if (tlv_len != sizeof(u32) * 3) + goto invalid_tlv_len; + + major = le32_to_cpup(ptr++); + minor = le32_to_cpup(ptr++); + local_comp = le32_to_cpup(ptr); + + snprintf(drv->fw.fw_version, + sizeof(drv->fw.fw_version), "%u.%u.%u", + major, minor, local_comp); + break; + } + case IWL_UCODE_TLV_FW_DBG_DEST: { + struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data; + + if (pieces->dbg_dest_tlv) { + IWL_ERR(drv, + "dbg destination ignored, already exists\n"); + break; + } + + pieces->dbg_dest_tlv = dest; + IWL_INFO(drv, "Found debug destination: %s\n", + get_fw_dbg_mode_string(dest->monitor_mode)); + + drv->fw.dbg_dest_reg_num = + tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, + reg_ops); + drv->fw.dbg_dest_reg_num /= + sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]); + + break; + } + case IWL_UCODE_TLV_FW_DBG_CONF: { + struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data; + + if (!pieces->dbg_dest_tlv) { + IWL_ERR(drv, + "Ignore dbg config %d - no destination configured\n", + conf->id); + break; + } + + if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) { + IWL_ERR(drv, + "Skip unknown configuration: %d\n", + conf->id); + break; + } + + if (pieces->dbg_conf_tlv[conf->id]) { + IWL_ERR(drv, + "Ignore duplicate dbg config %d\n", + conf->id); + break; + } + + if (conf->usniffer) + usniffer_req = true; + + IWL_INFO(drv, "Found debug configuration: %d\n", + conf->id); + + pieces->dbg_conf_tlv[conf->id] = conf; + pieces->dbg_conf_tlv_len[conf->id] = tlv_len; + break; + } + case IWL_UCODE_TLV_FW_DBG_TRIGGER: { + struct iwl_fw_dbg_trigger_tlv *trigger = + (void *)tlv_data; + u32 trigger_id = le32_to_cpu(trigger->id); + + if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) { + IWL_ERR(drv, + "Skip unknown trigger: %u\n", + trigger->id); + break; + } + + if (pieces->dbg_trigger_tlv[trigger_id]) { + IWL_ERR(drv, + "Ignore duplicate dbg trigger %u\n", + trigger->id); + break; + } + + IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id); + + pieces->dbg_trigger_tlv[trigger_id] = trigger; + pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; + break; + } + case IWL_UCODE_TLV_SEC_RT_USNIFFER: + usniffer_images = true; + iwl_store_ucode_sec(pieces, tlv_data, + IWL_UCODE_REGULAR_USNIFFER, + tlv_len); + break; + case IWL_UCODE_TLV_PAGING: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + paging_mem_size = le32_to_cpup((__le32 *)tlv_data); + + IWL_DEBUG_FW(drv, + "Paging: paging enabled (size = %u bytes)\n", + paging_mem_size); + + if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) { + IWL_ERR(drv, + "Paging: driver supports 
up to %lu bytes for paging image\n", + MAX_PAGING_IMAGE_SIZE); + return -EINVAL; + } + + if (paging_mem_size & (FW_PAGING_SIZE - 1)) { + IWL_ERR(drv, + "Paging: image isn't multiple %lu\n", + FW_PAGING_SIZE); + return -EINVAL; + } + + drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size = + paging_mem_size; + usniffer_img = IWL_UCODE_REGULAR_USNIFFER; + drv->fw.img[usniffer_img].paging_mem_size = + paging_mem_size; + break; + case IWL_UCODE_TLV_SDIO_ADMA_ADDR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + drv->fw.sdio_adma_addr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_FW_GSCAN_CAPA: + if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len)) + goto invalid_tlv_len; + gscan_capa = true; + break; + default: + IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); + break; + } + } + + if (usniffer_req && !usniffer_images) { + IWL_ERR(drv, + "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); + return -EINVAL; + } + + if (len) { + IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len); + iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len); + return -EINVAL; + } + + /* + * If ucode advertises that it supports GSCAN but GSCAN + * capabilities TLV is not present, warn and continue without GSCAN. + */ + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && + WARN(!gscan_capa, + "GSCAN is supported but capabilities TLV is unavailable\n")) + __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, + capa->_capa); + + return 0; + + invalid_tlv_len: + IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len); + tlv_error: + iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len); + + return -EINVAL; +} + +static int iwl_alloc_ucode(struct iwl_drv *drv, + struct iwl_firmware_pieces *pieces, + enum iwl_ucode_type type) +{ + int i; + for (i = 0; + i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i); + i++) + if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]), + get_sec(pieces, type, i))) + return -ENOMEM; + return 0; +} + +static int validate_sec_sizes(struct iwl_drv *drv, + struct iwl_firmware_pieces *pieces, + const struct iwl_cfg *cfg) +{ + IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n", + get_sec_size(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_INST)); + IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n", + get_sec_size(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_DATA)); + IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n", + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); + IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n", + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)); + + /* Verify that uCode images will fit in card's SRAM. 
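+ * (The limits come from the per-device struct iwl_cfg; this check is
+ * only reached for the old DVM-style images, since the caller skips
+ * validate_sec_sizes() when fw->mvm_fw is set.)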
*/ + if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > + cfg->max_inst_size) { + IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n", + get_sec_size(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_INST)); + return -1; + } + + if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > + cfg->max_data_size) { + IWL_ERR(drv, "uCode data len %Zd too large to fit in\n", + get_sec_size(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_DATA)); + return -1; + } + + if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > + cfg->max_inst_size) { + IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n", + get_sec_size(pieces, IWL_UCODE_INIT, + IWL_UCODE_SECTION_INST)); + return -1; + } + + if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > + cfg->max_data_size) { + IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n", + get_sec_size(pieces, IWL_UCODE_REGULAR, + IWL_UCODE_SECTION_DATA)); + return -1; + } + return 0; +} + +static struct iwl_op_mode * +_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) +{ + const struct iwl_op_mode_ops *ops = op->ops; + struct dentry *dbgfs_dir = NULL; + struct iwl_op_mode *op_mode = NULL; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + drv->dbgfs_op_mode = debugfs_create_dir(op->name, + drv->dbgfs_drv); + if (!drv->dbgfs_op_mode) { + IWL_ERR(drv, + "failed to create opmode debugfs directory\n"); + return op_mode; + } + dbgfs_dir = drv->dbgfs_op_mode; +#endif + + op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (!op_mode) { + debugfs_remove_recursive(drv->dbgfs_op_mode); + drv->dbgfs_op_mode = NULL; + } +#endif + + return op_mode; +} + +static void _iwl_op_mode_stop(struct iwl_drv *drv) +{ + /* op_mode can be NULL if its start failed */ + if (drv->op_mode) { + iwl_op_mode_stop(drv->op_mode); + drv->op_mode = NULL; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + debugfs_remove_recursive(drv->dbgfs_op_mode); + drv->dbgfs_op_mode = NULL; +#endif + } +} + +/** + * iwl_req_fw_callback - callback when firmware was loaded + * + * If loaded successfully, copies the firmware into buffers + * for the card to fetch (via DMA). 
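+ *
+ * This runs as the request_firmware_nowait() completion: a parse failure
+ * retries with the next lower firmware API version (the try_again label),
+ * while an allocation failure, or running out of candidate versions,
+ * unbinds the device.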
+ */ +static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) +{ + struct iwl_drv *drv = context; + struct iwl_fw *fw = &drv->fw; + struct iwl_ucode_header *ucode; + struct iwlwifi_opmode_table *op; + int err; + struct iwl_firmware_pieces *pieces; + const unsigned int api_max = drv->cfg->ucode_api_max; + unsigned int api_ok = drv->cfg->ucode_api_ok; + const unsigned int api_min = drv->cfg->ucode_api_min; + size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; + u32 api_ver; + int i; + bool load_module = false; + + fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; + fw->ucode_capa.standard_phy_calibration_size = + IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; + fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; + + if (!api_ok) + api_ok = api_max; + + pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); + if (!pieces) + return; + + if (!ucode_raw) { + if (drv->fw_index <= api_ok) + IWL_ERR(drv, + "request for firmware file '%s' failed.\n", + drv->firmware_name); + goto try_again; + } + + IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", + drv->firmware_name, ucode_raw->size); + + /* Make sure that we got at least the API version number */ + if (ucode_raw->size < 4) { + IWL_ERR(drv, "File size way too small!\n"); + goto try_again; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + if (ucode->ver) + err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces); + else + err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, + &fw->ucode_capa); + + if (err) + goto try_again; + + if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) + api_ver = drv->fw.ucode_ver; + else + api_ver = IWL_UCODE_API(drv->fw.ucode_ver); + + /* + * api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward + */ + /* no api version check required for experimental uCode */ + if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) { + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(drv, + "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + goto try_again; + } + + if (api_ver < api_ok) { + if (api_ok != api_max) + IWL_ERR(drv, "Firmware has old API version, " + "expected v%u through v%u, got v%u.\n", + api_ok, api_max, api_ver); + else + IWL_ERR(drv, "Firmware has old API version, " + "expected v%u, got v%u.\n", + api_max, api_ver); + IWL_ERR(drv, "New firmware can be obtained from " + "http://www.intellinuxwireless.org/.\n"); + } + } + + /* + * In mvm uCode there is no difference between data and instructions + * sections. + */ + if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg)) + goto try_again; + + /* Allocate ucode buffers for card's bus-master loading ... 
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) + if (iwl_alloc_ucode(drv, pieces, i)) + goto out_free_fw; + + if (pieces->dbg_dest_tlv) { + drv->fw.dbg_dest_tlv = + kmemdup(pieces->dbg_dest_tlv, + sizeof(*pieces->dbg_dest_tlv) + + sizeof(pieces->dbg_dest_tlv->reg_ops[0]) * + drv->fw.dbg_dest_reg_num, GFP_KERNEL); + + if (!drv->fw.dbg_dest_tlv) + goto out_free_fw; + } + + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) { + if (pieces->dbg_conf_tlv[i]) { + drv->fw.dbg_conf_tlv_len[i] = + pieces->dbg_conf_tlv_len[i]; + drv->fw.dbg_conf_tlv[i] = + kmemdup(pieces->dbg_conf_tlv[i], + drv->fw.dbg_conf_tlv_len[i], + GFP_KERNEL); + if (!drv->fw.dbg_conf_tlv[i]) + goto out_free_fw; + } + } + + memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz)); + + trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] = + sizeof(struct iwl_fw_dbg_trigger_missed_bcon); + trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0; + trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] = + sizeof(struct iwl_fw_dbg_trigger_cmd); + trigger_tlv_sz[FW_DBG_TRIGGER_MLME] = + sizeof(struct iwl_fw_dbg_trigger_mlme); + trigger_tlv_sz[FW_DBG_TRIGGER_STATS] = + sizeof(struct iwl_fw_dbg_trigger_stats); + trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] = + sizeof(struct iwl_fw_dbg_trigger_low_rssi); + trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] = + sizeof(struct iwl_fw_dbg_trigger_txq_timer); + trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] = + sizeof(struct iwl_fw_dbg_trigger_time_event); + trigger_tlv_sz[FW_DBG_TRIGGER_BA] = + sizeof(struct iwl_fw_dbg_trigger_ba); + + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) { + if (pieces->dbg_trigger_tlv[i]) { + /* + * If the trigger isn't long enough, WARN and exit. + * Someone is trying to debug something and he won't + * be able to catch the bug he is trying to chase. + * We'd better be noisy to be sure he knows what's + * going on. + */ + if (WARN_ON(pieces->dbg_trigger_tlv_len[i] < + (trigger_tlv_sz[i] + + sizeof(struct iwl_fw_dbg_trigger_tlv)))) + goto out_free_fw; + drv->fw.dbg_trigger_tlv_len[i] = + pieces->dbg_trigger_tlv_len[i]; + drv->fw.dbg_trigger_tlv[i] = + kmemdup(pieces->dbg_trigger_tlv[i], + drv->fw.dbg_trigger_tlv_len[i], + GFP_KERNEL); + if (!drv->fw.dbg_trigger_tlv[i]) + goto out_free_fw; + } + } + + /* Now that we can no longer fail, copy information */ + + /* + * The (size - 16) / 12 formula is based on the information recorded + * for each event, which is of mode 1 (including timestamp) for all + * new microcodes that include this information. 
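+ *
+ * For example, a 4096 byte event log reported by the firmware yields
+ * (4096 - 16) / 12 = 340 usable event entries.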
+ */ + fw->init_evtlog_ptr = pieces->init_evtlog_ptr; + if (pieces->init_evtlog_size) + fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12; + else + fw->init_evtlog_size = + drv->cfg->base_params->max_event_log_size; + fw->init_errlog_ptr = pieces->init_errlog_ptr; + fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr; + if (pieces->inst_evtlog_size) + fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12; + else + fw->inst_evtlog_size = + drv->cfg->base_params->max_event_log_size; + fw->inst_errlog_ptr = pieces->inst_errlog_ptr; + + /* + * figure out the offset of chain noise reset and gain commands + * base on the size of standard phy calibration commands table size + */ + if (fw->ucode_capa.standard_phy_calibration_size > + IWL_MAX_PHY_CALIBRATE_TBL_SIZE) + fw->ucode_capa.standard_phy_calibration_size = + IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE; + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + + mutex_lock(&iwlwifi_opmode_table_mtx); + if (fw->mvm_fw) + op = &iwlwifi_opmode_table[MVM_OP_MODE]; + else + op = &iwlwifi_opmode_table[DVM_OP_MODE]; + + IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", + drv->fw.fw_version, op->name); + + /* add this device to the list of devices using this op_mode */ + list_add_tail(&drv->list, &op->drv); + + if (op->ops) { + drv->op_mode = _iwl_op_mode_start(drv, op); + + if (!drv->op_mode) { + mutex_unlock(&iwlwifi_opmode_table_mtx); + goto out_unbind; + } + } else { + load_module = true; + } + mutex_unlock(&iwlwifi_opmode_table_mtx); + + /* + * Complete the firmware request last so that + * a driver unbind (stop) doesn't run while we + * are doing the start() above. + */ + complete(&drv->request_firmware_complete); + + /* + * Load the module last so we don't block anything + * else from proceeding if the module fails to load + * or hangs loading. + */ + if (load_module) { + err = request_module("%s", op->name); +#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR + if (err) + IWL_ERR(drv, + "failed to load module %s (error %d), is dynamic loading enabled?\n", + op->name, err); +#endif + } + kfree(pieces); + return; + + try_again: + /* try next, if any */ + release_firmware(ucode_raw); + if (iwl_request_firmware(drv, false)) + goto out_unbind; + kfree(pieces); + return; + + out_free_fw: + IWL_ERR(drv, "failed to allocate pci memory\n"); + iwl_dealloc_ucode(drv); + release_firmware(ucode_raw); + out_unbind: + kfree(pieces); + complete(&drv->request_firmware_complete); + device_release_driver(drv->trans->dev); +} + +struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, + const struct iwl_cfg *cfg) +{ + struct iwl_drv *drv; + int ret; + + drv = kzalloc(sizeof(*drv), GFP_KERNEL); + if (!drv) { + ret = -ENOMEM; + goto err; + } + + drv->trans = trans; + drv->dev = trans->dev; + drv->cfg = cfg; + + init_completion(&drv->request_firmware_complete); + INIT_LIST_HEAD(&drv->list); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* Create the device debugfs entries. 
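+ * With debugfs mounted in its usual place this becomes
+ * /sys/kernel/debug/iwlwifi/<dev_name>/, and the transport layer gets a
+ * "trans" subdirectory right below it.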
*/ + drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), + iwl_dbgfs_root); + + if (!drv->dbgfs_drv) { + IWL_ERR(drv, "failed to create debugfs directory\n"); + ret = -ENOMEM; + goto err_free_drv; + } + + /* Create transport layer debugfs dir */ + drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv); + + if (!drv->trans->dbgfs_dir) { + IWL_ERR(drv, "failed to create transport debugfs directory\n"); + ret = -ENOMEM; + goto err_free_dbgfs; + } +#endif + + ret = iwl_request_firmware(drv, true); + if (ret) { + IWL_ERR(trans, "Couldn't request the fw\n"); + goto err_fw; + } + + return drv; + +err_fw: +#ifdef CONFIG_IWLWIFI_DEBUGFS +err_free_dbgfs: + debugfs_remove_recursive(drv->dbgfs_drv); +err_free_drv: +#endif + kfree(drv); +err: + return ERR_PTR(ret); +} + +void iwl_drv_stop(struct iwl_drv *drv) +{ + wait_for_completion(&drv->request_firmware_complete); + + _iwl_op_mode_stop(drv); + + iwl_dealloc_ucode(drv); + + mutex_lock(&iwlwifi_opmode_table_mtx); + /* + * List is empty (this item wasn't added) + * when firmware loading failed -- in that + * case we can't remove it from any list. + */ + if (!list_empty(&drv->list)) + list_del(&drv->list); + mutex_unlock(&iwlwifi_opmode_table_mtx); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + debugfs_remove_recursive(drv->dbgfs_drv); +#endif + + kfree(drv); +} + + +/* shared module parameters */ +struct iwl_mod_params iwlwifi_mod_params = { + .restart_fw = true, + .bt_coex_active = true, + .power_level = IWL_POWER_INDEX_1, + .d0i3_disable = true, +#ifndef CONFIG_IWLWIFI_UAPSD + .uapsd_disable = true, +#endif /* CONFIG_IWLWIFI_UAPSD */ + /* the rest are 0 by default */ +}; +IWL_EXPORT_SYMBOL(iwlwifi_mod_params); + +int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) +{ + int i; + struct iwl_drv *drv; + struct iwlwifi_opmode_table *op; + + mutex_lock(&iwlwifi_opmode_table_mtx); + for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { + op = &iwlwifi_opmode_table[i]; + if (strcmp(op->name, name)) + continue; + op->ops = ops; + /* TODO: need to handle exceptional case */ + list_for_each_entry(drv, &op->drv, list) + drv->op_mode = _iwl_op_mode_start(drv, op); + + mutex_unlock(&iwlwifi_opmode_table_mtx); + return 0; + } + mutex_unlock(&iwlwifi_opmode_table_mtx); + return -EIO; +} +IWL_EXPORT_SYMBOL(iwl_opmode_register); + +void iwl_opmode_deregister(const char *name) +{ + int i; + struct iwl_drv *drv; + + mutex_lock(&iwlwifi_opmode_table_mtx); + for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { + if (strcmp(iwlwifi_opmode_table[i].name, name)) + continue; + iwlwifi_opmode_table[i].ops = NULL; + + /* call the stop routine for all devices */ + list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) + _iwl_op_mode_stop(drv); + + mutex_unlock(&iwlwifi_opmode_table_mtx); + return; + } + mutex_unlock(&iwlwifi_opmode_table_mtx); +} +IWL_EXPORT_SYMBOL(iwl_opmode_deregister); + +static int __init iwl_drv_init(void) +{ + int i; + + mutex_init(&iwlwifi_opmode_table_mtx); + + for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) + INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); + + pr_info(DRV_DESCRIPTION "\n"); + pr_info(DRV_COPYRIGHT "\n"); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* Create the root of iwlwifi debugfs subsystem. 
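+ * Typically this is /sys/kernel/debug/iwlwifi; iwl_drv_start() then
+ * creates one sub-directory per device underneath it.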
*/ + iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL); + + if (!iwl_dbgfs_root) + return -EFAULT; +#endif + + return iwl_pci_register_driver(); +} +module_init(iwl_drv_init); + +static void __exit iwl_drv_exit(void) +{ + iwl_pci_unregister_driver(); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + debugfs_remove_recursive(iwl_dbgfs_root); +#endif +} +module_exit(iwl_drv_exit); + +#ifdef CONFIG_IWLWIFI_DEBUG +module_param_named(debug, iwlwifi_mod_params.debug_level, uint, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif + +module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); +module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); +MODULE_PARM_DESC(11n_disable, + "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); +module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, + int, S_IRUGO); +MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); +module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); + +module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling, + int, S_IRUGO); +MODULE_PARM_DESC(antenna_coupling, + "specify antenna coupling in dB (default: 0 dB)"); + +module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); +MODULE_PARM_DESC(nvm_file, "NVM file name"); + +module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable, + bool, S_IRUGO); +MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)"); + +module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, + bool, S_IRUGO); +MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); + +module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, + bool, S_IRUGO | S_IWUSR); +#ifdef CONFIG_IWLWIFI_UAPSD +MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)"); +#else +MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)"); +#endif + +/* + * set bt_coex_active to true, uCode will do kill/defer + * every time the priority line is asserted (BT is sending signals on the + * priority line in the PCIx). + * set bt_coex_active to false, uCode will ignore the BT activity and + * perform the normal operation + * + * User might experience transmit issue on some platform due to WiFi/BT + * co-exist problem. 
The possible behaviors are: + * Able to scan and finding all the available AP + * Not able to associate with any AP + * On those platforms, WiFi communication can be restored by set + * "bt_coex_active" module parameter to "false" + * + * default: bt_coex_active = true (BT_COEX_ENABLE) + */ +module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active, + bool, S_IRUGO); +MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); + +module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO); +MODULE_PARM_DESC(led_mode, "0=system default, " + "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)"); + +module_param_named(power_save, iwlwifi_mod_params.power_save, + bool, S_IRUGO); +MODULE_PARM_DESC(power_save, + "enable WiFi power management (default: disable)"); + +module_param_named(power_level, iwlwifi_mod_params.power_level, + int, S_IRUGO); +MODULE_PARM_DESC(power_level, + "default power save level (range from 1 - 5, default: 1)"); + +module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, S_IRUGO); +MODULE_PARM_DESC(fw_monitor, + "firmware monitor - to debug FW (default: false - needs lots of memory)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h new file mode 100644 index 000000000000..cda746b33db1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -0,0 +1,155 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_drv_h__ +#define __iwl_drv_h__ +#include + +/* for all modules */ +#define DRV_NAME "iwlwifi" +#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation" +#define DRV_AUTHOR "" + +/* radio config bits (actual values from NVM definition) */ +#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ +#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ +#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ +#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ +#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ +#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ + +#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x) (x & 0xF) +#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x) ((x >> 4) & 0xF) +#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x) ((x >> 8) & 0xF) +#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x) ((x >> 12) & 0xFFF) +#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF) +#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF) + +/** + * DOC: Driver system flows - drv component + * + * This component implements the system flows such as bus enumeration, bus + * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in + * bus specific files (transport files). This is the code that is common among + * different buses. + * + * This component is also in charge of managing the several implementations of + * the wifi flows: it will allow to have several fw API implementation. These + * different implementations will differ in the way they implement mac80211's + * handlers too. + + * The init flow wrt to the drv component looks like this: + * 1) The bus specific component is called from module_init + * 2) The bus specific component registers the bus driver + * 3) The bus driver calls the probe function + * 4) The bus specific component configures the bus + * 5) The bus specific component calls to the drv bus agnostic part + * (iwl_drv_start) + * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback + * 7) iwl_req_fw_callback parses the fw file + * 8) iwl_req_fw_callback starts the wifi implementation to matches the fw + */ + +struct iwl_drv; +struct iwl_trans; +struct iwl_cfg; +/** + * iwl_drv_start - start the drv + * + * @trans_ops: the ops of the transport + * @cfg: device specific constants / virtual functions + * + * starts the driver: fetches the firmware. This should be called by bus + * specific system flows implementations. 
For example, the bus specific probe + * function should do bus related operations only, and then call to this + * function. It returns the driver object or %NULL if an error occurred. + */ +struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, + const struct iwl_cfg *cfg); + +/** + * iwl_drv_stop - stop the drv + * + * @drv: + * + * Stop the driver. This should be called by bus specific system flows + * implementations. For example, the bus specific remove function should first + * call this function and then do the bus related operations only. + */ +void iwl_drv_stop(struct iwl_drv *drv); + +/* + * exported symbol management + * + * The driver can be split into multiple modules, in which case some symbols + * must be exported for the sub-modules. However, if it's not split and + * everything is built-in, then we can avoid that. + */ +#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR +#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym) +#else +#define IWL_EXPORT_SYMBOL(sym) +#endif + +#endif /* __iwl_drv_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c new file mode 100644 index 000000000000..acc3d186c5c1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -0,0 +1,947 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#include +#include +#include +#include "iwl-drv.h" +#include "iwl-modparams.h" +#include "iwl-eeprom-parse.h" + +/* EEPROM offset definitions */ + +/* indirect access definitions */ +#define ADDRESS_MSK 0x0000FFFF +#define INDIRECT_TYPE_MSK 0x000F0000 +#define INDIRECT_HOST 0x00010000 +#define INDIRECT_GENERAL 0x00020000 +#define INDIRECT_REGULATORY 0x00030000 +#define INDIRECT_CALIBRATION 0x00040000 +#define INDIRECT_PROCESS_ADJST 0x00050000 +#define INDIRECT_OTHERS 0x00060000 +#define INDIRECT_TXP_LIMIT 0x00070000 +#define INDIRECT_TXP_LIMIT_SIZE 0x00080000 +#define INDIRECT_ADDRESS 0x00100000 + +/* corresponding link offsets in EEPROM */ +#define EEPROM_LINK_HOST (2*0x64) +#define EEPROM_LINK_GENERAL (2*0x65) +#define EEPROM_LINK_REGULATORY (2*0x66) +#define EEPROM_LINK_CALIBRATION (2*0x67) +#define EEPROM_LINK_PROCESS_ADJST (2*0x68) +#define EEPROM_LINK_OTHERS (2*0x69) +#define EEPROM_LINK_TXP_LIMIT (2*0x6a) +#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b) + +/* General */ +#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ +#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */ +#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ +#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ +#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ +#define EEPROM_VERSION (2*0x44) /* 2 bytes */ +#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ +#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ +#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ +#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ + +/* calibration */ +struct iwl_eeprom_calib_hdr { + u8 version; + u8 pa_type; + __le16 voltage; +} __packed; + +#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION) +#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL) + +/* temperature */ +#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL) +#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL) + +/* SKU Capabilities (actual values from EEPROM definition) */ +enum eeprom_sku_bits { + EEPROM_SKU_CAP_BAND_24GHZ = BIT(4), + EEPROM_SKU_CAP_BAND_52GHZ = BIT(5), + EEPROM_SKU_CAP_11N_ENABLE = BIT(6), + EEPROM_SKU_CAP_AMT_ENABLE = BIT(7), + EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8) +}; + +/* radio config bits (actual values from EEPROM definition) */ +#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ +#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ +#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ +#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ +#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ +#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ + + +/* + * EEPROM bands + * 
These are the channel numbers from each band in the order + * that they are stored in the EEPROM band information. Note + * that EEPROM bands aren't the same as mac80211 bands, and + * there are even special "ht40 bands" in the EEPROM. + */ +static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */ + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */ + 1, 2, 3, 4, 5, 6, 7 +}; + +static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */ + 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 +}; + +#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \ + ARRAY_SIZE(iwl_eeprom_band_2) + \ + ARRAY_SIZE(iwl_eeprom_band_3) + \ + ARRAY_SIZE(iwl_eeprom_band_4) + \ + ARRAY_SIZE(iwl_eeprom_band_5)) + +/* rate data (static) */ +static struct ieee80211_rate iwl_cfg80211_rates[] = { + { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, + { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, + { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, + { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, + { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, + { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, + { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, + { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, + { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, +}; +#define RATES_24_OFFS 0 +#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) +#define RATES_52_OFFS 4 +#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) + +/* EEPROM reading functions */ + +static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset) +{ + if (WARN_ON(offset + sizeof(u16) > eeprom_size)) + return 0; + return le16_to_cpup((__le16 *)(eeprom + offset)); +} + +static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size, + u32 address) +{ + u16 offset = 0; + + if ((address & INDIRECT_ADDRESS) == 0) + return address; + + switch (address & INDIRECT_TYPE_MSK) { + case INDIRECT_HOST: + offset = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_LINK_HOST); + break; + case INDIRECT_GENERAL: + offset = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_LINK_GENERAL); + break; + case INDIRECT_REGULATORY: + offset = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_LINK_REGULATORY); + break; + case INDIRECT_TXP_LIMIT: + offset = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_LINK_TXP_LIMIT); + break; + case INDIRECT_TXP_LIMIT_SIZE: + offset = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_LINK_TXP_LIMIT_SIZE); + break; + case INDIRECT_CALIBRATION: + offset = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_LINK_CALIBRATION); + break; + case INDIRECT_PROCESS_ADJST: + offset = iwl_eeprom_query16(eeprom, 
eeprom_size, + EEPROM_LINK_PROCESS_ADJST); + break; + case INDIRECT_OTHERS: + offset = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_LINK_OTHERS); + break; + default: + WARN_ON(1); + break; + } + + /* translate the offset from words to byte */ + return (address & ADDRESS_MSK) + (offset << 1); +} + +static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size, + u32 offset) +{ + u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset); + + if (WARN_ON(address >= eeprom_size)) + return NULL; + + return &eeprom[address]; +} + +static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size, + struct iwl_nvm_data *data) +{ + struct iwl_eeprom_calib_hdr *hdr; + + hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, + EEPROM_CALIB_ALL); + if (!hdr) + return -ENODATA; + data->calib_version = hdr->version; + data->calib_voltage = hdr->voltage; + + return 0; +} + +/** + * enum iwl_eeprom_channel_flags - channel flags in EEPROM + * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo + * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel + * @EEPROM_CHANNEL_ACTIVE: active scanning allowed + * @EEPROM_CHANNEL_RADAR: radar detection required + * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?) + * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate + */ +enum iwl_eeprom_channel_flags { + EEPROM_CHANNEL_VALID = BIT(0), + EEPROM_CHANNEL_IBSS = BIT(1), + EEPROM_CHANNEL_ACTIVE = BIT(3), + EEPROM_CHANNEL_RADAR = BIT(4), + EEPROM_CHANNEL_WIDE = BIT(5), + EEPROM_CHANNEL_DFS = BIT(7), +}; + +/** + * struct iwl_eeprom_channel - EEPROM channel data + * @flags: %EEPROM_CHANNEL_* flags + * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm + */ +struct iwl_eeprom_channel { + u8 flags; + s8 max_power_avg; +} __packed; + + +enum iwl_eeprom_enhanced_txpwr_flags { + IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0), + IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1), + IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2), + IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3), + IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4), + IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5), + IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6), + IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7), +}; + +/** + * iwl_eeprom_enhanced_txpwr structure + * @flags: entry flags + * @channel: channel number + * @chain_a_max_pwr: chain a max power in 1/2 dBm + * @chain_b_max_pwr: chain b max power in 1/2 dBm + * @chain_c_max_pwr: chain c max power in 1/2 dBm + * @delta_20_in_40: 20-in-40 deltas (hi/lo) + * @mimo2_max_pwr: mimo2 max power in 1/2 dBm + * @mimo3_max_pwr: mimo3 max power in 1/2 dBm + * + * This structure presents the enhanced regulatory tx power limit layout + * in an EEPROM image. 
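+ *
+ * All per-chain and MIMO limits are stored in half-dBm units, so a raw
+ * value of 36 corresponds to 18 dBm; iwl_get_max_txpwr_half_dbm() below
+ * picks the largest such value among the chains valid for the device.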
+ */ +struct iwl_eeprom_enhanced_txpwr { + u8 flags; + u8 channel; + s8 chain_a_max; + s8 chain_b_max; + s8 chain_c_max; + u8 delta_20_in_40; + s8 mimo2_max; + s8 mimo3_max; +} __packed; + +static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data, + struct iwl_eeprom_enhanced_txpwr *txp) +{ + s8 result = 0; /* (.5 dBm) */ + + /* Take the highest tx power from any valid chains */ + if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result) + result = txp->chain_a_max; + + if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result) + result = txp->chain_b_max; + + if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result) + result = txp->chain_c_max; + + if ((data->valid_tx_ant == ANT_AB || + data->valid_tx_ant == ANT_BC || + data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result) + result = txp->mimo2_max; + + if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result) + result = txp->mimo3_max; + + return result; +} + +#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT) +#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr) +#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE) + +#define TXP_CHECK_AND_PRINT(x) \ + ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "") + +static void +iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data, + struct iwl_eeprom_enhanced_txpwr *txp, + int n_channels, s8 max_txpower_avg) +{ + int ch_idx; + enum ieee80211_band band; + + band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? + IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; + + for (ch_idx = 0; ch_idx < n_channels; ch_idx++) { + struct ieee80211_channel *chan = &data->channels[ch_idx]; + + /* update matching channel or from common data only */ + if (txp->channel != 0 && chan->hw_value != txp->channel) + continue; + + /* update matching band only */ + if (band != chan->band) + continue; + + if (chan->max_power < max_txpower_avg && + !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ)) + chan->max_power = max_txpower_avg; + } +} + +static void iwl_eeprom_enhanced_txpower(struct device *dev, + struct iwl_nvm_data *data, + const u8 *eeprom, size_t eeprom_size, + int n_channels) +{ + struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; + int idx, entries; + __le16 *txp_len; + s8 max_txp_avg_halfdbm; + + BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); + + /* the length is in 16-bit words, but we want entries */ + txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size, + EEPROM_TXP_SZ_OFFS); + entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; + + txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, + EEPROM_TXP_OFFS); + + for (idx = 0; idx < entries; idx++) { + txp = &txp_array[idx]; + /* skip invalid entries */ + if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID)) + continue; + + IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n", + (txp->channel && (txp->flags & + IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ? + "Common " : (txp->channel) ? 
+ "Channel" : "Common", + (txp->channel), + TXP_CHECK_AND_PRINT(VALID), + TXP_CHECK_AND_PRINT(BAND_52G), + TXP_CHECK_AND_PRINT(OFDM), + TXP_CHECK_AND_PRINT(40MHZ), + TXP_CHECK_AND_PRINT(HT_AP), + TXP_CHECK_AND_PRINT(RES1), + TXP_CHECK_AND_PRINT(RES2), + TXP_CHECK_AND_PRINT(COMMON_TYPE), + txp->flags); + IWL_DEBUG_EEPROM(dev, + "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n", + txp->chain_a_max, txp->chain_b_max, + txp->chain_c_max); + IWL_DEBUG_EEPROM(dev, + "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n", + txp->mimo2_max, txp->mimo3_max, + ((txp->delta_20_in_40 & 0xf0) >> 4), + (txp->delta_20_in_40 & 0x0f)); + + max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp); + + iwl_eeprom_enh_txp_read_element(data, txp, n_channels, + DIV_ROUND_UP(max_txp_avg_halfdbm, 2)); + + if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm) + data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm; + } +} + +static void iwl_init_band_reference(const struct iwl_cfg *cfg, + const u8 *eeprom, size_t eeprom_size, + int eeprom_band, int *eeprom_ch_count, + const struct iwl_eeprom_channel **ch_info, + const u8 **eeprom_ch_array) +{ + u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1]; + + offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY; + + *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset); + + switch (eeprom_band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); + *eeprom_ch_array = iwl_eeprom_band_1; + break; + case 2: /* 4.9GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); + *eeprom_ch_array = iwl_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); + *eeprom_ch_array = iwl_eeprom_band_3; + break; + case 4: /* 5.5GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); + *eeprom_ch_array = iwl_eeprom_band_4; + break; + case 5: /* 5.7GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); + *eeprom_ch_array = iwl_eeprom_band_5; + break; + case 6: /* 2.4GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); + *eeprom_ch_array = iwl_eeprom_band_6; + break; + case 7: /* 5 GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); + *eeprom_ch_array = iwl_eeprom_band_7; + break; + default: + *eeprom_ch_count = 0; + *eeprom_ch_array = NULL; + WARN_ON(1); + } +} + +#define CHECK_AND_PRINT(x) \ + ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "") + +static void iwl_mod_ht40_chan_info(struct device *dev, + struct iwl_nvm_data *data, int n_channels, + enum ieee80211_band band, u16 channel, + const struct iwl_eeprom_channel *eeprom_ch, + u8 clear_ht40_extension_channel) +{ + struct ieee80211_channel *chan = NULL; + int i; + + for (i = 0; i < n_channels; i++) { + if (data->channels[i].band != band) + continue; + if (data->channels[i].hw_value != channel) + continue; + chan = &data->channels[i]; + break; + } + + if (!chan) + return; + + IWL_DEBUG_EEPROM(dev, + "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", + channel, + band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(DFS), + eeprom_ch->flags, + eeprom_ch->max_power_avg, + ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && + !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? 
"" + : "not "); + + if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) + chan->flags &= ~clear_ht40_extension_channel; +} + +#define CHECK_AND_PRINT_I(x) \ + ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "") + +static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const u8 *eeprom, size_t eeprom_size) +{ + int band, ch_idx; + const struct iwl_eeprom_channel *eeprom_ch_info; + const u8 *eeprom_ch_array; + int eeprom_ch_count; + int n_channels = 0; + + /* + * Loop through the 5 EEPROM bands and add them to the parse list + */ + for (band = 1; band <= 5; band++) { + struct ieee80211_channel *channel; + + iwl_init_band_reference(cfg, eeprom, eeprom_size, band, + &eeprom_ch_count, &eeprom_ch_info, + &eeprom_ch_array); + + /* Loop through each band adding each of the channels */ + for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { + const struct iwl_eeprom_channel *eeprom_ch; + + eeprom_ch = &eeprom_ch_info[ch_idx]; + + if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) { + IWL_DEBUG_EEPROM(dev, + "Ch. %d Flags %x [%sGHz] - No traffic\n", + eeprom_ch_array[ch_idx], + eeprom_ch_info[ch_idx].flags, + (band != 1) ? "5.2" : "2.4"); + continue; + } + + channel = &data->channels[n_channels]; + n_channels++; + + channel->hw_value = eeprom_ch_array[ch_idx]; + channel->band = (band == 1) ? IEEE80211_BAND_2GHZ + : IEEE80211_BAND_5GHZ; + channel->center_freq = + ieee80211_channel_to_frequency( + channel->hw_value, channel->band); + + /* set no-HT40, will enable as appropriate later */ + channel->flags = IEEE80211_CHAN_NO_HT40; + + if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS)) + channel->flags |= IEEE80211_CHAN_NO_IR; + + if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE)) + channel->flags |= IEEE80211_CHAN_NO_IR; + + if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR) + channel->flags |= IEEE80211_CHAN_RADAR; + + /* Initialize regulatory-based run-time data */ + channel->max_power = + eeprom_ch_info[ch_idx].max_power_avg; + IWL_DEBUG_EEPROM(dev, + "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", + channel->hw_value, + (band != 1) ? "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(DFS), + eeprom_ch_info[ch_idx].flags, + eeprom_ch_info[ch_idx].max_power_avg, + ((eeprom_ch_info[ch_idx].flags & + EEPROM_CHANNEL_IBSS) && + !(eeprom_ch_info[ch_idx].flags & + EEPROM_CHANNEL_RADAR)) + ? 
"" : "not "); + } + } + + if (cfg->eeprom_params->enhanced_txpower) { + /* + * for newer device (6000 series and up) + * EEPROM contain enhanced tx power information + * driver need to process addition information + * to determine the max channel tx power limits + */ + iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size, + n_channels); + } else { + /* All others use data from channel map */ + int i; + + data->max_tx_pwr_half_dbm = -128; + + for (i = 0; i < n_channels; i++) + data->max_tx_pwr_half_dbm = + max_t(s8, data->max_tx_pwr_half_dbm, + data->channels[i].max_power * 2); + } + + /* Check if we do have HT40 channels */ + if (cfg->eeprom_params->regulatory_bands[5] == + EEPROM_REGULATORY_BAND_NO_HT40 && + cfg->eeprom_params->regulatory_bands[6] == + EEPROM_REGULATORY_BAND_NO_HT40) + return n_channels; + + /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ + for (band = 6; band <= 7; band++) { + enum ieee80211_band ieeeband; + + iwl_init_band_reference(cfg, eeprom, eeprom_size, band, + &eeprom_ch_count, &eeprom_ch_info, + &eeprom_ch_array); + + /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ + ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ + : IEEE80211_BAND_5GHZ; + + /* Loop through each band adding each of the channels */ + for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { + /* Set up driver's info for lower half */ + iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, + eeprom_ch_array[ch_idx], + &eeprom_ch_info[ch_idx], + IEEE80211_CHAN_NO_HT40PLUS); + + /* Set up driver's info for upper half */ + iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, + eeprom_ch_array[ch_idx] + 4, + &eeprom_ch_info[ch_idx], + IEEE80211_CHAN_NO_HT40MINUS); + } + } + + return n_channels; +} + +int iwl_init_sband_channels(struct iwl_nvm_data *data, + struct ieee80211_supported_band *sband, + int n_channels, enum ieee80211_band band) +{ + struct ieee80211_channel *chan = &data->channels[0]; + int n = 0, idx = 0; + + while (idx < n_channels && chan->band != band) + chan = &data->channels[++idx]; + + sband->channels = &data->channels[idx]; + + while (idx < n_channels && chan->band == band) { + chan = &data->channels[++idx]; + n++; + } + + sband->n_channels = n; + + return n; +} + +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ + +void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band, + u8 tx_chains, u8 rx_chains) +{ + int max_bit_rate = 0; + + tx_chains = hweight8(tx_chains); + if (cfg->rx_with_siso_diversity) + rx_chains = 1; + else + rx_chains = hweight8(rx_chains); + + if (!(data->sku_cap_11n_enable) || !cfg->ht_params) { + ht_info->ht_supported = false; + return; + } + + if (data->sku_cap_mimo_disabled) + rx_chains = 1; + + ht_info->ht_supported = true; + ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; + + if (cfg->ht_params->stbc) { + ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + + if (tx_chains > 1) + ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; + } + + if (cfg->ht_params->ldpc) + ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; + + if (iwlwifi_mod_params.amsdu_size_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent; + ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; + + ht_info->mcs.rx_mask[0] = 0xFF; + if (rx_chains >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + + if (cfg->ht_params->ht_greenfield_support) + ht_info->cap |= 
IEEE80211_HT_CAP_GRN_FLD; + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + + max_bit_rate = MAX_BIT_RATE_20_MHZ; + + if (cfg->ht_params->ht40_bands & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains != rx_chains) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_chains - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} + +static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const u8 *eeprom, size_t eeprom_size) +{ + int n_channels = iwl_init_channel_map(dev, cfg, data, + eeprom, eeprom_size); + int n_used = 0; + struct ieee80211_supported_band *sband; + + sband = &data->bands[IEEE80211_BAND_2GHZ]; + sband->band = IEEE80211_BAND_2GHZ; + sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; + sband->n_bitrates = N_RATES_24; + n_used += iwl_init_sband_channels(data, sband, n_channels, + IEEE80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + data->valid_tx_ant, data->valid_rx_ant); + + sband = &data->bands[IEEE80211_BAND_5GHZ]; + sband->band = IEEE80211_BAND_5GHZ; + sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; + sband->n_bitrates = N_RATES_52; + n_used += iwl_init_sband_channels(data, sband, n_channels, + IEEE80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + data->valid_tx_ant, data->valid_rx_ant); + + if (n_channels != n_used) + IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n", + n_used, n_channels); +} + +/* EEPROM data functions */ + +struct iwl_nvm_data * +iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, + const u8 *eeprom, size_t eeprom_size) +{ + struct iwl_nvm_data *data; + const void *tmp; + u16 radio_cfg, sku; + + if (WARN_ON(!cfg || !cfg->eeprom_params)) + return NULL; + + data = kzalloc(sizeof(*data) + + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, + GFP_KERNEL); + if (!data) + return NULL; + + /* get MAC address(es) */ + tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS); + if (!tmp) + goto err_free; + memcpy(data->hw_addr, tmp, ETH_ALEN); + data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_NUM_MAC_ADDRESS); + + if (iwl_eeprom_read_calib(eeprom, eeprom_size, data)) + goto err_free; + + tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL); + if (!tmp) + goto err_free; + memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib)); + + tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, + EEPROM_RAW_TEMPERATURE); + if (!tmp) + goto err_free; + data->raw_temperature = *(__le16 *)tmp; + + tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, + EEPROM_KELVIN_TEMPERATURE); + if (!tmp) + goto err_free; + data->kelvin_temperature = *(__le16 *)tmp; + data->kelvin_voltage = *((__le16 *)tmp + 1); + + radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_RADIO_CONFIG); + data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg); + data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg); + data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg); + data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg); + data->valid_rx_ant = 
EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); + data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); + + sku = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_SKU_CAP); + data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE; + data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE; + data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ; + data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ; + data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE; + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) + data->sku_cap_11n_enable = false; + + data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size, + EEPROM_VERSION); + + /* check overrides (some devices have wrong EEPROM) */ + if (cfg->valid_tx_ant) + data->valid_tx_ant = cfg->valid_tx_ant; + if (cfg->valid_rx_ant) + data->valid_rx_ant = cfg->valid_rx_ant; + + if (!data->valid_tx_ant || !data->valid_rx_ant) { + IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n", + data->valid_tx_ant, data->valid_rx_ant); + goto err_free; + } + + iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size); + + return data; + err_free: + kfree(data); + return NULL; +} +IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data); + +/* helper functions */ +int iwl_nvm_check_version(struct iwl_nvm_data *data, + struct iwl_trans *trans) +{ + if (data->nvm_version >= trans->cfg->nvm_ver || + data->calib_version >= trans->cfg->nvm_calib_ver) { + IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n", + data->nvm_version, data->calib_version); + return 0; + } + + IWL_ERR(trans, + "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", + data->nvm_version, trans->cfg->nvm_ver, + data->calib_version, trans->cfg->nvm_calib_ver); + return -EINVAL; +} +IWL_EXPORT_SYMBOL(iwl_nvm_check_version); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h new file mode 100644 index 000000000000..750c8c9ee70d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -0,0 +1,144 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#ifndef __iwl_eeprom_parse_h__ +#define __iwl_eeprom_parse_h__ + +#include +#include +#include "iwl-trans.h" + +struct iwl_nvm_data { + int n_hw_addrs; + u8 hw_addr[ETH_ALEN]; + + u8 calib_version; + __le16 calib_voltage; + + __le16 raw_temperature; + __le16 kelvin_temperature; + __le16 kelvin_voltage; + __le16 xtal_calib[2]; + + bool sku_cap_band_24GHz_enable; + bool sku_cap_band_52GHz_enable; + bool sku_cap_11n_enable; + bool sku_cap_11ac_enable; + bool sku_cap_amt_enable; + bool sku_cap_ipan_enable; + bool sku_cap_mimo_disabled; + + u16 radio_cfg_type; + u8 radio_cfg_step; + u8 radio_cfg_dash; + u8 radio_cfg_pnum; + u8 valid_tx_ant, valid_rx_ant; + + u32 nvm_version; + s8 max_tx_pwr_half_dbm; + + bool lar_enabled; + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + struct ieee80211_channel channels[]; +}; + +/** + * iwl_parse_eeprom_data - parse EEPROM data and return values + * + * @dev: device pointer we're parsing for, for debug only + * @cfg: device configuration for parsing and overrides + * @eeprom: the EEPROM data + * @eeprom_size: length of the EEPROM data + * + * This function parses all EEPROM values we need and then + * returns a (newly allocated) struct containing all the + * relevant values for driver use. The struct must be freed + * later with iwl_free_nvm_data(). 
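+ *
+ * A minimal usage sketch (error handling trimmed, local names and the
+ * chosen error code are illustrative only):
+ *
+ *	struct iwl_nvm_data *nvm;
+ *
+ *	nvm = iwl_parse_eeprom_data(trans->dev, cfg, eeprom, eeprom_size);
+ *	if (!nvm)
+ *		return -ENOMEM;
+ *	...
+ *	iwl_free_nvm_data(nvm);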
+ */ +struct iwl_nvm_data * +iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, + const u8 *eeprom, size_t eeprom_size); + +/** + * iwl_free_nvm_data - free NVM data + * @data: the data to free + */ +static inline void iwl_free_nvm_data(struct iwl_nvm_data *data) +{ + kfree(data); +} + +int iwl_nvm_check_version(struct iwl_nvm_data *data, + struct iwl_trans *trans); + +int iwl_init_sband_channels(struct iwl_nvm_data *data, + struct ieee80211_supported_band *sband, + int n_channels, enum ieee80211_band band); + +void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band, + u8 tx_chains, u8 rx_chains); + +#endif /* __iwl_eeprom_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c new file mode 100644 index 000000000000..219ca8acca62 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c @@ -0,0 +1,464 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#include +#include +#include + +#include "iwl-drv.h" +#include "iwl-debug.h" +#include "iwl-eeprom-read.h" +#include "iwl-io.h" +#include "iwl-prph.h" +#include "iwl-csr.h" + +/* + * EEPROM access time values: + * + * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. + * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). + * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. + * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. + */ +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ + +#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + + +/* + * The device's EEPROM semaphore prevents conflicts between driver and uCode + * when accessing the EEPROM; each access is a series of pulses to/from the + * EEPROM chip, not a single event, so even reads could conflict if they + * weren't arbitrated by the semaphore. + */ + +#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ +#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + +static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) +{ + u16 count; + int ret; + + for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + /* Request semaphore */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + + /* See if we got it */ + ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + EEPROM_SEM_TIMEOUT); + if (ret >= 0) { + IWL_DEBUG_EEPROM(trans->dev, + "Acquired semaphore after %d tries.\n", + count+1); + return ret; + } + } + + return ret; +} + +static void iwl_eeprom_release_semaphore(struct iwl_trans *trans) +{ + iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); +} + +static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp) +{ + u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; + + IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp); + + switch (gp) { + case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: + if (!nvm_is_otp) { + IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", + gp); + return -ENOENT; + } + return 0; + case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: + case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: + if (nvm_is_otp) { + IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); + return -ENOENT; + } + return 0; + case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: + default: + IWL_ERR(trans, + "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n", + nvm_is_otp ? 
"OTP" : "EEPROM", gp); + return -ENOENT; + } +} + +/****************************************************************************** + * + * OTP related functions + * +******************************************************************************/ + +static void iwl_set_otp_access_absolute(struct iwl_trans *trans) +{ + iwl_read32(trans, CSR_OTP_GP_REG); + + iwl_clear_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_OTP_ACCESS_MODE); +} + +static int iwl_nvm_is_otp(struct iwl_trans *trans) +{ + u32 otpgp; + + /* OTP only valid for CP/PP and after */ + switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) { + case CSR_HW_REV_TYPE_NONE: + IWL_ERR(trans, "Unknown hardware type\n"); + return -EIO; + case CSR_HW_REV_TYPE_5300: + case CSR_HW_REV_TYPE_5350: + case CSR_HW_REV_TYPE_5100: + case CSR_HW_REV_TYPE_5150: + return 0; + default: + otpgp = iwl_read32(trans, CSR_OTP_GP_REG); + if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) + return 1; + return 0; + } +} + +static int iwl_init_otp_access(struct iwl_trans *trans) +{ + int ret; + + /* Enable 40MHz radio clock */ + iwl_write32(trans, CSR_GP_CNTRL, + iwl_read32(trans, CSR_GP_CNTRL) | + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* wait for clock to be ready */ + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + 25000); + if (ret < 0) { + IWL_ERR(trans, "Time out access OTP\n"); + } else { + iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + + /* + * CSR auto clock gate disable bit - + * this is only applicable for HW with OTP shadow RAM + */ + if (trans->cfg->base_params->shadow_ram_support) + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + } + return ret; +} + +static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr, + __le16 *eeprom_data) +{ + int ret = 0; + u32 r; + u32 otpgp; + + iwl_write32(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + ret = iwl_poll_bit(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(trans, "Time out reading OTP[%d]\n", addr); + return ret; + } + r = iwl_read32(trans, CSR_EEPROM_REG); + /* check for ECC errors: */ + otpgp = iwl_read32(trans, CSR_OTP_GP_REG); + if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { + /* stop in this case */ + /* set the uncorrectable OTP ECC bit for acknowledgment */ + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); + IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n"); + return -EINVAL; + } + if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { + /* continue in this case */ + /* set the correctable OTP ECC bit for acknowledgment */ + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); + IWL_ERR(trans, "Correctable OTP ECC error, continue read\n"); + } + *eeprom_data = cpu_to_le16(r >> 16); + return 0; +} + +/* + * iwl_is_otp_empty: check for empty OTP + */ +static bool iwl_is_otp_empty(struct iwl_trans *trans) +{ + u16 next_link_addr = 0; + __le16 link_value; + bool is_empty = false; + + /* locate the beginning of OTP link list */ + if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) { + if (!link_value) { + IWL_ERR(trans, "OTP is empty\n"); + is_empty = true; + } + } else { + IWL_ERR(trans, "Unable to read first block of OTP list.\n"); + is_empty = true; + } + + return is_empty; +} + + +/* + * 
iwl_find_otp_image: find EEPROM image in OTP + * finding the OTP block that contains the EEPROM image. + * the last valid block on the link list (the block _before_ the last block) + * is the block we should read and used to configure the device. + * If all the available OTP blocks are full, the last block will be the block + * we should read and used to configure the device. + * only perform this operation if shadow RAM is disabled + */ +static int iwl_find_otp_image(struct iwl_trans *trans, + u16 *validblockaddr) +{ + u16 next_link_addr = 0, valid_addr; + __le16 link_value = 0; + int usedblocks = 0; + + /* set addressing mode to absolute to traverse the link list */ + iwl_set_otp_access_absolute(trans); + + /* checking for empty OTP or error */ + if (iwl_is_otp_empty(trans)) + return -EINVAL; + + /* + * start traverse link list + * until reach the max number of OTP blocks + * different devices have different number of OTP blocks + */ + do { + /* save current valid block address + * check for more block on the link list + */ + valid_addr = next_link_addr; + next_link_addr = le16_to_cpu(link_value) * sizeof(u16); + IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", + usedblocks, next_link_addr); + if (iwl_read_otp_word(trans, next_link_addr, &link_value)) + return -EINVAL; + if (!link_value) { + /* + * reach the end of link list, return success and + * set address point to the starting address + * of the image + */ + *validblockaddr = valid_addr; + /* skip first 2 bytes (link list pointer) */ + *validblockaddr += 2; + return 0; + } + /* more in the link list, continue */ + usedblocks++; + } while (usedblocks <= trans->cfg->base_params->max_ll_items); + + /* OTP has no valid blocks */ + IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n"); + return -EINVAL; +} + +/** + * iwl_read_eeprom - read EEPROM contents + * + * Load the EEPROM contents from adapter and return it + * and its size. + * + * NOTE: This routine uses the non-debug IO access functions. 
+ */ +int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) +{ + __le16 *e; + u32 gp = iwl_read32(trans, CSR_EEPROM_GP); + int sz; + int ret; + u16 addr; + u16 validblockaddr = 0; + u16 cache_addr = 0; + int nvm_is_otp; + + if (!eeprom || !eeprom_size) + return -EINVAL; + + nvm_is_otp = iwl_nvm_is_otp(trans); + if (nvm_is_otp < 0) + return nvm_is_otp; + + sz = trans->cfg->base_params->eeprom_size; + IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz); + + e = kmalloc(sz, GFP_KERNEL); + if (!e) + return -ENOMEM; + + ret = iwl_eeprom_verify_signature(trans, nvm_is_otp); + if (ret < 0) { + IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); + goto err_free; + } + + /* Make sure driver (instead of uCode) is allowed to read EEPROM */ + ret = iwl_eeprom_acquire_semaphore(trans); + if (ret < 0) { + IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); + goto err_free; + } + + if (nvm_is_otp) { + ret = iwl_init_otp_access(trans); + if (ret) { + IWL_ERR(trans, "Failed to initialize OTP access.\n"); + goto err_unlock; + } + + iwl_write32(trans, CSR_EEPROM_GP, + iwl_read32(trans, CSR_EEPROM_GP) & + ~CSR_EEPROM_GP_IF_OWNER_MSK); + + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | + CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); + /* traversing the linked list if no shadow ram supported */ + if (!trans->cfg->base_params->shadow_ram_support) { + ret = iwl_find_otp_image(trans, &validblockaddr); + if (ret) + goto err_unlock; + } + for (addr = validblockaddr; addr < validblockaddr + sz; + addr += sizeof(u16)) { + __le16 eeprom_data; + + ret = iwl_read_otp_word(trans, addr, &eeprom_data); + if (ret) + goto err_unlock; + e[cache_addr / 2] = eeprom_data; + cache_addr += sizeof(u16); + } + } else { + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + u32 r; + + iwl_write32(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + + ret = iwl_poll_bit(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(trans, + "Time out reading EEPROM[%d]\n", addr); + goto err_unlock; + } + r = iwl_read32(trans, CSR_EEPROM_REG); + e[addr / 2] = cpu_to_le16(r >> 16); + } + } + + IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", + nvm_is_otp ? "OTP" : "EEPROM"); + + iwl_eeprom_release_semaphore(trans); + + *eeprom_size = sz; + *eeprom = (u8 *)e; + return 0; + + err_unlock: + iwl_eeprom_release_semaphore(trans); + err_free: + kfree(e); + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_read_eeprom); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h new file mode 100644 index 000000000000..a6d3bdf82cdd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h @@ -0,0 +1,70 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
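As the read loop above shows, the image handed back by iwl_read_eeprom() is a flat array of little-endian 16-bit words indexed by byte offset. A hedged helper sketch for pulling one word back out; the function name is illustrative only:

static u16 example_eeprom_word(const u8 *eeprom, size_t eeprom_size,
			       size_t byte_off)
{
	if (WARN_ON(byte_off + sizeof(__le16) > eeprom_size))
		return 0;
	/* words were stored as __le16, so convert on the way out */
	return le16_to_cpup((const __le16 *)(eeprom + byte_off));
}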
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_eeprom_h__ +#define __iwl_eeprom_h__ + +#include "iwl-trans.h" + +int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size); + +#endif /* __iwl_eeprom_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h new file mode 100644 index 000000000000..d56064861a9c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -0,0 +1,535 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fh_h__ +#define __iwl_fh_h__ + +#include + +/****************************/ +/* Flow Handler Definitions */ +/****************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. + */ +#define FH_MEM_LOWER_BOUND (0x1000) +#define FH_MEM_UPPER_BOUND (0x2000) + +/** + * Keep-Warm (KW) buffer base address. + * + * Driver must allocate a 4KByte buffer that is for keeping the + * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency + * DRAM access when doing Txing or Rxing. The dummy accesses prevent host + * from going into a power-savings mode that would cause higher DRAM latency, + * and possible data over/under-runs, before all Tx/Rx is complete. + * + * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) + * of the buffer, which must be 4K aligned. Once this is set up, the device + * automatically invokes keep-warm accesses when normal accesses might not + * be sufficient to maintain fast DRAM response. 
+ * + * Bit fields: + * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned + */ +#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) + + +/** + * TFD Circular Buffers Base (CBBC) addresses + * + * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident + * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) + * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 + * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte + * aligned (address bits 0-7 must be 0). + * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers + * for them are in different places. + * + * Bit fields in each pointer register: + * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned + */ +#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) +#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0) +#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) +#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80) + +/* Find TFD CB base pointer for given queue */ +static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) +{ + if (chnl < 16) + return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl; + if (chnl < 20) + return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16); + WARN_ON_ONCE(chnl >= 32); + return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20); +} + + +/** + * Rx SRAM Control and Status Registers (RSCSR) + * + * These registers provide handshake between driver and device for the Rx queue + * (this queue handles *all* command responses, notifications, Rx data, etc. + * sent from uCode to host driver). Unlike Tx, there is only one Rx + * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can + * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer + * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 + * mapping between RBDs and RBs. + * + * Driver must allocate host DRAM memory for the following, and set the + * physical address of each into device registers: + * + * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 + * entries (although any power of 2, up to 4096, is selectable by driver). + * Each entry (1 dword) points to a receive buffer (RB) of consistent size + * (typically 4K, although 8K or 16K are also selectable by driver). + * Driver sets up RB size and number of RBDs in the CB via Rx config + * register FH_MEM_RCSR_CHNL0_CONFIG_REG. + * + * Bit fields within one RBD: + * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned + * + * Driver sets physical address [35:8] of base of RBD circular buffer + * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. + * + * 2) Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers + * (RBs) have been filled, via a "write pointer", actually the index of + * the RB's corresponding RBD within the circular buffer. Driver sets + * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. 
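For the keep-warm buffer described earlier, the driver side boils down to one coherent 4 KiB allocation and one register write. A hedged sketch, not the transport's actual setup path; error handling is simplified and the helper name is made up:

static int example_setup_keep_warm(struct iwl_trans *trans,
				   void **kw_virt, dma_addr_t *kw_phys)
{
	*kw_virt = dma_alloc_coherent(trans->dev, IWL_KW_SIZE,
				      kw_phys, GFP_KERNEL);
	if (!*kw_virt)
		return -ENOMEM;

	/* the register takes physical address bits 35:4 */
	iwl_write32(trans, FH_KW_MEM_ADDR_REG, (u32)(*kw_phys >> 4));
	return 0;
}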
+ * + * Bit fields in lower dword of Rx status buffer (upper dword not used + * by driver: + * 31-12: Not used by driver + * 11- 0: Index of last filled Rx buffer descriptor + * (device writes, driver reads this value) + * + * As the driver prepares Receive Buffers (RBs) for device to fill, driver must + * enter pointers to these RBs into contiguous RBD circular buffer entries, + * and update the device's "write" index register, + * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. + * + * This "write" index corresponds to the *next* RBD that the driver will make + * available, i.e. one RBD past the tail of the ready-to-fill RBDs within + * the circular buffer. This value should initially be 0 (before preparing any + * RBs), should be 8 after preparing the first 8 RBs (for example), and must + * wrap back to 0 at the end of the circular buffer (but don't wrap before + * "read" index has advanced past 1! See below). + * NOTE: DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. + * + * As the device fills RBs (referenced from contiguous RBDs within the circular + * buffer), it updates the Rx status buffer in host DRAM, 2) described above, + * to tell the driver the index of the latest filled RBD. The driver must + * read this "read" index from DRAM after receiving an Rx interrupt from device + * + * The driver must also internally keep track of a third index, which is the + * next RBD to process. When receiving an Rx interrupt, driver should process + * all filled but unprocessed RBs up to, but not including, the RB + * corresponding to the "read" index. For example, if "read" index becomes "1", + * driver may process the RB pointed to by RBD 0. Depending on volume of + * traffic, there may be many RBs to process. + * + * If read index == write index, device thinks there is no room to put new data. + * Due to this, the maximum number of filled RBs is 255, instead of 256. To + * be safe, make sure that there is a gap of at least 2 RBDs between "write" + * and "read" indexes; that is, make sure that there are no more than 254 + * buffers waiting to be filled. + */ +#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) +#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) + +/** + * Physical base address of 8-byte Rx Status buffer. + * Bit fields: + * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. + */ +#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) + +/** + * Physical base address of Rx Buffer Descriptor Circular Buffer. + * Bit fields: + * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. + */ +#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) + +/** + * Rx write pointer (index, really!). + * Bit fields: + * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. + * NOTE: For 256-entry circular buffer, use only bits [7:0]. + */ +#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) +#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) + +#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c) +#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG + +/** + * Rx Config/Status Registers (RCSR) + * Rx Config Reg for channel 0 (only channel used) + * + * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for + * normal operation (see bit fields). + * + * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. 
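The write-index rules above (wrap at the end of the circular buffer, tell the device only in multiples of 8) are easy to get wrong, so here is a hedged sketch of the update step; the helper name and the caller-maintained write_idx are illustrative, not the transport's real bookkeeping:

static void example_push_rx_write_idx(struct iwl_trans *trans, u32 *write_idx)
{
	/* one more RBD has been prepared; wrap at RX_QUEUE_SIZE (256) */
	*write_idx = (*write_idx + 1) & RX_QUEUE_MASK;

	/* the device expects the index in multiples of 8 */
	if (!(*write_idx & 7))
		iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, *write_idx);
}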
+ * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for + * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. + * + * Bit fields: + * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29-24: reserved + * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), + * min "5" for 32 RBDs, max "12" for 4096 RBDs. + * 19-18: reserved + * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, + * '10' 12K, '11' 16K. + * 15-14: reserved + * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) + * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) + * typical value 0x10 (about 1/2 msec) + * 3- 0: reserved + */ +#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) +#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) + +#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) +#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR (FH_MEM_RCSR_CHNL0 + 0x8) +#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ (FH_MEM_RCSR_CHNL0 + 0x10) + +#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ +#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ +#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ + +#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) +#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) +#define RX_RB_TIMEOUT (0x11) + +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) + +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) + +#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) + +/** + * Rx Shared Status Registers (RSSR) + * + * After stopping Rx DMA channel (writing 0 to + * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll + * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. + * + * Bit fields: + * 24: 1 = Channel 0 is idle + * + * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV + * contain default values that should not be altered by the driver. 
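Putting the two paragraphs above together, stopping Rx DMA is a write of zero followed by a poll for the channel-idle bit. A hedged sketch; the timeout value is arbitrary and the helper name is not from the driver:

static int example_stop_rx_dma(struct iwl_trans *trans)
{
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* wait for FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) */
	return iwl_poll_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}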
+ */ +#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) +#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) + +#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) +#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) +#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ + (FH_MEM_RSSR_LOWER_BOUND + 0x008) + +#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 +#define FH_MEM_TB_MAX_LENGTH (0x00020000) + +/* TFDB Area - TFDs buffer table */ +#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) +#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) +#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) +#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) +#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) + +/** + * Transmit DMA Channel Control/Status Registers (TCSR) + * + * Device has one configuration register for each of 8 Tx DMA/FIFO channels + * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, + * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. + * + * To use a Tx DMA channel, driver must initialize its + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: + * + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL + * + * All other bits should be 0. + * + * Bit fields: + * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29- 4: Reserved, set to "0" + * 3: Enable internal DMA requests (1, normal operation), disable (0) + * 2- 0: Reserved, set to "0" + */ +#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) +#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) + +/* Find Control/Status reg for given Tx DMA/FIFO channel */ +#define FH_TCSR_CHNL_NUM (8) + +/* TCSR: tx_config register values */ +#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) +#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) +#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) + +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) + +/** + * Tx Shared Status Registers (TSSR) + * + * After stopping Tx DMA channel (writing 0 to + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll + * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle + * 
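Following the TCSR description above, turning a Tx DMA channel on amounts to a single register write (note the comment text says ..._DMA_CREDIT_ENABLE_VAL while the define is spelled ..._DMA_CREDIT_ENABLE). A hedged sketch, not the driver's real queue bring-up:

static void example_enable_tx_chnl(struct iwl_trans *trans, u8 chnl)
{
	/* '10' in bits 31-30 = operate normally, bit 3 = internal DMA requests */
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}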
(channel's buffers empty | no pending requests). + * + * Bit fields: + * 31-24: 1 = Channel buffers empty (channel 7:0) + * 23-16: 1 = No pending requests (channel 7:0) + */ +#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) +#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) + +#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) + +/** + * Bit fields for TSSR(Tx Shared Status & Control) error status register: + * 31: Indicates an address error when accessed to internal memory + * uCode/driver must write "1" in order to clear this flag + * 30: Indicates that Host did not send the expected number of dwords to FH + * uCode/driver must write "1" in order to clear this flag + * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA + * command was received from the scheduler while the TRB was already full + * with previous command + * uCode/driver must write "1" in order to clear this flag + * 7-0: Each status bit indicates a channel's TxCredit error. When an error + * bit is set, it indicates that the FH has received a full indication + * from the RTC TxFIFO and the current value of the TxCredit counter was + * not equal to zero. This mean that the credit mechanism was not + * synchronized to the TxFIFO status + * uCode/driver must write "1" in order to clear this flag + */ +#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) +#define FH_TSSR_TX_MSG_CONFIG_REG (FH_TSSR_LOWER_BOUND + 0x008) + +#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) + +/* Tx service channels */ +#define FH_SRVC_CHNL (9) +#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8) +#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ + (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) + +#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) +#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4) + +/* Instruct FH to increment the retry count of a packet when + * it is brought from the memory to TX-FIFO + */ +#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +/** + * struct iwl_rb_status - reserve buffer status + * host memory mapped FH registers + * @closed_rb_num [0:11] - Indicates the index of the RB which was closed + * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed + * @finished_rb_num [0:11] - Indicates the index of the current RB + * in which the last frame was written to + * @finished_fr_num [0:11] - Indicates the index of the RX Frame + * which was transferred + */ +struct iwl_rb_status { + __le16 closed_rb_num; + __le16 closed_fr_num; + __le16 finished_rb_num; + __le16 finished_fr_nam; + __le32 __unused; +} __packed; + + +#define TFD_QUEUE_SIZE_MAX (256) +#define TFD_QUEUE_SIZE_BC_DUP (64) +#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) +#define IWL_TX_DMA_MASK DMA_BIT_MASK(36) +#define IWL_NUM_OF_TBS 20 + +static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr) +{ + return (sizeof(addr) > sizeof(u32) ? 
(addr >> 16) >> 16 : 0) & 0xF; +} +/** + * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor + * + * This structure contains dma address and length of transmission address + * + * @lo: low [31:0] portion of the dma address of TX buffer + * every even is unaligned on 16 bit boundary + * @hi_n_len 0-3 [35:32] portion of dma + * 4-15 length of the tx buffer + */ +struct iwl_tfd_tb { + __le32 lo; + __le16 hi_n_len; +} __packed; + +/** + * struct iwl_tfd + * + * Transmit Frame Descriptor (TFD) + * + * @ __reserved1[3] reserved + * @ num_tbs 0-4 number of active tbs + * 5 reserved + * 6-7 padding (not used) + * @ tbs[20] transmit frame buffer descriptors + * @ __pad padding + * + * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. + * Both driver and device share these circular buffers, each of which must be + * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes + * + * Driver must indicate the physical address of the base of each + * circular buffer via the FH_MEM_CBBC_QUEUE registers. + * + * Each TFD contains pointer/size information for up to 20 data buffers + * in host DRAM. These buffers collectively contain the (one) frame described + * by the TFD. Each buffer must be a single contiguous block of memory within + * itself, but buffers may be scattered in host DRAM. Each buffer has max size + * of (4K - 4). The concatenates all of a TFD's buffers into a single + * Tx frame, up to 8 KBytes in size. + * + * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. + */ +struct iwl_tfd { + u8 __reserved1[3]; + u8 num_tbs; + struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; + __le32 __pad; +} __packed; + +/* Keep Warm Size */ +#define IWL_KW_SIZE 0x1000 /* 4k */ + +/* Fixed (non-configurable) rx data from phy */ + +/** + * struct iwlagn_schedq_bc_tbl scheduler byte count table + * base physical address provided by SCD_DRAM_BASE_ADDR + * @tfd_offset 0-12 - tx command byte count + * 12-16 - station index + */ +struct iwlagn_scd_bc_tbl { + __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; +} __packed; + +#endif /* !__iwl_fh_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h new file mode 100644 index 000000000000..9dbe19cbb4dd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h @@ -0,0 +1,320 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
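struct iwl_tfd_tb above packs a 36-bit DMA address and a 12-bit length into lo plus hi_n_len, with iwl_get_dma_hi_addr() supplying the top nibble. A hedged sketch of filling one entry; the helper name is illustrative:

static void example_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
			       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;			/* bits 4-15: length */

	hi_n_len |= iwl_get_dma_hi_addr(addr);		/* bits 0-3: addr[35:32] */

	tb->lo = cpu_to_le32(addr);			/* addr[31:0] */
	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}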
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_error_dump_h__ +#define __fw_error_dump_h__ + +#include + +#define IWL_FW_ERROR_DUMP_BARKER 0x14789632 + +/** + * enum iwl_fw_error_dump_type - types of data in the dump file + * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0 + * @IWL_FW_ERROR_DUMP_RXF: + * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as + * &struct iwl_fw_error_dump_txcmd packets + * @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info + * info on the device / firmware. + * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor + * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several + * sections like this in a single file. + * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers + * @IWL_FW_ERROR_DUMP_MEM: chunk of memory + * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump. + * Structured as &struct iwl_fw_error_dump_trigger_desc. + * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as + * &struct iwl_fw_error_dump_rb + * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were + * paged to the DRAM. 
+ */ +enum iwl_fw_error_dump_type { + /* 0 is deprecated */ + IWL_FW_ERROR_DUMP_CSR = 1, + IWL_FW_ERROR_DUMP_RXF = 2, + IWL_FW_ERROR_DUMP_TXCMD = 3, + IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4, + IWL_FW_ERROR_DUMP_FW_MONITOR = 5, + IWL_FW_ERROR_DUMP_PRPH = 6, + IWL_FW_ERROR_DUMP_TXF = 7, + IWL_FW_ERROR_DUMP_FH_REGS = 8, + IWL_FW_ERROR_DUMP_MEM = 9, + IWL_FW_ERROR_DUMP_ERROR_INFO = 10, + IWL_FW_ERROR_DUMP_RB = 11, + IWL_FW_ERROR_DUMP_PAGING = 12, + + IWL_FW_ERROR_DUMP_MAX, +}; + +/** + * struct iwl_fw_error_dump_data - data for one type + * @type: %enum iwl_fw_error_dump_type + * @len: the length starting from %data + * @data: the data itself + */ +struct iwl_fw_error_dump_data { + __le32 type; + __le32 len; + __u8 data[]; +} __packed; + +/** + * struct iwl_fw_error_dump_file - the layout of the header of the file + * @barker: must be %IWL_FW_ERROR_DUMP_BARKER + * @file_len: the length of all the file starting from %barker + * @data: array of %struct iwl_fw_error_dump_data + */ +struct iwl_fw_error_dump_file { + __le32 barker; + __le32 file_len; + u8 data[0]; +} __packed; + +/** + * struct iwl_fw_error_dump_txcmd - TX command data + * @cmdlen: original length of command + * @caplen: captured length of command (may be less) + * @data: captured command data, @caplen bytes + */ +struct iwl_fw_error_dump_txcmd { + __le32 cmdlen; + __le32 caplen; + u8 data[]; +} __packed; + +/** + * struct iwl_fw_error_dump_fifo - RX/TX FIFO data + * @fifo_num: number of FIFO (starting from 0) + * @available_bytes: num of bytes available in FIFO (may be less than FIFO size) + * @wr_ptr: position of write pointer + * @rd_ptr: position of read pointer + * @fence_ptr: position of fence pointer + * @fence_mode: the current mode of the fence (before locking) - + * 0=follow RD pointer ; 1 = freeze + * @data: all of the FIFO's data + */ +struct iwl_fw_error_dump_fifo { + __le32 fifo_num; + __le32 available_bytes; + __le32 wr_ptr; + __le32 rd_ptr; + __le32 fence_ptr; + __le32 fence_mode; + u8 data[]; +} __packed; + +enum iwl_fw_error_dump_family { + IWL_FW_ERROR_DUMP_FAMILY_7 = 7, + IWL_FW_ERROR_DUMP_FAMILY_8 = 8, +}; + +/** + * struct iwl_fw_error_dump_info - info on the device / firmware + * @device_family: the family of the device (7 / 8) + * @hw_step: the step of the device + * @fw_human_readable: human readable FW version + * @dev_human_readable: name of the device + * @bus_human_readable: name of the bus used + */ +struct iwl_fw_error_dump_info { + __le32 device_family; + __le32 hw_step; + u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ]; + u8 dev_human_readable[64]; + u8 bus_human_readable[8]; +} __packed; + +/** + * struct iwl_fw_error_dump_fw_mon - FW monitor data + * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer + * @fw_mon_base_ptr: base pointer of the data + * @fw_mon_cycle_cnt: number of wraparounds + * @reserved: for future use + * @data: captured data + */ +struct iwl_fw_error_dump_fw_mon { + __le32 fw_mon_wr_ptr; + __le32 fw_mon_base_ptr; + __le32 fw_mon_cycle_cnt; + __le32 reserved[3]; + u8 data[]; +} __packed; + +/** + * struct iwl_fw_error_dump_prph - periphery registers data + * @prph_start: address of the first register in this chunk + * @data: the content of the registers + */ +struct iwl_fw_error_dump_prph { + __le32 prph_start; + __le32 data[]; +}; + +enum iwl_fw_error_dump_mem_type { + IWL_FW_ERROR_DUMP_MEM_SRAM, + IWL_FW_ERROR_DUMP_MEM_SMEM, +}; + +/** + * struct iwl_fw_error_dump_mem - chunk of memory + * @type: %enum iwl_fw_error_dump_mem_type + * @offset: the offset from which 
the memory was read + * @data: the content of the memory + */ +struct iwl_fw_error_dump_mem { + __le32 type; + __le32 offset; + u8 data[]; +}; + +/** + * struct iwl_fw_error_dump_rb - content of an Receive Buffer + * @index: the index of the Receive Buffer in the Rx queue + * @rxq: the RB's Rx queue + * @reserved: + * @data: the content of the Receive Buffer + */ +struct iwl_fw_error_dump_rb { + __le32 index; + __le32 rxq; + __le32 reserved; + u8 data[]; +}; + +/** + * struct iwl_fw_error_dump_paging - content of the UMAC's image page + * block on DRAM + * @index: the index of the page block + * @reserved: + * @data: the content of the page block + */ +struct iwl_fw_error_dump_paging { + __le32 index; + __le32 reserved; + u8 data[]; +}; + +/** + * iwl_fw_error_next_data - advance fw error dump data pointer + * @data: previous data block + * Returns: next data block + */ +static inline struct iwl_fw_error_dump_data * +iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) +{ + return (void *)(data->data + le32_to_cpu(data->len)); +} + +/** + * enum iwl_fw_dbg_trigger - triggers available + * + * @FW_DBG_TRIGGER_USER: trigger log collection by user + * This should not be defined as a trigger to the driver, but a value the + * driver should set to indicate that the trigger was initiated by the + * user. + * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts + * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are + * missed. + * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch. + * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a + * command response or a notification. + * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event. + * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold. + * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon + * goes below a threshold. + * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang + * detection. + * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related + * events. + * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events. + */ +enum iwl_fw_dbg_trigger { + FW_DBG_TRIGGER_INVALID = 0, + FW_DBG_TRIGGER_USER, + FW_DBG_TRIGGER_FW_ASSERT, + FW_DBG_TRIGGER_MISSED_BEACONS, + FW_DBG_TRIGGER_CHANNEL_SWITCH, + FW_DBG_TRIGGER_FW_NOTIF, + FW_DBG_TRIGGER_MLME, + FW_DBG_TRIGGER_STATS, + FW_DBG_TRIGGER_RSSI, + FW_DBG_TRIGGER_TXQ_TIMERS, + FW_DBG_TRIGGER_TIME_EVENT, + FW_DBG_TRIGGER_BA, + + /* must be last */ + FW_DBG_TRIGGER_MAX, +}; + +/** + * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition + * @type: %enum iwl_fw_dbg_trigger + * @data: raw data about what happened + */ +struct iwl_fw_error_dump_trigger_desc { + __le32 type; + u8 data[]; +}; + +#endif /* __fw_error_dump_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h new file mode 100644 index 000000000000..08303db0000f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -0,0 +1,768 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
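The dump layout above is a barker plus a chain of type/length blocks, and iwl_fw_error_next_data() steps from one block to the next. A hedged sketch of iterating a complete dump buffer; handle_chunk() is a hypothetical callback:

static void example_walk_dump(struct iwl_fw_error_dump_file *file,
			      void (*handle_chunk)(u32 type, const u8 *buf,
						   u32 len))
{
	struct iwl_fw_error_dump_data *data = (void *)file->data;
	u8 *end = (u8 *)file + le32_to_cpu(file->file_len);

	if (le32_to_cpu(file->barker) != IWL_FW_ERROR_DUMP_BARKER)
		return;

	while ((u8 *)data + sizeof(*data) <= end) {
		handle_chunk(le32_to_cpu(data->type), data->data,
			     le32_to_cpu(data->len));
		data = iwl_fw_error_next_data(data);
	}
}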
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_fw_file_h__ +#define __iwl_fw_file_h__ + +#include +#include + +/* v1/v2 uCode file layout */ +struct iwl_ucode_header { + __le32 ver; /* major/minor/API/serial */ + union { + struct { + __le32 inst_size; /* bytes of runtime code */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of init code */ + __le32 init_data_size; /* bytes of init data */ + __le32 boot_size; /* bytes of bootstrap code */ + u8 data[0]; /* in same order as sizes */ + } v1; + struct { + __le32 build; /* build number */ + __le32 inst_size; /* bytes of runtime code */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of init code */ + __le32 init_data_size; /* bytes of init data */ + __le32 boot_size; /* bytes of bootstrap code */ + u8 data[0]; /* in same order as sizes */ + } v2; + } u; +}; + +/* + * new TLV uCode file layout + * + * The new TLV file format contains TLVs, that each specify + * some piece of data. + */ + +enum iwl_ucode_tlv_type { + IWL_UCODE_TLV_INVALID = 0, /* unused */ + IWL_UCODE_TLV_INST = 1, + IWL_UCODE_TLV_DATA = 2, + IWL_UCODE_TLV_INIT = 3, + IWL_UCODE_TLV_INIT_DATA = 4, + IWL_UCODE_TLV_BOOT = 5, + IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ + IWL_UCODE_TLV_PAN = 7, + IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, + IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, + IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, + IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11, + IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12, + IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, + IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, + IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, + IWL_UCODE_TLV_WOWLAN_INST = 16, + IWL_UCODE_TLV_WOWLAN_DATA = 17, + IWL_UCODE_TLV_FLAGS = 18, + IWL_UCODE_TLV_SEC_RT = 19, + IWL_UCODE_TLV_SEC_INIT = 20, + IWL_UCODE_TLV_SEC_WOWLAN = 21, + IWL_UCODE_TLV_DEF_CALIB = 22, + IWL_UCODE_TLV_PHY_SKU = 23, + IWL_UCODE_TLV_SECURE_SEC_RT = 24, + IWL_UCODE_TLV_SECURE_SEC_INIT = 25, + IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26, + IWL_UCODE_TLV_NUM_OF_CPU = 27, + IWL_UCODE_TLV_CSCHEME = 28, + IWL_UCODE_TLV_API_CHANGES_SET = 29, + IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, + IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, + IWL_UCODE_TLV_PAGING = 32, + IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, + IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35, + IWL_UCODE_TLV_FW_VERSION = 36, + IWL_UCODE_TLV_FW_DBG_DEST = 38, + IWL_UCODE_TLV_FW_DBG_CONF = 39, + IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, + IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, +}; + +struct iwl_ucode_tlv { + __le32 type; /* see above */ + __le32 length; /* not including type/length fields */ + u8 data[0]; +}; + +#define IWL_TLV_UCODE_MAGIC 0x0a4c5749 +#define FW_VER_HUMAN_READABLE_SZ 64 + +struct iwl_tlv_ucode_header { + /* + * The TLV style ucode header is distinguished from + * the v1/v2 style header by first four bytes being + * zero, as such is an invalid combination of + * major/minor/API/serial versions. + */ + __le32 zero; + __le32 magic; + u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; + /* major/minor/API/serial or major in new format */ + __le32 ver; + __le32 build; + __le64 ignore; + /* + * The data contained herein has a TLV layout, + * see above for the TLV header and types. + * Note that each TLV is padded to a length + * that is a multiple of 4 for alignment. 
+ */ + u8 data[0]; +}; + +/* + * ucode TLVs + * + * ability to get extension for: flags & capabilities from ucode binaries files + */ +struct iwl_ucode_api { + __le32 api_index; + __le32 api_flags; +} __packed; + +struct iwl_ucode_capa { + __le32 api_index; + __le32 api_capa; +} __packed; + +/** + * enum iwl_ucode_tlv_flag - ucode API flags + * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously + * was a separate TLV but moved here to save space. + * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID, + * treats good CRC threshold as a boolean + * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). + * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. + * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS + * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD + * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan + * offload profile config command. + * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six + * (rather than two) IPv6 addresses + * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element + * from the probe request template. + * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) + * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) + * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC + * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and + * P2P client interfaces simultaneously if they are in different bindings. + * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and + * P2P client interfaces simultaneously if they are in same bindings. + * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD + * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save + * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. + * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients + * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. + */ +enum iwl_ucode_tlv_flag { + IWL_UCODE_TLV_FLAGS_PAN = BIT(0), + IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), + IWL_UCODE_TLV_FLAGS_MFP = BIT(2), + IWL_UCODE_TLV_FLAGS_P2P = BIT(3), + IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), + IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7), + IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), + IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12), + IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15), + IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16), + IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21), + IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22), + IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23), + IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), + IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), + IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), + IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29), + IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30), +}; + +typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; + +/** + * enum iwl_ucode_tlv_api - ucode api + * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex + * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time + * longer than the passive one, which is essential for fragmented scan. + * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. 
+ * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header + * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params + * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format + * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority + * instead of 3. + * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size + * (command version 3) that supports per-chain limits + * + * @NUM_IWL_UCODE_TLV_API: number of bits used + */ +enum iwl_ucode_tlv_api { + IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3, + IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, + IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, + IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, + IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, + IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, + IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, + IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27, + + NUM_IWL_UCODE_TLV_API +#ifdef __CHECKER__ + /* sparse says it cannot increment the previous enum member */ + = 128 +#endif +}; + +typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; + +/** + * enum iwl_ucode_tlv_capa - ucode capabilities + * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 + * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory + * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. + * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer + * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM) + * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality + * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current + * tx power value into TPC Report action frame and Link Measurement Report + * action frame + * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current + * channel in DS parameter set element in probe requests. + * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in + * probe requests. + * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests + * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), + * which also implies support for the scheduler configuration command + * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching + * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command + * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command + * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload + * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics + * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running + * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different + * sources for the MCC. This TLV bit is a future replacement to + * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR + * is supported. 
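/*
 * Sketch only: roughly how an IWL_UCODE_TLV_API_CHANGES_SET entry
 * (struct iwl_ucode_api above) is folded into the API bitmap kept in
 * struct iwl_ucode_capabilities (defined in iwl-fw.h later in this
 * patch).  Each api_index covers 32 flag bits.
 */
static void example_set_ucode_api_flags(const struct iwl_ucode_api *entry,
					unsigned long *api_bitmap)
{
	int idx = le32_to_cpu(entry->api_index);
	u32 flags = le32_to_cpu(entry->api_flags);
	int i;

	for (i = 0; i < 32; i++)
		if (flags & BIT(i))
			__set_bit(i + 32 * idx, api_bitmap);
}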
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC + * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan + * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement + * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts + * + * @NUM_IWL_UCODE_TLV_CAPA: number of bits used + */ +enum iwl_ucode_tlv_capa { + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, + IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, + IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, + IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5, + IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10, + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, + IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, + IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, + IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, + IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, + IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, + IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, + IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, + IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, + + NUM_IWL_UCODE_TLV_CAPA +#ifdef __CHECKER__ + /* sparse says it cannot increment the previous enum member */ + = 128 +#endif +}; + +/* The default calibrate table size if not specified by firmware file */ +#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 +#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 +#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 + +/* The default max probe length if not specified by the firmware file */ +#define IWL_DEFAULT_MAX_PROBE_LENGTH 200 + +/* + * For 16.0 uCode and above, there is no differentiation between sections, + * just an offset to the HW address. + */ +#define IWL_UCODE_SECTION_MAX 16 +#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC +#define PAGING_SEPARATOR_SECTION 0xAAAABBBB + +/* uCode version contains 4 values: Major/Minor/API/Serial */ +#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) +#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) +#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) +#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) + +/* + * Calibration control struct. + * Sent as part of the phy configuration command. + * @flow_trigger: bitmap for which calibrations to perform according to + * flow triggers. + * @event_trigger: bitmap for which calibrations to perform according to + * event triggers. 
+ */ +struct iwl_tlv_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +} __packed; + +enum iwl_fw_phy_cfg { + FW_PHY_CFG_RADIO_TYPE_POS = 0, + FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS, + FW_PHY_CFG_RADIO_STEP_POS = 2, + FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS, + FW_PHY_CFG_RADIO_DASH_POS = 4, + FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS, + FW_PHY_CFG_TX_CHAIN_POS = 16, + FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, + FW_PHY_CFG_RX_CHAIN_POS = 20, + FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, +}; + +#define IWL_UCODE_MAX_CS 1 + +/** + * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW. + * @cipher: a cipher suite selector + * @flags: cipher scheme flags (currently reserved for a future use) + * @hdr_len: a size of MPDU security header + * @pn_len: a size of PN + * @pn_off: an offset of pn from the beginning of the security header + * @key_idx_off: an offset of key index byte in the security header + * @key_idx_mask: a bit mask of key_idx bits + * @key_idx_shift: bit shift needed to get key_idx + * @mic_len: mic length in bytes + * @hw_cipher: a HW cipher index used in host commands + */ +struct iwl_fw_cipher_scheme { + __le32 cipher; + u8 flags; + u8 hdr_len; + u8 pn_len; + u8 pn_off; + u8 key_idx_off; + u8 key_idx_mask; + u8 key_idx_shift; + u8 mic_len; + u8 hw_cipher; +} __packed; + +enum iwl_fw_dbg_reg_operator { + CSR_ASSIGN, + CSR_SETBIT, + CSR_CLEARBIT, + + PRPH_ASSIGN, + PRPH_SETBIT, + PRPH_CLEARBIT, + + INDIRECT_ASSIGN, + INDIRECT_SETBIT, + INDIRECT_CLEARBIT, + + PRPH_BLOCKBIT, +}; + +/** + * struct iwl_fw_dbg_reg_op - an operation on a register + * + * @op: %enum iwl_fw_dbg_reg_operator + * @addr: offset of the register + * @val: value + */ +struct iwl_fw_dbg_reg_op { + u8 op; + u8 reserved[3]; + __le32 addr; + __le32 val; +} __packed; + +/** + * enum iwl_fw_dbg_monitor_mode - available monitor recording modes + * + * @SMEM_MODE: monitor stores the data in SMEM + * @EXTERNAL_MODE: monitor stores the data in allocated DRAM + * @MARBH_MODE: monitor stores the data in MARBH buffer + * @MIPI_MODE: monitor outputs the data through the MIPI interface + */ +enum iwl_fw_dbg_monitor_mode { + SMEM_MODE = 0, + EXTERNAL_MODE = 1, + MARBH_MODE = 2, + MIPI_MODE = 3, +}; + +/** + * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data + * + * @version: version of the TLV - currently 0 + * @monitor_mode: %enum iwl_fw_dbg_monitor_mode + * @size_power: buffer size will be 2^(size_power + 11) + * @base_reg: addr of the base addr register (PRPH) + * @end_reg: addr of the end addr register (PRPH) + * @write_ptr_reg: the addr of the reg of the write pointer + * @wrap_count: the addr of the reg of the wrap_count + * @base_shift: shift right of the base addr reg + * @end_shift: shift right of the end addr reg + * @reg_ops: array of registers operations + * + * This parses IWL_UCODE_TLV_FW_DBG_DEST + */ +struct iwl_fw_dbg_dest_tlv { + u8 version; + u8 monitor_mode; + u8 size_power; + u8 reserved; + __le32 base_reg; + __le32 end_reg; + __le32 write_ptr_reg; + __le32 wrap_count; + u8 base_shift; + u8 end_shift; + struct iwl_fw_dbg_reg_op reg_ops[0]; +} __packed; + +struct iwl_fw_dbg_conf_hcmd { + u8 id; + u8 reserved; + __le16 len; + u8 data[0]; +} __packed; + +/** + * enum iwl_fw_dbg_trigger_mode - triggers functionalities + * + * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism + * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data + * 
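/*
 * Two illustrative helpers (not part of the patch) for fields defined
 * above: the monitor buffer size encoded by iwl_fw_dbg_dest_tlv is
 * 2^(size_power + 11) bytes (size_power == 8 would give 512 KiB), and
 * the valid TX chains are extracted from the PHY configuration word
 * with the FW_PHY_CFG_* masks.
 */
static u32 example_dbg_buffer_size(const struct iwl_fw_dbg_dest_tlv *dest)
{
	return BIT(dest->size_power + 11);
}

static u8 example_valid_tx_ant(u32 phy_config)
{
	return (phy_config & FW_PHY_CFG_TX_CHAIN) >> FW_PHY_CFG_TX_CHAIN_POS;
}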
@IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to + * collect only monitor data + */ +enum iwl_fw_dbg_trigger_mode { + IWL_FW_DBG_TRIGGER_START = BIT(0), + IWL_FW_DBG_TRIGGER_STOP = BIT(1), + IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2), +}; + +/** + * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger + * @IWL_FW_DBG_CONF_VIF_ANY: any vif type + * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode + * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode + * @IWL_FW_DBG_CONF_VIF_AP: AP mode + * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode + * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode + * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device + */ +enum iwl_fw_dbg_trigger_vif_type { + IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED, + IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC, + IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION, + IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP, + IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT, + IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO, + IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE, +}; + +/** + * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger + * @id: %enum iwl_fw_dbg_trigger + * @vif_type: %enum iwl_fw_dbg_trigger_vif_type + * @stop_conf_ids: bitmap of configurations this trigger relates to. + * if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding + * to the currently running configuration is set, the data should be + * collected. + * @stop_delay: how many milliseconds to wait before collecting the data + * after the STOP trigger fires. + * @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both + * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what + * configuration should be applied when the triggers kicks in. + * @occurrences: number of occurrences. 0 means the trigger will never fire. + */ +struct iwl_fw_dbg_trigger_tlv { + __le32 id; + __le32 vif_type; + __le32 stop_conf_ids; + __le32 stop_delay; + u8 mode; + u8 start_conf_id; + __le16 occurrences; + __le32 reserved[2]; + + u8 data[0]; +} __packed; + +#define FW_DBG_START_FROM_ALIVE 0 +#define FW_DBG_CONF_MAX 32 +#define FW_DBG_INVALID 0xff + +/** + * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons + * @stop_consec_missed_bcon: stop recording if threshold is crossed. + * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed. + * @start_consec_missed_bcon: start recording if threshold is crossed. + * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed. + * @reserved1: reserved + * @reserved2: reserved + */ +struct iwl_fw_dbg_trigger_missed_bcon { + __le32 stop_consec_missed_bcon; + __le32 stop_consec_missed_bcon_since_rx; + __le32 reserved2[2]; + __le32 start_consec_missed_bcon; + __le32 start_consec_missed_bcon_since_rx; + __le32 reserved1[2]; +} __packed; + +/** + * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW. 
+ * cmds: the list of commands to trigger the collection on + */ +struct iwl_fw_dbg_trigger_cmd { + struct cmd { + u8 cmd_id; + u8 group_id; + } __packed cmds[16]; +} __packed; + +/** + * iwl_fw_dbg_trigger_stats - configures trigger for statistics + * @stop_offset: the offset of the value to be monitored + * @stop_threshold: the threshold above which to collect + * @start_offset: the offset of the value to be monitored + * @start_threshold: the threshold above which to start recording + */ +struct iwl_fw_dbg_trigger_stats { + __le32 stop_offset; + __le32 stop_threshold; + __le32 start_offset; + __le32 start_threshold; +} __packed; + +/** + * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI + * @rssi: RSSI value to trigger at + */ +struct iwl_fw_dbg_trigger_low_rssi { + __le32 rssi; +} __packed; + +/** + * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events + * @stop_auth_denied: number of denied authentication to collect + * @stop_auth_timeout: number of authentication timeout to collect + * @stop_rx_deauth: number of Rx deauth before to collect + * @stop_tx_deauth: number of Tx deauth before to collect + * @stop_assoc_denied: number of denied association to collect + * @stop_assoc_timeout: number of association timeout to collect + * @stop_connection_loss: number of connection loss to collect + * @start_auth_denied: number of denied authentication to start recording + * @start_auth_timeout: number of authentication timeout to start recording + * @start_rx_deauth: number of Rx deauth to start recording + * @start_tx_deauth: number of Tx deauth to start recording + * @start_assoc_denied: number of denied association to start recording + * @start_assoc_timeout: number of association timeout to start recording + * @start_connection_loss: number of connection loss to start recording + */ +struct iwl_fw_dbg_trigger_mlme { + u8 stop_auth_denied; + u8 stop_auth_timeout; + u8 stop_rx_deauth; + u8 stop_tx_deauth; + + u8 stop_assoc_denied; + u8 stop_assoc_timeout; + u8 stop_connection_loss; + u8 reserved; + + u8 start_auth_denied; + u8 start_auth_timeout; + u8 start_rx_deauth; + u8 start_tx_deauth; + + u8 start_assoc_denied; + u8 start_assoc_timeout; + u8 start_connection_loss; + u8 reserved2; +} __packed; + +/** + * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer + * @command_queue: timeout for the command queue in ms + * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms + * @softap: timeout for the queues of a softAP in ms + * @p2p_go: timeout for the queues of a P2P GO in ms + * @p2p_client: timeout for the queues of a P2P client in ms + * @p2p_device: timeout for the queues of a P2P device in ms + * @ibss: timeout for the queues of an IBSS in ms + * @tdls: timeout for the queues of a TDLS station in ms + */ +struct iwl_fw_dbg_trigger_txq_timer { + __le32 command_queue; + __le32 bss; + __le32 softap; + __le32 p2p_go; + __le32 p2p_client; + __le32 p2p_device; + __le32 ibss; + __le32 tdls; + __le32 reserved[4]; +} __packed; + +/** + * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger + * time_Events: a list of tuples . The driver will issue a + * trigger each time a time event notification that relates to time event + * id with one of the actions in the bitmap is received and + * BIT(notif->status) is set in status_bitmap. 
+ * + */ +struct iwl_fw_dbg_trigger_time_event { + struct { + __le32 id; + __le32 action_bitmap; + __le32 status_bitmap; + } __packed time_events[16]; +} __packed; + +/** + * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger + * rx_ba_start: tid bitmap to configure on what tid the trigger should occur + * when an Rx BlockAck session is started. + * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur + * when an Rx BlockAck session is stopped. + * tx_ba_start: tid bitmap to configure on what tid the trigger should occur + * when a Tx BlockAck session is started. + * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur + * when a Tx BlockAck session is stopped. + * rx_bar: tid bitmap to configure on what tid the trigger should occur + * when a BAR is received (for a Tx BlockAck session). + * tx_bar: tid bitmap to configure on what tid the trigger should occur + * when a BAR is send (for an Rx BlocAck session). + * frame_timeout: tid bitmap to configure on what tid the trigger should occur + * when a frame times out in the reodering buffer. + */ +struct iwl_fw_dbg_trigger_ba { + __le16 rx_ba_start; + __le16 rx_ba_stop; + __le16 tx_ba_start; + __le16 tx_ba_stop; + __le16 rx_bar; + __le16 tx_bar; + __le16 frame_timeout; +} __packed; + +/** + * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. + * @id: conf id + * @usniffer: should the uSniffer image be used + * @num_of_hcmds: how many HCMDs to send are present here + * @hcmd: a variable length host command to be sent to apply the configuration. + * If there is more than one HCMD to send, they will appear one after the + * other and be sent in the order that they appear in. + * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up-to + * %FW_DBG_CONF_MAX configuration per run. + */ +struct iwl_fw_dbg_conf_tlv { + u8 id; + u8 usniffer; + u8 reserved; + u8 num_of_hcmds; + struct iwl_fw_dbg_conf_hcmd hcmd; +} __packed; + +/** + * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW + * @max_scan_cache_size: total space allocated for scan results (in bytes). + * @max_scan_buckets: maximum number of channel buckets. + * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. + * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. + * @max_scan_reporting_threshold: max possible report threshold. in percentage. + * @max_hotlist_aps: maximum number of entries for hotlist APs. + * @max_significant_change_aps: maximum number of entries for significant + * change APs. + * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can + * hold. + */ +struct iwl_fw_gscan_capabilities { + __le32 max_scan_cache_size; + __le32 max_scan_buckets; + __le32 max_ap_cache_per_scan; + __le32 max_rssi_sample_size; + __le32 max_scan_reporting_threshold; + __le32 max_hotlist_aps; + __le32 max_significant_change_aps; + __le32 max_bssid_history_entries; +} __packed; + +#endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h new file mode 100644 index 000000000000..84ec0cefb62a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -0,0 +1,322 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
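/*
 * Sketch of the matching rule spelled out in the
 * iwl_fw_dbg_trigger_time_event kernel-doc above; the notification
 * fields (id/action/status) are hypothetical host-order values taken
 * from a time-event notification.
 */
static bool
example_time_event_triggers(const struct iwl_fw_dbg_trigger_time_event *trig,
			    u32 id, u32 action, u32 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trig->time_events); i++) {
		if (le32_to_cpu(trig->time_events[i].id) != id)
			continue;
		if (!(le32_to_cpu(trig->time_events[i].action_bitmap) &
		      BIT(action)))
			continue;
		if (le32_to_cpu(trig->time_events[i].status_bitmap) &
		    BIT(status))
			return true;
	}
	return false;
}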
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_fw_h__ +#define __iwl_fw_h__ +#include +#include + +#include "iwl-fw-file.h" +#include "iwl-fw-error-dump.h" + +/** + * enum iwl_ucode_type + * + * The type of ucode. + * + * @IWL_UCODE_REGULAR: Normal runtime ucode + * @IWL_UCODE_INIT: Initial ucode + * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode + * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image + */ +enum iwl_ucode_type { + IWL_UCODE_REGULAR, + IWL_UCODE_INIT, + IWL_UCODE_WOWLAN, + IWL_UCODE_REGULAR_USNIFFER, + IWL_UCODE_TYPE_MAX, +}; + +/* + * enumeration of ucode section. + * This enumeration is used directly for older firmware (before 16.0). 
+ * For new firmware, there can be up to 4 sections (see below) but the + * first one packaged into the firmware file is the DATA section and + * some debugging code accesses that. + */ +enum iwl_ucode_sec { + IWL_UCODE_SECTION_DATA, + IWL_UCODE_SECTION_INST, +}; + +struct iwl_ucode_capabilities { + u32 max_probe_length; + u32 n_scan_channels; + u32 standard_phy_calibration_size; + u32 flags; + unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)]; + unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)]; +}; + +static inline bool +fw_has_api(const struct iwl_ucode_capabilities *capabilities, + iwl_ucode_tlv_api_t api) +{ + return test_bit((__force long)api, capabilities->_api); +} + +static inline bool +fw_has_capa(const struct iwl_ucode_capabilities *capabilities, + iwl_ucode_tlv_capa_t capa) +{ + return test_bit((__force long)capa, capabilities->_capa); +} + +/* one for each uCode image (inst/data, init/runtime/wowlan) */ +struct fw_desc { + const void *data; /* vmalloc'ed data */ + u32 len; /* size in bytes */ + u32 offset; /* offset in the device */ +}; + +struct fw_img { + struct fw_desc sec[IWL_UCODE_SECTION_MAX]; + bool is_dual_cpus; + u32 paging_mem_size; +}; + +struct iwl_sf_region { + u32 addr; + u32 size; +}; + +/* + * Block paging calculations + */ +#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */ +#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */ +#define PAGE_PER_GROUP_2_EXP_SIZE 3 +/* 8 pages per group */ +#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE) +/* don't change, support only 32KB size */ +#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE) +/* 32K == 2^15 */ +#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE) + +/* + * Image paging calculations + */ +#define BLOCK_PER_IMAGE_2_EXP_SIZE 5 +/* 2^5 == 32 blocks per image */ +#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE) +/* maximum image size 1024KB */ +#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE) + +/* Virtual address signature */ +#define PAGING_ADDR_SIG 0xAA000000 + +#define PAGING_CMD_IS_SECURED BIT(9) +#define PAGING_CMD_IS_ENABLED BIT(8) +#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 +#define PAGING_TLV_SECURE_MASK 1 + +/** + * struct iwl_fw_paging + * @fw_paging_phys: page phy pointer + * @fw_paging_block: pointer to the allocated block + * @fw_paging_size: page size + */ +struct iwl_fw_paging { + dma_addr_t fw_paging_phys; + struct page *fw_paging_block; + u32 fw_paging_size; +}; + +/** + * struct iwl_fw_cscheme_list - a cipher scheme list + * @size: a number of entries + * @cs: cipher scheme entries + */ +struct iwl_fw_cscheme_list { + u8 size; + struct iwl_fw_cipher_scheme cs[]; +} __packed; + +/** + * struct iwl_gscan_capabilities - gscan capabilities supported by FW + * @max_scan_cache_size: total space allocated for scan results (in bytes). + * @max_scan_buckets: maximum number of channel buckets. + * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. + * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. + * @max_scan_reporting_threshold: max possible report threshold. in percentage. + * @max_hotlist_aps: maximum number of entries for hotlist APs. + * @max_significant_change_aps: maximum number of entries for significant + * change APs. + * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can + * hold. 
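/*
 * Usage sketch for the fw_has_api()/fw_has_capa() helpers above; the
 * particular combination of bits tested here is only an example, not a
 * real requirement of any opmode.
 */
static bool
example_wants_umac_scan(const struct iwl_ucode_capabilities *capa)
{
	return fw_has_capa(capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
	       fw_has_api(capa, IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY);
}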
+ */ +struct iwl_gscan_capabilities { + u32 max_scan_cache_size; + u32 max_scan_buckets; + u32 max_ap_cache_per_scan; + u32 max_rssi_sample_size; + u32 max_scan_reporting_threshold; + u32 max_hotlist_aps; + u32 max_significant_change_aps; + u32 max_bssid_history_entries; +}; + +/** + * struct iwl_fw - variables associated with the firmware + * + * @ucode_ver: ucode version from the ucode file + * @fw_version: firmware version string + * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan. + * @ucode_capa: capabilities parsed from the ucode file. + * @enhance_sensitivity_table: device can do enhanced sensitivity. + * @init_evtlog_ptr: event log offset for init ucode. + * @init_evtlog_size: event log size for init ucode. + * @init_errlog_ptr: error log offfset for init ucode. + * @inst_evtlog_ptr: event log offset for runtime ucode. + * @inst_evtlog_size: event log size for runtime ucode. + * @inst_errlog_ptr: error log offfset for runtime ucode. + * @mvm_fw: indicates this is MVM firmware + * @cipher_scheme: optional external cipher scheme. + * @human_readable: human readable version + * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until + * we get the ALIVE from the uCode + * @dbg_dest_tlv: points to the destination TLV for debug + * @dbg_conf_tlv: array of pointers to configuration TLVs for debug + * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries + * @dbg_trigger_tlv: array of pointers to triggers TLVs + * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries + * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv + */ +struct iwl_fw { + u32 ucode_ver; + + char fw_version[ETHTOOL_FWVERS_LEN]; + + /* ucode images */ + struct fw_img img[IWL_UCODE_TYPE_MAX]; + + struct iwl_ucode_capabilities ucode_capa; + bool enhance_sensitivity_table; + + u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; + u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; + + struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX]; + u32 phy_config; + u8 valid_tx_ant; + u8 valid_rx_ant; + + bool mvm_fw; + + struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; + u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; + + u32 sdio_adma_addr; + + struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; + size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; + struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; + size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; + u8 dbg_dest_reg_num; + struct iwl_gscan_capabilities gscan_capa; +}; + +static inline const char *get_fw_dbg_mode_string(int mode) +{ + switch (mode) { + case SMEM_MODE: + return "SMEM"; + case EXTERNAL_MODE: + return "EXTERNAL_DRAM"; + case MARBH_MODE: + return "MARBH"; + case MIPI_MODE: + return "MIPI"; + default: + return "UNKNOWN"; + } +} + +static inline bool +iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) +{ + const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id]; + + if (!conf_tlv) + return false; + + return conf_tlv->usniffer; +} + +#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ + void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ + unlikely(__dbg_trigger); \ +}) + +static inline struct iwl_fw_dbg_trigger_tlv* +iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id) +{ + if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv))) + return NULL; + + return fw->dbg_trigger_tlv[id]; +} + +#endif /* __iwl_fw_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c new file mode 100644 index 
000000000000..0bd9d4aad0c0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -0,0 +1,289 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include + +#include "iwl-drv.h" +#include "iwl-io.h" +#include "iwl-csr.h" +#include "iwl-debug.h" +#include "iwl-prph.h" +#include "iwl-fh.h" + +void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) +{ + trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val); + iwl_trans_write8(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_write8); + +void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val) +{ + trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val); + iwl_trans_write32(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_write32); + +u32 iwl_read32(struct iwl_trans *trans, u32 ofs) +{ + u32 val = iwl_trans_read32(trans, ofs); + + trace_iwlwifi_dev_ioread32(trans->dev, ofs, val); + return val; +} +IWL_EXPORT_SYMBOL(iwl_read32); + +#define IWL_POLL_INTERVAL 10 /* microseconds */ + +int iwl_poll_bit(struct iwl_trans *trans, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int t = 0; + + do { + if ((iwl_read32(trans, addr) & mask) == (bits & mask)) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} +IWL_EXPORT_SYMBOL(iwl_poll_bit); + +u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) +{ + u32 value = 0x5a5a5a5a; + unsigned long flags; + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + value = iwl_read32(trans, reg); + iwl_trans_release_nic_access(trans, &flags); + } + + return value; +} +IWL_EXPORT_SYMBOL(iwl_read_direct32); + +void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) +{ + unsigned long flags; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + iwl_write32(trans, reg, value); + iwl_trans_release_nic_access(trans, &flags); + } +} +IWL_EXPORT_SYMBOL(iwl_write_direct32); + +int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, + int timeout) +{ + int t = 0; + + do { + if ((iwl_read_direct32(trans, addr) & mask) == mask) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} +IWL_EXPORT_SYMBOL(iwl_poll_direct_bit); + +u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs) +{ + u32 val = iwl_trans_read_prph(trans, ofs); + trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val); + return val; +} + +void 
__iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) +{ + trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val); + iwl_trans_write_prph(trans, ofs, val); +} + +u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) +{ + unsigned long flags; + u32 val = 0x5a5a5a5a; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + val = __iwl_read_prph(trans, ofs); + iwl_trans_release_nic_access(trans, &flags); + } + return val; +} +IWL_EXPORT_SYMBOL(iwl_read_prph); + +void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) +{ + unsigned long flags; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + __iwl_write_prph(trans, ofs, val); + iwl_trans_release_nic_access(trans, &flags); + } +} +IWL_EXPORT_SYMBOL(iwl_write_prph); + +int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int t = 0; + + do { + if ((iwl_read_prph(trans, addr) & mask) == (bits & mask)) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} + +void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) +{ + unsigned long flags; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + __iwl_write_prph(trans, ofs, + __iwl_read_prph(trans, ofs) | mask); + iwl_trans_release_nic_access(trans, &flags); + } +} +IWL_EXPORT_SYMBOL(iwl_set_bits_prph); + +void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, + u32 bits, u32 mask) +{ + unsigned long flags; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + __iwl_write_prph(trans, ofs, + (__iwl_read_prph(trans, ofs) & mask) | bits); + iwl_trans_release_nic_access(trans, &flags); + } +} +IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph); + +void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) +{ + unsigned long flags; + u32 val; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + val = __iwl_read_prph(trans, ofs); + __iwl_write_prph(trans, ofs, (val & ~mask)); + iwl_trans_release_nic_access(trans, &flags); + } +} +IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); + +void iwl_force_nmi(struct iwl_trans *trans) +{ + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + iwl_write_prph(trans, DEVICE_SET_NMI_REG, + DEVICE_SET_NMI_VAL_DRV); + iwl_write_prph(trans, DEVICE_SET_NMI_REG, + DEVICE_SET_NMI_VAL_HW); + } else { + iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG, + DEVICE_SET_NMI_8000_VAL); + iwl_write_prph(trans, DEVICE_SET_NMI_REG, + DEVICE_SET_NMI_VAL_DRV); + } +} +IWL_EXPORT_SYMBOL(iwl_force_nmi); + +static const char *get_fh_string(int cmd) +{ +#define IWL_CMD(x) case x: return #x + switch (cmd) { + IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); + IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); + IWL_CMD(FH_RSCSR_CHNL0_WPTR); + IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); + IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); + IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); + IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); + IWL_CMD(FH_TSSR_TX_STATUS_REG); + IWL_CMD(FH_TSSR_TX_ERROR_REG); + default: + return "UNKNOWN"; + } +#undef IWL_CMD +} + +int iwl_dump_fh(struct iwl_trans *trans, char **buf) +{ + int i; + static const u32 fh_tbl[] = { + FH_RSCSR_CHNL0_STTS_WPTR_REG, + FH_RSCSR_CHNL0_RBDCB_BASE_REG, + FH_RSCSR_CHNL0_WPTR, + FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_MEM_RSSR_SHARED_CTRL_REG, + FH_MEM_RSSR_RX_STATUS_REG, + FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, + FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_ERROR_REG + }; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (buf) { + int pos = 0; + size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return 
-ENOMEM; + + pos += scnprintf(*buf + pos, bufsz - pos, + "FH register values:\n"); + + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) + pos += scnprintf(*buf + pos, bufsz - pos, + " %34s: 0X%08x\n", + get_fh_string(fh_tbl[i]), + iwl_read_direct32(trans, fh_tbl[i])); + + return pos; + } +#endif + + IWL_ERR(trans, "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) + IWL_ERR(trans, " %34s: 0X%08x\n", + get_fh_string(fh_tbl[i]), + iwl_read_direct32(trans, fh_tbl[i])); + + return 0; +} diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h new file mode 100644 index 000000000000..501d0560c061 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -0,0 +1,73 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_io_h__ +#define __iwl_io_h__ + +#include "iwl-devtrace.h" +#include "iwl-trans.h" + +void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val); +void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val); +u32 iwl_read32(struct iwl_trans *trans, u32 ofs); + +static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) +{ + iwl_trans_set_bits_mask(trans, reg, mask, mask); +} + +static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) +{ + iwl_trans_set_bits_mask(trans, reg, mask, 0); +} + +int iwl_poll_bit(struct iwl_trans *trans, u32 addr, + u32 bits, u32 mask, int timeout); +int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, + int timeout); + +u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg); +void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); + + +u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs); +u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs); +void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); +void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); +int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, + u32 bits, u32 mask, int timeout); +void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); +void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, + u32 bits, u32 mask); +void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); +void iwl_force_nmi(struct iwl_trans *trans); + +/* Error handling */ +int iwl_dump_fh(struct iwl_trans *trans, char **buf); + +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h new file mode 100644 index 000000000000..ac2b90df8413 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -0,0 +1,129 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. 
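/*
 * Usage sketch for the I/O helpers declared above.  EXAMPLE_CTRL_REG and
 * EXAMPLE_READY_BIT are placeholders, not real CSR definitions; note that
 * iwl_poll_bit() timeouts are in microseconds (see IWL_POLL_INTERVAL).
 */
#define EXAMPLE_CTRL_REG	0x024
#define EXAMPLE_READY_BIT	BIT(0)

static int example_wait_device_ready(struct iwl_trans *trans)
{
	iwl_set_bit(trans, EXAMPLE_CTRL_REG, EXAMPLE_READY_BIT);

	if (iwl_poll_bit(trans, EXAMPLE_CTRL_REG,
			 EXAMPLE_READY_BIT, EXAMPLE_READY_BIT, 25000) < 0)
		return -ETIMEDOUT;

	return 0;
}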
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_modparams_h__ +#define __iwl_modparams_h__ + +#include +#include +#include +#include + +extern struct iwl_mod_params iwlwifi_mod_params; + +enum iwl_power_level { + IWL_POWER_INDEX_1, + IWL_POWER_INDEX_2, + IWL_POWER_INDEX_3, + IWL_POWER_INDEX_4, + IWL_POWER_INDEX_5, + IWL_POWER_NUM +}; + +enum iwl_disable_11n { + IWL_DISABLE_HT_ALL = BIT(0), + IWL_DISABLE_HT_TXAGG = BIT(1), + IWL_DISABLE_HT_RXAGG = BIT(2), + IWL_ENABLE_HT_TXAGG = BIT(3), +}; + +/** + * struct iwl_mod_params + * + * Holds the module parameters + * + * @sw_crypto: using hardware encryption, default = 0 + * @disable_11n: disable 11n capabilities, default = 0, + * use IWL_[DIS,EN]ABLE_HT_* constants + * @amsdu_size_8K: enable 8K amsdu size, default = 0 + * @restart_fw: restart firmware, default = 1 + * @bt_coex_active: enable bt coex, default = true + * @led_mode: system default, default = 0 + * @power_save: enable power save, default = false + * @power_level: power level, default = 1 + * @debug_level: levels are IWL_DL_* + * @ant_coupling: antenna coupling in dB, default = 0 + * @d0i3_disable: disable d0i3, default = 1, + * @lar_disable: disable LAR (regulatory), default = 0 + * @fw_monitor: allow to use firmware monitor + */ +struct iwl_mod_params { + int sw_crypto; + unsigned int disable_11n; + int amsdu_size_8K; + bool restart_fw; + bool bt_coex_active; + int led_mode; + bool power_save; + int power_level; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 debug_level; +#endif + int ant_coupling; + char *nvm_file; + bool uapsd_disable; + bool d0i3_disable; + bool lar_disable; + bool fw_monitor; +}; + +#endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c new file mode 100644 index 000000000000..6caf2affbbb5 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c @@ -0,0 +1,193 @@ 
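/*
 * Sketch of how the fields of struct iwl_mod_params above are typically
 * wired up to module parameters in the driver core; the defaults follow
 * the kernel-doc above, but the parameter name and description shown
 * here are illustrative.  Assumes <linux/module.h>/<linux/moduleparam.h>.
 */
struct iwl_mod_params iwlwifi_mod_params = {
	.restart_fw = true,
	.bt_coex_active = true,
	.power_level = IWL_POWER_INDEX_1,
	.d0i3_disable = true,
	/* remaining fields default to 0/false */
};

module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 = use hardware)");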
+/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include +#include + +#include "iwl-drv.h" +#include "iwl-notif-wait.h" + + +void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait) +{ + spin_lock_init(¬if_wait->notif_wait_lock); + INIT_LIST_HEAD(¬if_wait->notif_waits); + init_waitqueue_head(¬if_wait->notif_waitq); +} +IWL_EXPORT_SYMBOL(iwl_notification_wait_init); + +void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt) +{ + bool triggered = false; + + if (!list_empty(¬if_wait->notif_waits)) { + struct iwl_notification_wait *w; + + spin_lock(¬if_wait->notif_wait_lock); + list_for_each_entry(w, ¬if_wait->notif_waits, list) { + int i; + bool found = false; + + /* + * If it already finished (triggered) or has been + * aborted then don't evaluate it again to avoid races, + * Otherwise the function could be called again even + * though it returned true before + */ + if (w->triggered || w->aborted) + continue; + + for (i = 0; i < w->n_cmds; i++) { + if (w->cmds[i] == + WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { + found = true; + break; + } + } + if (!found) + continue; + + if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) { + w->triggered = true; + triggered = true; + } + } + spin_unlock(¬if_wait->notif_wait_lock); + + } + + if (triggered) + wake_up_all(¬if_wait->notif_waitq); +} +IWL_EXPORT_SYMBOL(iwl_notification_wait_notify); + +void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) +{ + struct iwl_notification_wait *wait_entry; + + spin_lock(¬if_wait->notif_wait_lock); + list_for_each_entry(wait_entry, ¬if_wait->notif_waits, list) + wait_entry->aborted = true; + spin_unlock(¬if_wait->notif_wait_lock); + + wake_up_all(¬if_wait->notif_waitq); +} +IWL_EXPORT_SYMBOL(iwl_abort_notification_waits); + +void +iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, + struct iwl_notification_wait *wait_entry, + const u16 *cmds, int n_cmds, + bool (*fn)(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data), + void *fn_data) +{ + if (WARN_ON(n_cmds > MAX_NOTIF_CMDS)) + n_cmds = MAX_NOTIF_CMDS; + + wait_entry->fn = fn; + wait_entry->fn_data = fn_data; + wait_entry->n_cmds = n_cmds; + memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16)); + wait_entry->triggered = false; + wait_entry->aborted = false; + + spin_lock_bh(¬if_wait->notif_wait_lock); + list_add(&wait_entry->list, ¬if_wait->notif_waits); + spin_unlock_bh(¬if_wait->notif_wait_lock); +} +IWL_EXPORT_SYMBOL(iwl_init_notification_wait); + +int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, + struct iwl_notification_wait *wait_entry, + unsigned long timeout) +{ + int ret; + + ret = wait_event_timeout(notif_wait->notif_waitq, + wait_entry->triggered || wait_entry->aborted, + timeout); + + spin_lock_bh(¬if_wait->notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(¬if_wait->notif_wait_lock); + + if (wait_entry->aborted) + return -EIO; + + /* return value is always >= 0 */ + if (ret <= 0) + return -ETIMEDOUT; + return 0; +} +IWL_EXPORT_SYMBOL(iwl_wait_notification); + +void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, + struct iwl_notification_wait *wait_entry) +{ + spin_lock_bh(¬if_wait->notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(¬if_wait->notif_wait_lock); +} +IWL_EXPORT_SYMBOL(iwl_remove_notification); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h new 
file mode 100644 index 000000000000..dbe8234521de --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h @@ -0,0 +1,139 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_notif_wait_h__ +#define __iwl_notif_wait_h__ + +#include + +#include "iwl-trans.h" + +struct iwl_notif_wait_data { + struct list_head notif_waits; + spinlock_t notif_wait_lock; + wait_queue_head_t notif_waitq; +}; + +#define MAX_NOTIF_CMDS 5 + +/** + * struct iwl_notification_wait - notification wait entry + * @list: list head for global list + * @fn: Function called with the notification. 
If the function + * returns true, the wait is over, if it returns false then + * the waiter stays blocked. If no function is given, any + * of the listed commands will unblock the waiter. + * @cmds: command IDs + * @n_cmds: number of command IDs + * @triggered: waiter should be woken up + * @aborted: wait was aborted + * + * This structure is not used directly, to wait for a + * notification declare it on the stack, and call + * iwlagn_init_notification_wait() with appropriate + * parameters. Then do whatever will cause the ucode + * to notify the driver, and to wait for that then + * call iwlagn_wait_notification(). + * + * Each notification is one-shot. If at some point we + * need to support multi-shot notifications (which + * can't be allocated on the stack) we need to modify + * the code for them. + */ +struct iwl_notification_wait { + struct list_head list; + + bool (*fn)(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt, void *data); + void *fn_data; + + u16 cmds[MAX_NOTIF_CMDS]; + u8 n_cmds; + bool triggered, aborted; +}; + + +/* caller functions */ +void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data); +void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt); +void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data); + +/* user functions */ +void __acquires(wait_entry) +iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, + struct iwl_notification_wait *wait_entry, + const u16 *cmds, int n_cmds, + bool (*fn)(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt, void *data), + void *fn_data); + +int __must_check __releases(wait_entry) +iwl_wait_notification(struct iwl_notif_wait_data *notif_data, + struct iwl_notification_wait *wait_entry, + unsigned long timeout); + +void __releases(wait_entry) +iwl_remove_notification(struct iwl_notif_wait_data *notif_data, + struct iwl_notification_wait *wait_entry); + +#endif /* __iwl_notif_wait_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c new file mode 100644 index 000000000000..d82984912e04 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -0,0 +1,844 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
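/*
 * Usage sketch for the notification-wait API above, following the
 * pattern described in the struct iwl_notification_wait kernel-doc (the
 * iwlagn_* names mentioned there correspond to the
 * iwl_init_notification_wait() and iwl_wait_notification() declarations
 * in this header).  EXAMPLE_NOTIF_CMD and the two-second timeout are
 * hypothetical.
 */
#define EXAMPLE_NOTIF_CMD	0x01

static int example_wait_for_reply(struct iwl_notif_wait_data *notif_wait)
{
	struct iwl_notification_wait wait;
	static const u16 cmds[] = { EXAMPLE_NOTIF_CMD };

	iwl_init_notification_wait(notif_wait, &wait, cmds, ARRAY_SIZE(cmds),
				   NULL, NULL);

	/* ...send the host command that makes the ucode reply here... */

	/* returns 0, -ETIMEDOUT on timeout or -EIO if the wait was aborted */
	return iwl_wait_notification(notif_wait, &wait, 2 * HZ);
}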
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#include +#include +#include +#include +#include +#include "iwl-drv.h" +#include "iwl-modparams.h" +#include "iwl-nvm-parse.h" + +/* NVM offsets (in words) definitions */ +enum wkp_nvm_offsets { + /* NVM HW-Section offset (in words) definitions */ + HW_ADDR = 0x15, + + /* NVM SW-Section offset (in words) definitions */ + NVM_SW_SECTION = 0x1C0, + NVM_VERSION = 0, + RADIO_CFG = 1, + SKU = 2, + N_HW_ADDRS = 3, + NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, + + /* NVM calibration section offset (in words) definitions */ + NVM_CALIB_SECTION = 0x2B8, + XTAL_CALIB = 0x316 - NVM_CALIB_SECTION +}; + +enum family_8000_nvm_offsets { + /* NVM HW-Section offset (in words) definitions */ + HW_ADDR0_WFPM_FAMILY_8000 = 0x12, + HW_ADDR1_WFPM_FAMILY_8000 = 0x16, + HW_ADDR0_PCIE_FAMILY_8000 = 0x8A, + HW_ADDR1_PCIE_FAMILY_8000 = 0x8E, + MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1, + + /* NVM SW-Section offset (in words) definitions */ + NVM_SW_SECTION_FAMILY_8000 = 0x1C0, + NVM_VERSION_FAMILY_8000 = 0, + RADIO_CFG_FAMILY_8000 = 0, + SKU_FAMILY_8000 = 2, + N_HW_ADDRS_FAMILY_8000 = 3, + + /* NVM REGULATORY -Section offset (in words) definitions */ + NVM_CHANNELS_FAMILY_8000 = 0, + NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7, + NVM_LAR_OFFSET_FAMILY_8000 = 0x507, + NVM_LAR_ENABLED_FAMILY_8000 = 0x7, + + /* NVM calibration section offset (in words) definitions */ + NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8, + XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000 +}; + +/* SKU Capabilities (actual values from NVM definition) */ +enum nvm_sku_bits { + NVM_SKU_CAP_BAND_24GHZ = BIT(0), + NVM_SKU_CAP_BAND_52GHZ = BIT(1), + NVM_SKU_CAP_11N_ENABLE = BIT(2), + NVM_SKU_CAP_11AC_ENABLE = BIT(3), + NVM_SKU_CAP_MIMO_DISABLE = BIT(5), +}; + +/* + * 
These are the channel numbers in the order that they are stored in the NVM + */ +static const u8 iwl_nvm_channels[] = { + /* 2.4 GHz */ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + /* 5 GHz */ + 36, 40, 44 , 48, 52, 56, 60, 64, + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, + 149, 153, 157, 161, 165 +}; + +static const u8 iwl_nvm_channels_family_8000[] = { + /* 2.4 GHz */ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + /* 5 GHz */ + 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, + 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, + 149, 153, 157, 161, 165, 169, 173, 177, 181 +}; + +#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) +#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000) +#define NUM_2GHZ_CHANNELS 14 +#define NUM_2GHZ_CHANNELS_FAMILY_8000 14 +#define FIRST_2GHZ_HT_MINUS 5 +#define LAST_2GHZ_HT_PLUS 9 +#define LAST_5GHZ_HT 165 +#define LAST_5GHZ_HT_FAMILY_8000 181 +#define N_HW_ADDR_MASK 0xF + +/* rate data (static) */ +static struct ieee80211_rate iwl_cfg80211_rates[] = { + { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, + { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, + .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, + { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, + { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, + { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, + { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, + { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, + { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, + { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, + { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, +}; +#define RATES_24_OFFS 0 +#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) +#define RATES_52_OFFS 4 +#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) + +/** + * enum iwl_nvm_channel_flags - channel flags in NVM + * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo + * @NVM_CHANNEL_IBSS: usable as an IBSS channel + * @NVM_CHANNEL_ACTIVE: active scanning allowed + * @NVM_CHANNEL_RADAR: radar detection required + * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed + * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS + * on same channel on 2.4 or same UNII band on 5.2 + * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?) + * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) + * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?) + * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?) + */ +enum iwl_nvm_channel_flags { + NVM_CHANNEL_VALID = BIT(0), + NVM_CHANNEL_IBSS = BIT(1), + NVM_CHANNEL_ACTIVE = BIT(3), + NVM_CHANNEL_RADAR = BIT(4), + NVM_CHANNEL_INDOOR_ONLY = BIT(5), + NVM_CHANNEL_GO_CONCURRENT = BIT(6), + NVM_CHANNEL_WIDE = BIT(8), + NVM_CHANNEL_40MHZ = BIT(9), + NVM_CHANNEL_80MHZ = BIT(10), + NVM_CHANNEL_160MHZ = BIT(11), +}; + +#define CHECK_AND_PRINT_I(x) \ + ((ch_flags & NVM_CHANNEL_##x) ? 
# x " " : "") + +static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, + u16 nvm_flags, const struct iwl_cfg *cfg) +{ + u32 flags = IEEE80211_CHAN_NO_HT40; + u32 last_5ghz_ht = LAST_5GHZ_HT; + + if (cfg->device_family == IWL_DEVICE_FAMILY_8000) + last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; + + if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { + if (ch_num <= LAST_2GHZ_HT_PLUS) + flags &= ~IEEE80211_CHAN_NO_HT40PLUS; + if (ch_num >= FIRST_2GHZ_HT_MINUS) + flags &= ~IEEE80211_CHAN_NO_HT40MINUS; + } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) { + if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) + flags &= ~IEEE80211_CHAN_NO_HT40PLUS; + else + flags &= ~IEEE80211_CHAN_NO_HT40MINUS; + } + if (!(nvm_flags & NVM_CHANNEL_80MHZ)) + flags |= IEEE80211_CHAN_NO_80MHZ; + if (!(nvm_flags & NVM_CHANNEL_160MHZ)) + flags |= IEEE80211_CHAN_NO_160MHZ; + + if (!(nvm_flags & NVM_CHANNEL_IBSS)) + flags |= IEEE80211_CHAN_NO_IR; + + if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) + flags |= IEEE80211_CHAN_NO_IR; + + if (nvm_flags & NVM_CHANNEL_RADAR) + flags |= IEEE80211_CHAN_RADAR; + + if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) + flags |= IEEE80211_CHAN_INDOOR_ONLY; + + /* Set the GO concurrent flag only in case that NO_IR is set. + * Otherwise it is meaningless + */ + if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && + (flags & IEEE80211_CHAN_NO_IR)) + flags |= IEEE80211_CHAN_IR_CONCURRENT; + + return flags; +} + +static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const __le16 * const nvm_ch_flags, + bool lar_supported) +{ + int ch_idx; + int n_channels = 0; + struct ieee80211_channel *channel; + u16 ch_flags; + bool is_5ghz; + int num_of_ch, num_2ghz_channels; + const u8 *nvm_chan; + + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { + num_of_ch = IWL_NUM_CHANNELS; + nvm_chan = &iwl_nvm_channels[0]; + num_2ghz_channels = NUM_2GHZ_CHANNELS; + } else { + num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000; + nvm_chan = &iwl_nvm_channels_family_8000[0]; + num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000; + } + + for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { + ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); + + if (ch_idx >= num_2ghz_channels && + !data->sku_cap_band_52GHz_enable) + continue; + + if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) { + /* + * Channels might become valid later if lar is + * supported, hence we still want to add them to + * the list of supported channels to cfg80211. + */ + IWL_DEBUG_EEPROM(dev, + "Ch. %d Flags %x [%sGHz] - No traffic\n", + nvm_chan[ch_idx], + ch_flags, + (ch_idx >= num_2ghz_channels) ? + "5.2" : "2.4"); + continue; + } + + channel = &data->channels[n_channels]; + n_channels++; + + channel->hw_value = nvm_chan[ch_idx]; + channel->band = (ch_idx < num_2ghz_channels) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + channel->center_freq = + ieee80211_channel_to_frequency( + channel->hw_value, channel->band); + + /* Initialize regulatory-based run-time data */ + + /* + * Default value - highest tx power value. max_power + * is not used in mvm, and is used for backwards compatibility + */ + channel->max_power = IWL_DEFAULT_MAX_TX_POWER; + is_5ghz = channel->band == IEEE80211_BAND_5GHZ; + + /* don't put limitations in case we're using LAR */ + if (!lar_supported) + channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], + ch_idx, is_5ghz, + ch_flags, cfg); + else + channel->flags = 0; + + IWL_DEBUG_EEPROM(dev, + "Ch. 
%d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", + channel->hw_value, + is_5ghz ? "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(INDOOR_ONLY), + CHECK_AND_PRINT_I(GO_CONCURRENT), + ch_flags, + channel->max_power, + ((ch_flags & NVM_CHANNEL_IBSS) && + !(ch_flags & NVM_CHANNEL_RADAR)) + ? "" : "not "); + } + + return n_channels; +} + +static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + struct ieee80211_sta_vht_cap *vht_cap, + u8 tx_chains, u8 rx_chains) +{ + int num_rx_ants = num_of_ant(rx_chains); + int num_tx_ants = num_of_ant(tx_chains); + unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?: + IEEE80211_VHT_MAX_AMPDU_1024K); + + vht_cap->vht_supported = true; + + vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_RXSTBC_1 | + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | + max_ampdu_exponent << + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + + if (cfg->ht_params->ldpc) + vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; + + if (data->sku_cap_mimo_disabled) { + num_rx_ants = 1; + num_tx_ants = 1; + } + + if (num_tx_ants > 1) + vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; + else + vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + + if (iwlwifi_mod_params.amsdu_size_8K) + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; + + vht_cap->vht_mcs.rx_mcs_map = + cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 14); + + if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) { + vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN; + /* this works because NOT_SUPPORTED == 3 */ + vht_cap->vht_mcs.rx_mcs_map |= + cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2); + } + + vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; +} + +static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const __le16 *ch_section, + u8 tx_chains, u8 rx_chains, bool lar_supported) +{ + int n_channels; + int n_used = 0; + struct ieee80211_supported_band *sband; + + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) + n_channels = iwl_init_channel_map( + dev, cfg, data, + &ch_section[NVM_CHANNELS], lar_supported); + else + n_channels = iwl_init_channel_map( + dev, cfg, data, + &ch_section[NVM_CHANNELS_FAMILY_8000], + lar_supported); + + sband = &data->bands[IEEE80211_BAND_2GHZ]; + sband->band = IEEE80211_BAND_2GHZ; + sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; + sband->n_bitrates = N_RATES_24; + n_used += iwl_init_sband_channels(data, sband, n_channels, + IEEE80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + tx_chains, rx_chains); + + sband = &data->bands[IEEE80211_BAND_5GHZ]; + sband->band = IEEE80211_BAND_5GHZ; + sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; + sband->n_bitrates = N_RATES_52; + n_used += iwl_init_sband_channels(data, sband, n_channels, + IEEE80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + tx_chains, rx_chains); + if (data->sku_cap_11ac_enable) + iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, + tx_chains, rx_chains); 
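The HT40 logic in iwl_get_channel_flags() above is terse; the following standalone plain-C illustration (simplified: it ignores the NVM_CHANNEL_40MHZ gate and the per-family last-channel limits) shows the same rule and can be compiled on its own:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors FIRST_2GHZ_HT_MINUS (5) and LAST_2GHZ_HT_PLUS (9) from the driver. */
static void ht40_directions(int ch_num, int idx_in_5ghz_list, bool is_5ghz,
			    bool *plus_ok, bool *minus_ok)
{
	if (!is_5ghz) {
		*plus_ok = ch_num <= 9;		/* LAST_2GHZ_HT_PLUS */
		*minus_ok = ch_num >= 5;	/* FIRST_2GHZ_HT_MINUS */
	} else {
		/* on 5 GHz the direction alternates with the list position */
		*plus_ok = (idx_in_5ghz_list % 2) == 0;
		*minus_ok = !*plus_ok;
	}
}

int main(void)
{
	bool plus, minus;

	ht40_directions(6, 0, false, &plus, &minus);
	printf("2.4 GHz ch 6:  HT40+ %d, HT40- %d\n", plus, minus);	/* 1 1 */
	ht40_directions(36, 0, true, &plus, &minus);
	printf("5 GHz ch 36:   HT40+ %d, HT40- %d\n", plus, minus);	/* 1 0 */
	return 0;
}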
+ + if (n_channels != n_used) + IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", + n_used, n_channels); +} + +static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, + const __le16 *phy_sku) +{ + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) + return le16_to_cpup(nvm_sw + SKU); + + return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000)); +} + +static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) +{ + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) + return le16_to_cpup(nvm_sw + NVM_VERSION); + else + return le32_to_cpup((__le32 *)(nvm_sw + + NVM_VERSION_FAMILY_8000)); +} + +static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, + const __le16 *phy_sku) +{ + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) + return le16_to_cpup(nvm_sw + RADIO_CFG); + + return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000)); + +} + +static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) +{ + int n_hw_addr; + + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) + return le16_to_cpup(nvm_sw + N_HW_ADDRS); + + n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); + + return n_hw_addr & N_HW_ADDR_MASK; +} + +static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + u32 radio_cfg) +{ + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { + data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); + data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); + data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); + data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg); + return; + } + + /* set the radio configuration for family 8000 */ + data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg); + data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg); + data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg); + data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg); + data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(radio_cfg); + data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg); +} + +static void iwl_set_hw_address(const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const __le16 *nvm_sec) +{ + const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR); + + /* The byte order is little endian 16 bit, meaning 214365 */ + data->hw_addr[0] = hw_addr[1]; + data->hw_addr[1] = hw_addr[0]; + data->hw_addr[2] = hw_addr[3]; + data->hw_addr[3] = hw_addr[2]; + data->hw_addr[4] = hw_addr[5]; + data->hw_addr[5] = hw_addr[4]; +} + +static void iwl_set_hw_address_family_8000(struct device *dev, + const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const __le16 *mac_override, + const __le16 *nvm_hw, + u32 mac_addr0, u32 mac_addr1) +{ + const u8 *hw_addr; + + if (mac_override) { + static const u8 reserved_mac[] = { + 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 + }; + + hw_addr = (const u8 *)(mac_override + + MAC_ADDRESS_OVERRIDE_FAMILY_8000); + + /* + * Store the MAC address from MAO section. + * No byte swapping is required in MAO section + */ + memcpy(data->hw_addr, hw_addr, ETH_ALEN); + + /* + * Force the use of the OTP MAC address in case of reserved MAC + * address in the NVM, or if address is given but invalid. 
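As a side note on iwl_set_hw_address() above: the "little endian 16 bit, meaning 214365" comment amounts to swapping each byte pair of the NVM words. A standalone plain-C illustration (the sample bytes are made up):

#include <stdio.h>

int main(void)
{
	const unsigned char nvm_words[6] = { 0x22, 0x11, 0x44, 0x33, 0x66, 0x55 };
	unsigned char mac[6];
	int i;

	for (i = 0; i < 6; i += 2) {	/* swap each 16-bit word's two bytes */
		mac[i] = nvm_words[i + 1];
		mac[i + 1] = nvm_words[i];
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 11:22:33:44:55:66 */
	return 0;
}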
+ */ + if (is_valid_ether_addr(data->hw_addr) && + memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) + return; + + IWL_ERR_DEV(dev, + "mac address from nvm override section is not valid\n"); + } + + if (nvm_hw) { + /* read the MAC address from HW resisters */ + hw_addr = (const u8 *)&mac_addr0; + data->hw_addr[0] = hw_addr[3]; + data->hw_addr[1] = hw_addr[2]; + data->hw_addr[2] = hw_addr[1]; + data->hw_addr[3] = hw_addr[0]; + + hw_addr = (const u8 *)&mac_addr1; + data->hw_addr[4] = hw_addr[1]; + data->hw_addr[5] = hw_addr[0]; + + if (!is_valid_ether_addr(data->hw_addr)) + IWL_ERR_DEV(dev, + "mac address from hw section is not valid\n"); + + return; + } + + IWL_ERR_DEV(dev, "mac address is not found\n"); +} + +#define IWL_4165_DEVICE_ID 0x5501 + +struct iwl_nvm_data * +iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, + const __le16 *nvm_hw, const __le16 *nvm_sw, + const __le16 *nvm_calib, const __le16 *regulatory, + const __le16 *mac_override, const __le16 *phy_sku, + u8 tx_chains, u8 rx_chains, bool lar_fw_supported, + u32 mac_addr0, u32 mac_addr1, u32 hw_id) +{ + struct iwl_nvm_data *data; + u32 sku; + u32 radio_cfg; + u16 lar_config; + + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) + data = kzalloc(sizeof(*data) + + sizeof(struct ieee80211_channel) * + IWL_NUM_CHANNELS, + GFP_KERNEL); + else + data = kzalloc(sizeof(*data) + + sizeof(struct ieee80211_channel) * + IWL_NUM_CHANNELS_FAMILY_8000, + GFP_KERNEL); + if (!data) + return NULL; + + data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw); + + radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku); + iwl_set_radio_cfg(cfg, data, radio_cfg); + if (data->valid_tx_ant) + tx_chains &= data->valid_tx_ant; + if (data->valid_rx_ant) + rx_chains &= data->valid_rx_ant; + + sku = iwl_get_sku(cfg, nvm_sw, phy_sku); + data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; + data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; + data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) + data->sku_cap_11n_enable = false; + data->sku_cap_11ac_enable = data->sku_cap_11n_enable && + (sku & NVM_SKU_CAP_11AC_ENABLE); + data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; + + /* + * OTP 0x52 bug work around + * define antenna 1x1 according to MIMO disabled + */ + if (hw_id == IWL_4165_DEVICE_ID && data->sku_cap_mimo_disabled) { + data->valid_tx_ant = ANT_B; + data->valid_rx_ant = ANT_B; + tx_chains = ANT_B; + rx_chains = ANT_B; + } + + data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); + + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { + /* Checking for required sections */ + if (!nvm_calib) { + IWL_ERR_DEV(dev, + "Can't parse empty Calib NVM sections\n"); + kfree(data); + return NULL; + } + /* in family 8000 Xtal calibration values moved to OTP */ + data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); + data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); + } + + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { + iwl_set_hw_address(cfg, data, nvm_hw); + + iwl_init_sbands(dev, cfg, data, nvm_sw, + tx_chains, rx_chains, lar_fw_supported); + } else { + u16 lar_offset = data->nvm_version < 0xE39 ? 
+ NVM_LAR_OFFSET_FAMILY_8000_OLD : + NVM_LAR_OFFSET_FAMILY_8000; + + lar_config = le16_to_cpup(regulatory + lar_offset); + data->lar_enabled = !!(lar_config & + NVM_LAR_ENABLED_FAMILY_8000); + + /* MAC address in family 8000 */ + iwl_set_hw_address_family_8000(dev, cfg, data, mac_override, + nvm_hw, mac_addr0, mac_addr1); + + iwl_init_sbands(dev, cfg, data, regulatory, + tx_chains, rx_chains, + lar_fw_supported && data->lar_enabled); + } + + data->calib_version = 255; + + return data; +} +IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); + +static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, + int ch_idx, u16 nvm_flags, + const struct iwl_cfg *cfg) +{ + u32 flags = NL80211_RRF_NO_HT40; + u32 last_5ghz_ht = LAST_5GHZ_HT; + + if (cfg->device_family == IWL_DEVICE_FAMILY_8000) + last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; + + if (ch_idx < NUM_2GHZ_CHANNELS && + (nvm_flags & NVM_CHANNEL_40MHZ)) { + if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) + flags &= ~NL80211_RRF_NO_HT40PLUS; + if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) + flags &= ~NL80211_RRF_NO_HT40MINUS; + } else if (nvm_chan[ch_idx] <= last_5ghz_ht && + (nvm_flags & NVM_CHANNEL_40MHZ)) { + if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) + flags &= ~NL80211_RRF_NO_HT40PLUS; + else + flags &= ~NL80211_RRF_NO_HT40MINUS; + } + + if (!(nvm_flags & NVM_CHANNEL_80MHZ)) + flags |= NL80211_RRF_NO_80MHZ; + if (!(nvm_flags & NVM_CHANNEL_160MHZ)) + flags |= NL80211_RRF_NO_160MHZ; + + if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) + flags |= NL80211_RRF_NO_IR; + + if (nvm_flags & NVM_CHANNEL_RADAR) + flags |= NL80211_RRF_DFS; + + if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) + flags |= NL80211_RRF_NO_OUTDOOR; + + /* Set the GO concurrent flag only in case that NO_IR is set. + * Otherwise it is meaningless + */ + if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && + (flags & NL80211_RRF_NO_IR)) + flags |= NL80211_RRF_GO_CONCURRENT; + + return flags; +} + +struct ieee80211_regdomain * +iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, + int num_of_ch, __le32 *channels, u16 fw_mcc) +{ + int ch_idx; + u16 ch_flags, prev_ch_flags = 0; + const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ? + iwl_nvm_channels_family_8000 : iwl_nvm_channels; + struct ieee80211_regdomain *regd; + int size_of_regd; + struct ieee80211_reg_rule *rule; + enum ieee80211_band band; + int center_freq, prev_center_freq = 0; + int valid_rules = 0; + bool new_rule; + int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ? + IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS; + + if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) + return ERR_PTR(-EINVAL); + + if (WARN_ON(num_of_ch > max_num_ch)) + num_of_ch = max_num_ch; + + IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", + num_of_ch); + + /* build a regdomain rule for every valid channel */ + size_of_regd = + sizeof(struct ieee80211_regdomain) + + num_of_ch * sizeof(struct ieee80211_reg_rule); + + regd = kzalloc(size_of_regd, GFP_KERNEL); + if (!regd) + return ERR_PTR(-ENOMEM); + + for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { + ch_flags = (u16)__le32_to_cpup(channels + ch_idx); + band = (ch_idx < NUM_2GHZ_CHANNELS) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], + band); + new_rule = false; + + if (!(ch_flags & NVM_CHANNEL_VALID)) { + IWL_DEBUG_DEV(dev, IWL_DL_LAR, + "Ch. %d Flags %x [%sGHz] - No traffic\n", + nvm_chan[ch_idx], + ch_flags, + (ch_idx >= NUM_2GHZ_CHANNELS) ? 
+ "5.2" : "2.4"); + continue; + } + + /* we can't continue the same rule */ + if (ch_idx == 0 || prev_ch_flags != ch_flags || + center_freq - prev_center_freq > 20) { + valid_rules++; + new_rule = true; + } + + rule = ®d->reg_rules[valid_rules - 1]; + + if (new_rule) + rule->freq_range.start_freq_khz = + MHZ_TO_KHZ(center_freq - 10); + + rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10); + + /* this doesn't matter - not used by FW */ + rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); + rule->power_rule.max_eirp = + DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); + + rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, + ch_flags, cfg); + + /* rely on auto-calculation to merge BW of contiguous chans */ + rule->flags |= NL80211_RRF_AUTO_BW; + rule->freq_range.max_bandwidth_khz = 0; + + prev_ch_flags = ch_flags; + prev_center_freq = center_freq; + + IWL_DEBUG_DEV(dev, IWL_DL_LAR, + "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", + center_freq, + band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(40MHZ), + CHECK_AND_PRINT_I(80MHZ), + CHECK_AND_PRINT_I(160MHZ), + CHECK_AND_PRINT_I(INDOOR_ONLY), + CHECK_AND_PRINT_I(GO_CONCURRENT), + ch_flags, + ((ch_flags & NVM_CHANNEL_ACTIVE) && + !(ch_flags & NVM_CHANNEL_RADAR)) + ? "" : "not "); + } + + regd->n_reg_rules = valid_rules; + + /* set alpha2 from FW. */ + regd->alpha2[0] = fw_mcc >> 8; + regd->alpha2[1] = fw_mcc & 0xff; + + return regd; +} +IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h new file mode 100644 index 000000000000..9f44d8188c5c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -0,0 +1,97 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#ifndef __iwl_nvm_parse_h__ +#define __iwl_nvm_parse_h__ + +#include +#include "iwl-eeprom-parse.h" + +/** + * iwl_parse_nvm_data - parse NVM data and return values + * + * This function parses all NVM values we need and then + * returns a (newly allocated) struct containing all the + * relevant values for driver use. The struct must be freed + * later with iwl_free_nvm_data(). + */ +struct iwl_nvm_data * +iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, + const __le16 *nvm_hw, const __le16 *nvm_sw, + const __le16 *nvm_calib, const __le16 *regulatory, + const __le16 *mac_override, const __le16 *phy_sku, + u8 tx_chains, u8 rx_chains, bool lar_fw_supported, + u32 mac_addr0, u32 mac_addr1, u32 hw_id); + +/** + * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW + * + * This function parses the regulatory channel data received as a + * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, + * to be fed into the regulatory core. An ERR_PTR is returned on error. + * If not given to the regulatory core, the user is responsible for freeing + * the regdomain returned here with kfree. + */ +struct ieee80211_regdomain * +iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, + int num_of_ch, __le32 *channels, u16 fw_mcc); + +#endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h new file mode 100644 index 000000000000..2a58d6833224 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -0,0 +1,271 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_op_mode_h__ +#define __iwl_op_mode_h__ + +#include +#include + +struct iwl_op_mode; +struct iwl_trans; +struct sk_buff; +struct iwl_device_cmd; +struct iwl_rx_cmd_buffer; +struct iwl_fw; +struct iwl_cfg; + +/** + * DOC: Operational mode - what is it ? + * + * The operational mode (a.k.a. op_mode) is the layer that implements + * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses + * the transport API to access the HW. The op_mode doesn't need to know how the + * underlying HW works, since the transport layer takes care of that. + * + * There can be several op_mode: i.e. different fw APIs will require two + * different op_modes. This is why the op_mode is virtualized. + */ + +/** + * DOC: Life cycle of the Operational mode + * + * The operational mode has a very simple life cycle. + * + * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the + * capabilities advertised by the fw file (in TLV format). 
+ * 2) The driver layer starts the op_mode (ops->start) + * 3) The op_mode registers mac80211 + * 4) The op_mode is governed by mac80211 + * 5) The driver layer stops the op_mode + */ + +/** + * struct iwl_op_mode_ops - op_mode specific operations + * + * The op_mode exports its ops so that external components can start it and + * interact with it. The driver layer typically calls the start and stop + * handlers, the transport layer calls the others. + * + * All the handlers MUST be implemented, except @rx_rss which can be left + * out *iff* the opmode will never run on hardware with multi-queue capability. + * + * @start: start the op_mode. The transport layer is already allocated. + * May sleep + * @stop: stop the op_mode. Must free all the memory allocated. + * May sleep + * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the + * HCMD this Rx responds to. Can't sleep. + * @rx_rss: data queue RX notification to the op_mode, for (data) notifications + * received on the RSS queue(s). The queue parameter indicates which of the + * RSS queues received this frame; it will always be non-zero. + * This method must not sleep. + * @queue_full: notifies that a HW queue is full. + * Must be atomic and called with BH disabled. + * @queue_not_full: notifies that a HW queue is not full any more. + * Must be atomic and called with BH disabled. + * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that + * the radio is killed. Return %true if the device should be stopped by + * the transport immediately after the call. May sleep. + * @free_skb: allows the transport layer to free skbs that haven't been + * reclaimed by the op_mode. This can happen when the driver is freed and + * there are Tx packets pending in the transport layer. + * Must be atomic + * @nic_error: error notification. Must be atomic and must be called with BH + * disabled. + * @cmd_queue_full: Called when the command queue gets full. Must be atomic and + * called with BH disabled. + * @nic_config: configure NIC, called before firmware is started. + * May sleep + * @wimax_active: invoked when WiMax becomes active. May sleep + * @enter_d0i3: configure the fw to enter d0i3. return 1 to indicate d0i3 + * entrance is aborted (e.g. due to held reference). May sleep. + * @exit_d0i3: configure the fw to exit d0i3. May sleep. 
+ */ +struct iwl_op_mode_ops { + struct iwl_op_mode *(*start)(struct iwl_trans *trans, + const struct iwl_cfg *cfg, + const struct iwl_fw *fw, + struct dentry *dbgfs_dir); + void (*stop)(struct iwl_op_mode *op_mode); + void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); + void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, unsigned int queue); + void (*queue_full)(struct iwl_op_mode *op_mode, int queue); + void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); + bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); + void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); + void (*nic_error)(struct iwl_op_mode *op_mode); + void (*cmd_queue_full)(struct iwl_op_mode *op_mode); + void (*nic_config)(struct iwl_op_mode *op_mode); + void (*wimax_active)(struct iwl_op_mode *op_mode); + int (*enter_d0i3)(struct iwl_op_mode *op_mode); + int (*exit_d0i3)(struct iwl_op_mode *op_mode); +}; + +int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops); +void iwl_opmode_deregister(const char *name); + +/** + * struct iwl_op_mode - operational mode + * @ops: pointer to its own ops + * + * This holds an implementation of the mac80211 / fw API. + */ +struct iwl_op_mode { + const struct iwl_op_mode_ops *ops; + + char op_mode_specific[0] __aligned(sizeof(void *)); +}; + +static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode) +{ + might_sleep(); + op_mode->ops->stop(op_mode); +} + +static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) +{ + return op_mode->ops->rx(op_mode, napi, rxb); +} + +static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, + unsigned int queue) +{ + op_mode->ops->rx_rss(op_mode, napi, rxb, queue); +} + +static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, + int queue) +{ + op_mode->ops->queue_full(op_mode, queue); +} + +static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode, + int queue) +{ + op_mode->ops->queue_not_full(op_mode, queue); +} + +static inline bool __must_check +iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state) +{ + might_sleep(); + return op_mode->ops->hw_rf_kill(op_mode, state); +} + +static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, + struct sk_buff *skb) +{ + op_mode->ops->free_skb(op_mode, skb); +} + +static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode) +{ + op_mode->ops->nic_error(op_mode); +} + +static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode) +{ + op_mode->ops->cmd_queue_full(op_mode); +} + +static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode) +{ + might_sleep(); + op_mode->ops->nic_config(op_mode); +} + +static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode) +{ + might_sleep(); + op_mode->ops->wimax_active(op_mode); +} + +static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode) +{ + might_sleep(); + + if (!op_mode->ops->enter_d0i3) + return 0; + return op_mode->ops->enter_d0i3(op_mode); +} + +static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode) +{ + might_sleep(); + + if (!op_mode->ops->exit_d0i3) + return 0; + return op_mode->ops->exit_d0i3(op_mode); +} + +#endif /* __iwl_op_mode_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c 
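An op_mode implements the ops table above and registers it by name with the driver layer, which is how the DVM and MVM op_modes plug in. A hedged sketch of that pattern; every my_* handler and the "my_opmode" name are placeholders, and most of the mandatory handlers are elided for brevity:

static const struct iwl_op_mode_ops my_op_mode_ops = {
	.start		= my_start,	/* allocate state, register with mac80211 */
	.stop		= my_stop,	/* undo everything done in start */
	.rx		= my_rx,	/* notifications from the default RX queue */
	.hw_rf_kill	= my_hw_rf_kill,
	/* ... the remaining mandatory handlers go here ... */
	/* .rx_rss may be left out only on single-queue hardware */
};

static int __init my_opmode_init(void)
{
	return iwl_opmode_register("my_opmode", &my_op_mode_ops);
}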
new file mode 100644 index 000000000000..a105455b6a24 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c @@ -0,0 +1,471 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include + +#include "iwl-drv.h" +#include "iwl-phy-db.h" +#include "iwl-debug.h" +#include "iwl-op-mode.h" +#include "iwl-trans.h" + +#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ +#define IWL_NUM_PAPD_CH_GROUPS 9 +#define IWL_NUM_TXP_CH_GROUPS 9 + +struct iwl_phy_db_entry { + u16 size; + u8 *data; +}; + +/** + * struct iwl_phy_db - stores phy configuration and calibration data. + * + * @cfg: phy configuration. 
+ * @calib_nch: non channel specific calibration data. + * @calib_ch: channel specific calibration data. + * @calib_ch_group_papd: calibration data related to papd channel group. + * @calib_ch_group_txp: calibration data related to tx power chanel group. + */ +struct iwl_phy_db { + struct iwl_phy_db_entry cfg; + struct iwl_phy_db_entry calib_nch; + struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; + struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; + + struct iwl_trans *trans; +}; + +enum iwl_phy_db_section_type { + IWL_PHY_DB_CFG = 1, + IWL_PHY_DB_CALIB_NCH, + IWL_PHY_DB_UNUSED, + IWL_PHY_DB_CALIB_CHG_PAPD, + IWL_PHY_DB_CALIB_CHG_TXP, + IWL_PHY_DB_MAX +}; + +#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */ + +/* + * phy db - configure operational ucode + */ +struct iwl_phy_db_cmd { + __le16 type; + __le16 length; + u8 data[]; +} __packed; + +/* for parsing of tx power channel group data that comes from the firmware*/ +struct iwl_phy_db_chg_txp { + __le32 space; + __le16 max_channel_idx; +} __packed; + +/* + * phy db - Receive phy db chunk after calibrations + */ +struct iwl_calib_res_notif_phy_db { + __le16 type; + __le16 length; + u8 data[]; +} __packed; + +struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans) +{ + struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db), + GFP_KERNEL); + + if (!phy_db) + return phy_db; + + phy_db->trans = trans; + + /* TODO: add default values of the phy db. */ + return phy_db; +} +IWL_EXPORT_SYMBOL(iwl_phy_db_init); + +/* + * get phy db section: returns a pointer to a phy db section specified by + * type and channel group id. + */ +static struct iwl_phy_db_entry * +iwl_phy_db_get_section(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, + u16 chg_id) +{ + if (!phy_db || type >= IWL_PHY_DB_MAX) + return NULL; + + switch (type) { + case IWL_PHY_DB_CFG: + return &phy_db->cfg; + case IWL_PHY_DB_CALIB_NCH: + return &phy_db->calib_nch; + case IWL_PHY_DB_CALIB_CHG_PAPD: + if (chg_id >= IWL_NUM_PAPD_CH_GROUPS) + return NULL; + return &phy_db->calib_ch_group_papd[chg_id]; + case IWL_PHY_DB_CALIB_CHG_TXP: + if (chg_id >= IWL_NUM_TXP_CH_GROUPS) + return NULL; + return &phy_db->calib_ch_group_txp[chg_id]; + default: + return NULL; + } + return NULL; +} + +static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, + u16 chg_id) +{ + struct iwl_phy_db_entry *entry = + iwl_phy_db_get_section(phy_db, type, chg_id); + if (!entry) + return; + + kfree(entry->data); + entry->data = NULL; + entry->size = 0; +} + +void iwl_phy_db_free(struct iwl_phy_db *phy_db) +{ + int i; + + if (!phy_db) + return; + + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); + for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); + for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i); + + kfree(phy_db); +} +IWL_EXPORT_SYMBOL(iwl_phy_db_free); + +int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, + gfp_t alloc_ctx) +{ + struct iwl_calib_res_notif_phy_db *phy_db_notif = + (struct iwl_calib_res_notif_phy_db *)pkt->data; + enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type); + u16 size = le16_to_cpu(phy_db_notif->length); + struct iwl_phy_db_entry *entry; + u16 chg_id = 0; + + if (!phy_db) + return -EINVAL; + + if (type == IWL_PHY_DB_CALIB_CHG_PAPD || + type == 
IWL_PHY_DB_CALIB_CHG_TXP) + chg_id = le16_to_cpup((__le16 *)phy_db_notif->data); + + entry = iwl_phy_db_get_section(phy_db, type, chg_id); + if (!entry) + return -EINVAL; + + kfree(entry->data); + entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx); + if (!entry->data) { + entry->size = 0; + return -ENOMEM; + } + + entry->size = size; + + IWL_DEBUG_INFO(phy_db->trans, + "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", + __func__, __LINE__, type, size); + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_phy_db_set_section); + +static int is_valid_channel(u16 ch_id) +{ + if (ch_id <= 14 || + (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) || + (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) || + (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1)) + return 1; + return 0; +} + +static u8 ch_id_to_ch_index(u16 ch_id) +{ + if (WARN_ON(!is_valid_channel(ch_id))) + return 0xff; + + if (ch_id <= 14) + return ch_id - 1; + if (ch_id <= 64) + return (ch_id + 20) / 4; + if (ch_id <= 140) + return (ch_id - 12) / 4; + return (ch_id - 13) / 4; +} + + +static u16 channel_id_to_papd(u16 ch_id) +{ + if (WARN_ON(!is_valid_channel(ch_id))) + return 0xff; + + if (1 <= ch_id && ch_id <= 14) + return 0; + if (36 <= ch_id && ch_id <= 64) + return 1; + if (100 <= ch_id && ch_id <= 140) + return 2; + return 3; +} + +static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id) +{ + struct iwl_phy_db_chg_txp *txp_chg; + int i; + u8 ch_index = ch_id_to_ch_index(ch_id); + if (ch_index == 0xff) + return 0xff; + + for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) { + txp_chg = (void *)phy_db->calib_ch_group_txp[i].data; + if (!txp_chg) + return 0xff; + /* + * Looking for the first channel group that its max channel is + * higher then wanted channel. + */ + if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index) + return i; + } + return 0xff; +} +static +int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, + u32 type, u8 **data, u16 *size, u16 ch_id) +{ + struct iwl_phy_db_entry *entry; + u16 ch_group_id = 0; + + if (!phy_db) + return -EINVAL; + + /* find wanted channel group */ + if (type == IWL_PHY_DB_CALIB_CHG_PAPD) + ch_group_id = channel_id_to_papd(ch_id); + else if (type == IWL_PHY_DB_CALIB_CHG_TXP) + ch_group_id = channel_id_to_txp(phy_db, ch_id); + + entry = iwl_phy_db_get_section(phy_db, type, ch_group_id); + if (!entry) + return -EINVAL; + + *data = entry->data; + *size = entry->size; + + IWL_DEBUG_INFO(phy_db->trans, + "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", + __func__, __LINE__, type, *size); + + return 0; +} + +static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type, + u16 length, void *data) +{ + struct iwl_phy_db_cmd phy_db_cmd; + struct iwl_host_cmd cmd = { + .id = PHY_DB_CMD, + }; + + IWL_DEBUG_INFO(phy_db->trans, + "Sending PHY-DB hcmd of type %d, of length %d\n", + type, length); + + /* Set phy db cmd variables */ + phy_db_cmd.type = cpu_to_le16(type); + phy_db_cmd.length = cpu_to_le16(length); + + /* Set hcmd variables */ + cmd.data[0] = &phy_db_cmd; + cmd.len[0] = sizeof(struct iwl_phy_db_cmd); + cmd.data[1] = data; + cmd.len[1] = length; + cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; + + return iwl_trans_send_cmd(phy_db->trans, &cmd); +} + +static int iwl_phy_db_send_all_channel_groups( + struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, + u8 max_ch_groups) +{ + u16 i; + int err; + struct iwl_phy_db_entry *entry; + + /* Send all the channel specific groups to operational fw */ + for (i = 0; i < max_ch_groups; i++) { + entry = iwl_phy_db_get_section(phy_db, + type, + i); + if 
(!entry) + return -EINVAL; + + if (!entry->size) + continue; + + /* Send the requested PHY DB section */ + err = iwl_send_phy_db_cmd(phy_db, + type, + entry->size, + entry->data); + if (err) { + IWL_ERR(phy_db->trans, + "Can't SEND phy_db section %d (%d), err %d\n", + type, i, err); + return err; + } + + IWL_DEBUG_INFO(phy_db->trans, + "Sent PHY_DB HCMD, type = %d num = %d\n", + type, i); + } + + return 0; +} + +int iwl_send_phy_db_data(struct iwl_phy_db *phy_db) +{ + u8 *data = NULL; + u16 size = 0; + int err; + + IWL_DEBUG_INFO(phy_db->trans, + "Sending phy db data and configuration to runtime image\n"); + + /* Send PHY DB CFG section */ + err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG, + &data, &size, 0); + if (err) { + IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n"); + return err; + } + + err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot send HCMD of Phy DB cfg section\n"); + return err; + } + + err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH, + &data, &size, 0); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot get Phy DB non specific channel section\n"); + return err; + } + + err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot send HCMD of Phy DB non specific channel section\n"); + return err; + } + + /* Send all the TXP channel specific data */ + err = iwl_phy_db_send_all_channel_groups(phy_db, + IWL_PHY_DB_CALIB_CHG_PAPD, + IWL_NUM_PAPD_CH_GROUPS); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot send channel specific PAPD groups\n"); + return err; + } + + /* Send all the TXP channel specific data */ + err = iwl_phy_db_send_all_channel_groups(phy_db, + IWL_PHY_DB_CALIB_CHG_TXP, + IWL_NUM_TXP_CH_GROUPS); + if (err) { + IWL_ERR(phy_db->trans, + "Cannot send channel specific TX power groups\n"); + return err; + } + + IWL_DEBUG_INFO(phy_db->trans, + "Finished sending phy db non channel data\n"); + return 0; +} +IWL_EXPORT_SYMBOL(iwl_send_phy_db_data); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h new file mode 100644 index 000000000000..9ee18d0d2d01 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h @@ -0,0 +1,82 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __IWL_PHYDB_H__ +#define __IWL_PHYDB_H__ + +#include + +#include "iwl-op-mode.h" +#include "iwl-trans.h" + +struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans); + +void iwl_phy_db_free(struct iwl_phy_db *phy_db); + +int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, + gfp_t alloc_ctx); + + +int iwl_send_phy_db_data(struct iwl_phy_db *phy_db); + +#endif /* __IWL_PHYDB_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h new file mode 100644 index 000000000000..3ab777f79e4f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -0,0 +1,401 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_prph_h__ +#define __iwl_prph_h__ + +/* + * Registers in this file are internal, not PCI bus memory mapped. + * Driver accesses these via HBUS_TARG_PRPH_* registers. 
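[Editor's sketch, not part of the original patch: the driver does not dereference these addresses over the BAR; it goes through the periphery accessors declared in iwl-io.h (iwl_read_prph(), iwl_write_prph(), iwl_set_bits_prph(), ...), which perform the indirect HBUS target access on a woken NIC. Given a struct iwl_trans *trans and two of the APMG registers defined just below, a minimal use looks like:

	u32 clk_ctrl;

	/* read a periphery register through the indirect HBUS target access */
	clk_ctrl = iwl_read_prph(trans, APMG_CLK_CTRL_REG);

	/* set a bit in a periphery register, e.g. disable L1 Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
]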
+ */ +#define PRPH_BASE (0x00000) +#define PRPH_END (0xFFFFF) + +/* APMG (power management) constants */ +#define APMG_BASE (PRPH_BASE + 0x3000) +#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) +#define APMG_CLK_EN_REG (APMG_BASE + 0x0004) +#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) +#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) +#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) +#define APMG_RFKILL_REG (APMG_BASE + 0x0014) +#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) +#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) +#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058) +#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C) + +#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001) +#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) +#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) + +#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000) +#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) +#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) +#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ +#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) + +#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200) +#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) +#define APMG_PCIDEV_STT_VAL_WAKE_ME (0x00004000) + +#define APMG_RTC_INT_STT_RFKILL (0x10000000) + +/* Device system time */ +#define DEVICE_SYSTEM_TIME_REG 0xA0206C + +/* Device NMI register */ +#define DEVICE_SET_NMI_REG 0x00a01c30 +#define DEVICE_SET_NMI_VAL_HW BIT(0) +#define DEVICE_SET_NMI_VAL_DRV BIT(7) +#define DEVICE_SET_NMI_8000_REG 0x00a01c24 +#define DEVICE_SET_NMI_8000_VAL 0x1000000 + +/* Shared registers (0x0..0x3ff, via target indirect or periphery */ +#define SHR_BASE 0x00a10000 + +/* Shared GP1 register */ +#define SHR_APMG_GP1_REG 0x01dc +#define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG) +#define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004 +#define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000 + +/* Shared DL_CFG register */ +#define SHR_APMG_DL_CFG_REG 0x01c4 +#define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG) +#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0 +#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080 +#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100 + +/* Shared APMG_XTAL_CFG register */ +#define SHR_APMG_XTAL_CFG_REG 0x1c0 +#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000 + +/* + * Device reset for family 8000 + * write to bit 24 in order to reset the CPU +*/ +#define RELEASE_CPU_RESET (0x300C) +#define RELEASE_CPU_RESET_BIT BIT(24) + +/***************************************************************************** + * 7000/3000 series SHR DTS addresses * + *****************************************************************************/ + +#define SHR_MISC_WFM_DTS_EN (0x00a10024) +#define DTSC_CFG_MODE (0x00a10604) +#define DTSC_VREF_AVG (0x00a10648) +#define DTSC_VREF5_AVG (0x00a1064c) +#define DTSC_CFG_MODE_PERIODIC (0x2) +#define DTSC_PTAT_AVG (0x00a10650) + + +/** + * Tx Scheduler + * + * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs + * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in + * host DRAM. It steers each frame's Tx command (which contains the frame + * data) into one of up to 7 prioritized Tx DMA FIFO channels within the + * device. A queue maps to only one (selectable by driver) Tx DMA channel, + * but one DMA channel may take input from several queues. + * + * Tx DMA FIFOs have dedicated purposes. 
+ * + * For 5000 series and up, they are used differently + * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c): + * + * 0 -- EDCA BK (background) frames, lowest priority + * 1 -- EDCA BE (best effort) frames, normal priority + * 2 -- EDCA VI (video) frames, higher priority + * 3 -- EDCA VO (voice) and management frames, highest priority + * 4 -- unused + * 5 -- unused + * 6 -- unused + * 7 -- Commands + * + * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. + * In addition, driver can map the remaining queues to Tx DMA/FIFO + * channels 0-3 to support 11n aggregation via EDCA DMA channels. + * + * The driver sets up each queue to work in one of two modes: + * + * 1) Scheduler-Ack, in which the scheduler automatically supports a + * block-ack (BA) window of up to 64 TFDs. In this mode, each queue + * contains TFDs for a unique combination of Recipient Address (RA) + * and Traffic Identifier (TID), that is, traffic of a given + * Quality-Of-Service (QOS) priority, destined for a single station. + * + * In scheduler-ack mode, the scheduler keeps track of the Tx status of + * each frame within the BA window, including whether it's been transmitted, + * and whether it's been acknowledged by the receiving station. The device + * automatically processes block-acks received from the receiving STA, + * and reschedules un-acked frames to be retransmitted (successful + * Tx completion may end up being out-of-order). + * + * The driver must maintain the queue's Byte Count table in host DRAM + * for this mode. + * This mode does not support fragmentation. + * + * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. + * The device may automatically retry Tx, but will retry only one frame + * at a time, until receiving ACK from receiving station, or reaching + * retry limit and giving up. + * + * The command queue (#4/#9) must use this mode! + * This mode does not require use of the Byte Count table in host DRAM. + * + * Driver controls scheduler operation via 3 means: + * 1) Scheduler registers + * 2) Shared scheduler data base in internal SRAM + * 3) Shared data in host DRAM + * + * Initialization: + * + * When loading, driver should allocate memory for: + * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs. + * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory + * (1024 bytes for each queue). + * + * After receiving "Alive" response from uCode, driver must initialize + * the scheduler (especially for queue #4/#9, the command queue, otherwise + * the driver can't issue commands!): + */ +#define SCD_MEM_LOWER_BOUND (0x0000) + +/** + * Max Tx window size is the max number of contiguous TFDs that the scheduler + * can keep track of at one time when creating block-ack chains of frames. + * Note that "64" matches the number of ack bits in a block-ack packet. 
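[Editor's sketch, not part of the original patch: this is roughly how the window and frame-limit constants reach word 2 of a queue's context in SCD SRAM, loosely modeled on the PCIe transport's queue-enable path. Here scd_base_addr and txq_id are assumed to be known (the SRAM base is normally read from SCD_SRAM_BASE_ADDR, defined below), and the field layout is the CTX_REG2 one defined below:

	/* word 2 of the queue context: BA window size and frame limit */
	iwl_trans_write_mem32(trans,
			      scd_base_addr +
			      SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			      ((SCD_WIN_SIZE << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			       SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			      ((SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			       SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
]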
+ */ +#define SCD_WIN_SIZE 64 +#define SCD_FRAME_LIMIT 64 + +#define SCD_TXFIFO_POS_TID (0) +#define SCD_TXFIFO_POS_RA (4) +#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) + +/* agn SCD */ +#define SCD_QUEUE_STTS_REG_POS_TXF (0) +#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3) +#define SCD_QUEUE_STTS_REG_POS_WSL (4) +#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) +#define SCD_QUEUE_STTS_REG_MSK (0x017F0000) + +#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8) +#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) +#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) +#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) +#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0) +#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F) +#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) +#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0) +#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18) + +/* Context Data */ +#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) +#define SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) + +/* Tx status */ +#define SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) +#define SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) + +/* Translation Data */ +#define SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) +#define SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808) + +#define SCD_CONTEXT_QUEUE_OFFSET(x)\ + (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8)) + +#define SCD_TX_STTS_QUEUE_OFFSET(x)\ + (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16)) + +#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \ + ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) + +#define SCD_BASE (PRPH_BASE + 0xa02c00) + +#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0) +#define SCD_DRAM_BASE_ADDR (SCD_BASE + 0x8) +#define SCD_AIT (SCD_BASE + 0x0c) +#define SCD_TXFACT (SCD_BASE + 0x10) +#define SCD_ACTIVE (SCD_BASE + 0x14) +#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) +#define SCD_CHAINEXT_EN (SCD_BASE + 0x244) +#define SCD_AGGR_SEL (SCD_BASE + 0x248) +#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) +#define SCD_GP_CTRL (SCD_BASE + 0x1a8) +#define SCD_EN_CTRL (SCD_BASE + 0x254) + +/*********************** END TX SCHEDULER *************************************/ + +/* tcp checksum offload */ +#define RX_EN_CSUM (0x00a00d88) + +/* Oscillator clock */ +#define OSC_CLK (0xa04068) +#define OSC_CLK_FORCE_CONTROL (0x8) + +#define FH_UCODE_LOAD_STATUS (0x1AF0) +#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70) +enum secure_load_status_reg { + LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001, + LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003, + LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007, + LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8, + LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00, +}; + +#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38) +#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C) +#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78) +#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C) + +#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000) +#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000) +#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000) +#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400) + +/* Rx FIFO */ +#define RXF_SIZE_ADDR (0xa00c88) +#define RXF_RD_D_SPACE (0xa00c40) +#define RXF_RD_WR_PTR (0xa00c50) +#define RXF_RD_RD_PTR (0xa00c54) +#define RXF_RD_FENCE_PTR (0xa00c4c) +#define RXF_SET_FENCE_MODE (0xa00c14) +#define RXF_LD_WR2FENCE (0xa00c1c) +#define RXF_FIFO_RD_FENCE_INC (0xa00c68) +#define RXF_SIZE_BYTE_CND_POS (7) +#define 
RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS) +#define RXF_DIFF_FROM_PREV (0x200) + +#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10) +#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c) + +/* Tx FIFO */ +#define TXF_FIFO_ITEM_CNT (0xa00438) +#define TXF_WR_PTR (0xa00414) +#define TXF_RD_PTR (0xa00410) +#define TXF_FENCE_PTR (0xa00418) +#define TXF_LOCK_FENCE (0xa00424) +#define TXF_LARC_NUM (0xa0043c) +#define TXF_READ_MODIFY_DATA (0xa00448) +#define TXF_READ_MODIFY_ADDR (0xa0044c) + +/* FW monitor */ +#define MON_BUFF_SAMPLE_CTL (0xa03c00) +#define MON_BUFF_BASE_ADDR (0xa03c3c) +#define MON_BUFF_END_ADDR (0xa03c40) +#define MON_BUFF_WRPTR (0xa03c44) +#define MON_BUFF_CYCLE_CNT (0xa03c48) + +#define MON_DMARB_RD_CTL_ADDR (0xa03c60) +#define MON_DMARB_RD_DATA_ADDR (0xa03c5c) + +#define DBGC_IN_SAMPLE (0xa03c00) + +/* enable the ID buf for read */ +#define WFPM_PS_CTL_CLR 0xA0300C +#define WFMP_MAC_ADDR_0 0xA03080 +#define WFMP_MAC_ADDR_1 0xA03084 +#define LMPM_PMG_EN 0xA01CEC +#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 +#define RFIC_REG_RD 0xAD0470 +#define WFPM_CTRL_REG 0xA03030 +enum { + ENABLE_WFPM = BIT(31), + WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, +}; + +#define AUX_MISC_REG 0xA200B0 +enum { + HW_STEP_LOCATION_BITS = 24, +}; + +#define AUX_MISC_MASTER1_EN 0xA20818 +enum aux_misc_master1_en { + AUX_MISC_MASTER1_EN_SBE_MSK = 0x1, +}; + +#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 +#define RSA_ENABLE 0xA24B08 +#define PREG_AUX_BUS_WPROT_0 0xA04CC0 +#define SB_CPU_1_STATUS 0xA01E30 +#define SB_CPU_2_STATUS 0xA01E34 + +/* FW chicken bits */ +#define LMPM_CHICK 0xA01FF8 +enum { + LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0), +}; + +/* FW chicken bits */ +#define LMPM_PAGE_PASS_NOTIF 0xA03824 +enum { + LMPM_PAGE_PASS_NOTIF_POS = BIT(20), +}; + +#endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h new file mode 100644 index 000000000000..f2353ebf2666 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h @@ -0,0 +1,143 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_scd_h__ +#define __iwl_scd_h__ + +#include "iwl-trans.h" +#include "iwl-io.h" +#include "iwl-prph.h" + + +static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_txq_enable_agg(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_txq_disable_agg(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_disable_agg(struct iwl_trans *trans) +{ + iwl_set_bits_prph(trans, SCD_AGGR_SEL, 0); +} + +static inline void iwl_scd_activate_fifos(struct iwl_trans *trans) +{ + iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7)); +} + +static inline void iwl_scd_deactivate_fifos(struct iwl_trans *trans) +{ + iwl_write_prph(trans, SCD_TXFACT, 0); +} + +static inline void iwl_scd_enable_set_active(struct iwl_trans *trans, + u32 value) +{ + iwl_write_prph(trans, SCD_EN_CTRL, value); +} + +static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl) +{ + if (chnl < 20) + return SCD_BASE + 0x18 + chnl * 4; + WARN_ON_ONCE(chnl >= 32); + return SCD_BASE + 0x284 + (chnl - 20) * 4; +} + +static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl) +{ + if (chnl < 20) + return SCD_BASE + 0x68 + chnl * 4; + WARN_ON_ONCE(chnl >= 32); + return SCD_BASE + 0x2B4 + chnl * 4; +} + +static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl) +{ + if (chnl < 20) + return SCD_BASE + 0x10c + chnl * 4; + WARN_ON_ONCE(chnl >= 32); + return SCD_BASE + 0x334 + chnl * 4; +} + +static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +#endif diff --git 
a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c new file mode 100644 index 000000000000..71610968c365 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -0,0 +1,114 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include +#include "iwl-trans.h" + +struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, + struct device *dev, + const struct iwl_cfg *cfg, + const struct iwl_trans_ops *ops, + size_t dev_cmd_headroom) +{ + struct iwl_trans *trans; +#ifdef CONFIG_LOCKDEP + static struct lock_class_key __key; +#endif + + trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL); + if (!trans) + return NULL; + +#ifdef CONFIG_LOCKDEP + lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", + &__key, 0); +#endif + + trans->dev = dev; + trans->cfg = cfg; + trans->ops = ops; + trans->dev_cmd_headroom = dev_cmd_headroom; + trans->num_rx_queues = 1; + + snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), + "iwl_cmd_pool:%s", dev_name(trans->dev)); + trans->dev_cmd_pool = + kmem_cache_create(trans->dev_cmd_pool_name, + sizeof(struct iwl_device_cmd) + + trans->dev_cmd_headroom, + sizeof(void *), + SLAB_HWCACHE_ALIGN, + NULL); + if (!trans->dev_cmd_pool) + goto free; + + return trans; + free: + kfree(trans); + return NULL; +} + +void iwl_trans_free(struct iwl_trans *trans) +{ + kmem_cache_destroy(trans->dev_cmd_pool); + kfree(trans); +} diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h new file mode 100644 index 000000000000..6f76525088f0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -0,0 +1,1125 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_trans_h__ +#define __iwl_trans_h__ + +#include +#include /* for page_address */ +#include + +#include "iwl-debug.h" +#include "iwl-config.h" +#include "iwl-fw.h" +#include "iwl-op-mode.h" + +/** + * DOC: Transport layer - what is it ? + * + * The transport layer is the layer that deals with the HW directly. It provides + * an abstraction of the underlying HW to the upper layer. The transport layer + * doesn't provide any policy, algorithm or anything of this kind, but only + * mechanisms to make the HW do something. It is not completely stateless but + * close to it. + * We will have an implementation for each different supported bus. + */ + +/** + * DOC: Life cycle of the transport layer + * + * The transport layer has a very precise life cycle. + * + * 1) A helper function is called during the module initialization and + * registers the bus driver's ops with the transport's alloc function. + * 2) Bus's probe calls to the transport layer's allocation functions. + * Of course this function is bus specific. + * 3) This allocation functions will spawn the upper layer which will + * register mac80211. + * + * 4) At some point (i.e. mac80211's start call), the op_mode will call + * the following sequence: + * start_hw + * start_fw + * + * 5) Then when finished (or reset): + * stop_device + * + * 6) Eventually, the free function will be called. + */ + +/** + * DOC: Host command section + * + * A host command is a command issued by the upper layer to the fw. There are + * several versions of fw that have several APIs. The transport layer is + * completely agnostic to these differences. + * The transport does provide helper functionality (i.e. 
SYNC / ASYNC mode), + */ +#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) +#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) +#define SEQ_TO_INDEX(s) ((s) & 0xff) +#define INDEX_TO_SEQ(i) ((i) & 0xff) +#define SEQ_RX_FRAME cpu_to_le16(0x8000) + +/* + * those functions retrieve specific information from + * the id field in the iwl_host_cmd struct which contains + * the command id, the group id and the version of the command + * and vice versa +*/ +static inline u8 iwl_cmd_opcode(u32 cmdid) +{ + return cmdid & 0xFF; +} + +static inline u8 iwl_cmd_groupid(u32 cmdid) +{ + return ((cmdid & 0xFF00) >> 8); +} + +static inline u8 iwl_cmd_version(u32 cmdid) +{ + return ((cmdid & 0xFF0000) >> 16); +} + +static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) +{ + return opcode + (groupid << 8) + (version << 16); +} + +/* make u16 wide id out of u8 group and opcode */ +#define WIDE_ID(grp, opcode) ((grp << 8) | opcode) + +/* due to the conversion, this group is special; new groups + * should be defined in the appropriate fw-api header files + */ +#define IWL_ALWAYS_LONG_GROUP 1 + +/** + * struct iwl_cmd_header + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. + */ +struct iwl_cmd_header { + u8 cmd; /* Command ID: REPLY_RXON, etc. */ + u8 group_id; + /* + * The driver sets up the sequence number to values of its choosing. + * uCode does not use this value, but passes it back to the driver + * when sending the response to each driver-originated command, so + * the driver can match the response to the command. Since the values + * don't get used by uCode, the driver may set up an arbitrary format. + * + * There is one exception: uCode sets bit 15 when it originates + * the response/notification, i.e. when the response/notification + * is not a direct response to a command sent by the driver. For + * example, uCode issues REPLY_RX when it sends a received frame + * to the driver; it is not a direct response to any driver command. + * + * The Linux driver uses the following format: + * + * 0:7 tfd index - position within TX queue + * 8:12 TX queue id + * 13:14 reserved + * 15 unsolicited RX or uCode-originated notification + */ + __le16 sequence; +} __packed; + +/** + * struct iwl_cmd_header_wide + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. + * this is the wide version that contains more information about the command + * like length, version and command type + */ +struct iwl_cmd_header_wide { + u8 cmd; + u8 group_id; + __le16 sequence; + __le16 length; + u8 reserved; + u8 version; +} __packed; + +#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ +#define FH_RSCSR_FRAME_INVALID 0x55550000 +#define FH_RSCSR_FRAME_ALIGN 0x40 + +struct iwl_rx_packet { + /* + * The first 4 bytes of the RX frame header contain both the RX frame + * size and some flags. 
+ * Bit fields: + * 31: flag flush RB request + * 30: flag ignore TC (terminal counter) request + * 29: flag fast IRQ request + * 28-14: Reserved + * 13-00: RX frame size + */ + __le32 len_n_flags; + struct iwl_cmd_header hdr; + u8 data[]; +} __packed; + +static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt) +{ + return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; +} + +static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) +{ + return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr); +} + +/** + * enum CMD_MODE - how to send the host commands ? + * + * @CMD_ASYNC: Return right away and don't wait for the response + * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of + * the response. The caller needs to call iwl_free_resp when done. + * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the + * command queue, but after other high priority commands. Valid only + * with CMD_ASYNC. + * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle. + * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle. + * @CMD_WAKE_UP_TRANS: The command response should wake up the trans + * (i.e. mark it as non-idle). + * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to + * check that we leave enough room for the TBs bitmap which needs 20 bits. + */ +enum CMD_MODE { + CMD_ASYNC = BIT(0), + CMD_WANT_SKB = BIT(1), + CMD_SEND_IN_RFKILL = BIT(2), + CMD_HIGH_PRIO = BIT(3), + CMD_SEND_IN_IDLE = BIT(4), + CMD_MAKE_TRANS_IDLE = BIT(5), + CMD_WAKE_UP_TRANS = BIT(6), + + CMD_TB_BITMAP_POS = 11, +}; + +#define DEF_CMD_PAYLOAD_SIZE 320 + +/** + * struct iwl_device_cmd + * + * For allocation of the command and tx queues, this establishes the overall + * size of the largest command we send to uCode, except for commands that + * aren't fully copied and use other TFD space. + */ +struct iwl_device_cmd { + union { + struct { + struct iwl_cmd_header hdr; /* uCode API */ + u8 payload[DEF_CMD_PAYLOAD_SIZE]; + }; + struct { + struct iwl_cmd_header_wide hdr_wide; + u8 payload_wide[DEF_CMD_PAYLOAD_SIZE - + sizeof(struct iwl_cmd_header_wide) + + sizeof(struct iwl_cmd_header)]; + }; + }; +} __packed; + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) + +/* + * number of transfer buffers (fragments) per transmit frame descriptor; + * this is just the driver's idea, the hardware supports 20 + */ +#define IWL_MAX_CMD_TBS_PER_TFD 2 + +/** + * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command + * + * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's + * ring. The transport layer doesn't map the command's buffer to DMA, but + * rather copies it to a previously allocated DMA buffer. This flag tells + * the transport layer not to copy the command, but to map the existing + * buffer (that is passed in) instead. This saves the memcpy and allows + * commands that are bigger than the fixed buffer to be submitted. + * Note that a TFD entry after a NOCOPY one cannot be a normal copied one. + * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this + * chunk internally and free it again after the command completes. This + * can (currently) be used only once per command. + * Note that a TFD entry after a DUP one cannot be a normal copied one. 
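[Editor's sketch, illustrative only and not part of the original patch: a typical use of these flags with struct iwl_host_cmd (defined just below) copies a small leading chunk into the command queue and maps a large trailing buffer with NOCOPY. HYPOTHETICAL_CMD_ID, small_hdr, big_buf and big_len are placeholders, not real driver symbols:

	struct iwl_host_cmd hcmd = {
		.id = HYPOTHETICAL_CMD_ID,	/* placeholder opcode */
		.data = { &small_hdr, big_buf },
		.len = { sizeof(small_hdr), big_len },
		/* a copied chunk may not follow a NOCOPY one, so NOCOPY goes last */
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
	};
	int ret = iwl_trans_send_cmd(trans, &hcmd);
]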
+ */ +enum iwl_hcmd_dataflag { + IWL_HCMD_DFL_NOCOPY = BIT(0), + IWL_HCMD_DFL_DUP = BIT(1), +}; + +/** + * struct iwl_host_cmd - Host command to the uCode + * + * @data: array of chunks that composes the data of the host command + * @resp_pkt: response packet, if %CMD_WANT_SKB was set + * @_rx_page_order: (internally used to free response packet) + * @_rx_page_addr: (internally used to free response packet) + * @flags: can be CMD_* + * @len: array of the lengths of the chunks in data + * @dataflags: IWL_HCMD_DFL_* + * @id: command id of the host command, for wide commands encoding the + * version and group as well + */ +struct iwl_host_cmd { + const void *data[IWL_MAX_CMD_TBS_PER_TFD]; + struct iwl_rx_packet *resp_pkt; + unsigned long _rx_page_addr; + u32 _rx_page_order; + + u32 flags; + u32 id; + u16 len[IWL_MAX_CMD_TBS_PER_TFD]; + u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; +}; + +static inline void iwl_free_resp(struct iwl_host_cmd *cmd) +{ + free_pages(cmd->_rx_page_addr, cmd->_rx_page_order); +} + +struct iwl_rx_cmd_buffer { + struct page *_page; + int _offset; + bool _page_stolen; + u32 _rx_page_order; + unsigned int truesize; +}; + +static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) +{ + return (void *)((unsigned long)page_address(r->_page) + r->_offset); +} + +static inline int rxb_offset(struct iwl_rx_cmd_buffer *r) +{ + return r->_offset; +} + +static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) +{ + r->_page_stolen = true; + get_page(r->_page); + return r->_page; +} + +static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) +{ + __free_pages(r->_page, r->_rx_page_order); +} + +#define MAX_NO_RECLAIM_CMDS 6 + +#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) + +/* + * Maximum number of HW queues the transport layer + * currently supports + */ +#define IWL_MAX_HW_QUEUES 32 +#define IWL_MAX_TID_COUNT 8 +#define IWL_FRAME_LIMIT 64 +#define IWL_MAX_RX_HW_QUEUES 16 + +/** + * enum iwl_wowlan_status - WoWLAN image/device status + * @IWL_D3_STATUS_ALIVE: firmware is still running after resume + * @IWL_D3_STATUS_RESET: device was reset while suspended + */ +enum iwl_d3_status { + IWL_D3_STATUS_ALIVE, + IWL_D3_STATUS_RESET, +}; + +/** + * enum iwl_trans_status: transport status flags + * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed + * @STATUS_DEVICE_ENABLED: APM is enabled + * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up) + * @STATUS_INT_ENABLED: interrupts are enabled + * @STATUS_RFKILL: the HW RFkill switch is in KILL position + * @STATUS_FW_ERROR: the fw is in error state + * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands + * are sent + * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent + * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation + */ +enum iwl_trans_status { + STATUS_SYNC_HCMD_ACTIVE, + STATUS_DEVICE_ENABLED, + STATUS_TPOWER_PMI, + STATUS_INT_ENABLED, + STATUS_RFKILL, + STATUS_FW_ERROR, + STATUS_TRANS_GOING_IDLE, + STATUS_TRANS_IDLE, + STATUS_TRANS_DEAD, +}; + +/** + * struct iwl_trans_config - transport configuration + * + * @op_mode: pointer to the upper layer. + * @cmd_queue: the index of the command queue. + * Must be set before start_fw. + * @cmd_fifo: the fifo for host commands + * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue. + * @no_reclaim_cmds: Some devices erroneously don't set the + * SEQ_RX_FRAME bit on some notifications, this is the + * list of such notifications to filter. 
Max length is + * %MAX_NO_RECLAIM_CMDS. + * @n_no_reclaim_cmds: # of commands in list + * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs, + * if unset 4k will be the RX buffer size + * @bc_table_dword: set to true if the BC table expects the byte count to be + * in DWORD (as opposed to bytes) + * @scd_set_active: should the transport configure the SCD for HCMD queue + * @wide_cmd_header: firmware supports wide host command header + * @command_names: array of command names, must be 256 entries + * (one for each command); for debugging only + * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until + * we get the ALIVE from the uCode + */ +struct iwl_trans_config { + struct iwl_op_mode *op_mode; + + u8 cmd_queue; + u8 cmd_fifo; + unsigned int cmd_q_wdg_timeout; + const u8 *no_reclaim_cmds; + unsigned int n_no_reclaim_cmds; + + bool rx_buf_size_8k; + bool bc_table_dword; + bool scd_set_active; + bool wide_cmd_header; + const char *const *command_names; + + u32 sdio_adma_addr; +}; + +struct iwl_trans_dump_data { + u32 len; + u8 data[]; +}; + +struct iwl_trans; + +struct iwl_trans_txq_scd_cfg { + u8 fifo; + s8 sta_id; + u8 tid; + bool aggregate; + int frame_limit; +}; + +/** + * struct iwl_trans_ops - transport specific operations + * + * All the handlers MUST be implemented + * + * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken + * out of a low power state. From that point on, the HW can send + * interrupts. May sleep. + * @op_mode_leave: Turn off the HW RF kill indication if on + * May sleep + * @start_fw: allocates and inits all the resources for the transport + * layer. Also kick a fw image. + * May sleep + * @fw_alive: called when the fw sends alive notification. If the fw provides + * the SCD base address in SRAM, then provide it here, or 0 otherwise. + * May sleep + * @stop_device: stops the whole device (embedded CPU put to reset) and stops + * the HW. If low_power is true, the NIC will be put in low power state. + * From that point on, the HW will be stopped but will still issue an + * interrupt if the HW RF kill switch is triggered. + * This callback must do the right thing and not crash even if %start_hw() + * was called but not &start_fw(). May sleep. + * @d3_suspend: put the device into the correct mode for WoWLAN during + * suspend. This is optional, if not implemented WoWLAN will not be + * supported. This callback may sleep. + * @d3_resume: resume the device after WoWLAN, enabling the opmode to + * talk to the WoWLAN image to get its status. This is optional, if not + * implemented WoWLAN will not be supported. This callback may sleep. + * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted. + * If RFkill is asserted in the middle of a SYNC host command, it must + * return -ERFKILL straight away. + * May sleep only if CMD_ASYNC is not set + * @tx: send an skb + * Must be atomic + * @reclaim: free packet until ssn. Returns a list of freed packets. + * Must be atomic + * @txq_enable: setup a queue. To setup an AC queue, use the + * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before + * this one. The op_mode must not configure the HCMD queue. The scheduler + * configuration may be %NULL, in which case the hardware will not be + * configured. May sleep. + * @txq_disable: de-configure a Tx queue to send AMPDUs + * Must be atomic + * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. 
+ * @freeze_txq_timer: prevents the timer of the queue from firing until the + * queue is set to awake. Must be atomic. + * @dbgfs_register: add the dbgfs files under this directory. Files will be + * automatically deleted. + * @write8: write a u8 to a register at offset ofs from the BAR + * @write32: write a u32 to a register at offset ofs from the BAR + * @read32: read a u32 register at offset ofs from the BAR + * @read_prph: read a DWORD from a periphery register + * @write_prph: write a DWORD to a periphery register + * @read_mem: read device's SRAM in DWORD + * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory + * will be zeroed. + * @configure: configure parameters required by the transport layer from + * the op_mode. May be called several times before start_fw, can't be + * called after that. + * @set_pmi: set the power pmi state + * @grab_nic_access: wake the NIC to be able to access non-HBUS regs. + * Sleeping is not allowed between grab_nic_access and + * release_nic_access. + * @release_nic_access: let the NIC go to sleep. The "flags" parameter + * must be the same one that was sent before to the grab_nic_access. + * @set_bits_mask - set SRAM register according to value and mask. + * @ref: grab a reference to the transport/FW layers, disallowing + * certain low power states + * @unref: release a reference previously taken with @ref. Note that + * initially the reference count is 1, making an initial @unref + * necessary to allow low power states. + * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last + * TX'ed commands and similar. The buffer will be vfree'd by the caller. + * Note that the transport must fill in the proper file headers. + */ +struct iwl_trans_ops { + + int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power); + void (*op_mode_leave)(struct iwl_trans *iwl_trans); + int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, + bool run_in_rfkill); + int (*update_sf)(struct iwl_trans *trans, + struct iwl_sf_region *st_fwrd_space); + void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); + void (*stop_device)(struct iwl_trans *trans, bool low_power); + + void (*d3_suspend)(struct iwl_trans *trans, bool test); + int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, + bool test); + + int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); + + int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_cmd *dev_cmd, int queue); + void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, + struct sk_buff_head *skbs); + + void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int queue_wdg_timeout); + void (*txq_disable)(struct iwl_trans *trans, int queue, + bool configure_scd); + + int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); + int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); + void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, + bool freeze); + + void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); + void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); + u32 (*read32)(struct iwl_trans *trans, u32 ofs); + u32 (*read_prph)(struct iwl_trans *trans, u32 ofs); + void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val); + int (*read_mem)(struct iwl_trans *trans, u32 addr, + void *buf, int dwords); + int (*write_mem)(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords); + void (*configure)(struct iwl_trans *trans, + 
const struct iwl_trans_config *trans_cfg); + void (*set_pmi)(struct iwl_trans *trans, bool state); + bool (*grab_nic_access)(struct iwl_trans *trans, bool silent, + unsigned long *flags); + void (*release_nic_access)(struct iwl_trans *trans, + unsigned long *flags); + void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, + u32 value); + void (*ref)(struct iwl_trans *trans); + void (*unref)(struct iwl_trans *trans); + int (*suspend)(struct iwl_trans *trans); + void (*resume)(struct iwl_trans *trans); + + struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv + *trigger); +}; + +/** + * enum iwl_trans_state - state of the transport layer + * + * @IWL_TRANS_NO_FW: no fw has sent an alive response + * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response + */ +enum iwl_trans_state { + IWL_TRANS_NO_FW = 0, + IWL_TRANS_FW_ALIVE = 1, +}; + +/** + * enum iwl_d0i3_mode - d0i3 mode + * + * @IWL_D0I3_MODE_OFF - d0i3 is disabled + * @IWL_D0I3_MODE_ON_IDLE - enter d0i3 when device is idle + * (e.g. no active references) + * @IWL_D0I3_MODE_ON_SUSPEND - enter d0i3 only on suspend + * (in case of 'any' trigger) + */ +enum iwl_d0i3_mode { + IWL_D0I3_MODE_OFF = 0, + IWL_D0I3_MODE_ON_IDLE, + IWL_D0I3_MODE_ON_SUSPEND, +}; + +/** + * struct iwl_trans - transport common data + * + * @ops - pointer to iwl_trans_ops + * @op_mode - pointer to the op_mode + * @cfg - pointer to the configuration + * @status: a bit-mask of transport status flags + * @dev - pointer to struct device * that represents the device + * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. + * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. + * @hw_id: a u32 with the ID of the device / sub-device. + * Set during transport allocation. + * @hw_id_str: a string with info about HW ID. Set during transport allocation. + * @pm_support: set to true in start_hw if link pm is supported + * @ltr_enabled: set to true if the LTR is enabled + * @num_rx_queues: number of RX queues allocated by the transport; + * the transport must set this before calling iwl_drv_start() + * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. + * The user should use iwl_trans_{alloc,free}_tx_cmd. + * @dev_cmd_headroom: room needed for the transport's private use before the + * device_cmd for Tx - for internal use only + * The user should use iwl_trans_{alloc,free}_tx_cmd. + * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before + * starting the firmware, used for tracing + * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the + * start of the 802.11 header in the @rx_mpdu_cmd + * @dflt_pwr_limit: default power limit fetched from the platform (ACPI) + * @dbg_dest_tlv: points to the destination TLV for debug + * @dbg_conf_tlv: array of pointers to configuration TLVs for debug + * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug + * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv + * @paging_req_addr: The location were the FW will upload / download the pages + * from. The address is set by the opmode + * @paging_db: Pointer to the opmode paging data base, the pointer is set by + * the opmode. + * @paging_download_buf: Buffer used for copying all of the pages before + * downloading them to the FW. 
The buffer is allocated in the opmode + */ +struct iwl_trans { + const struct iwl_trans_ops *ops; + struct iwl_op_mode *op_mode; + const struct iwl_cfg *cfg; + enum iwl_trans_state state; + unsigned long status; + + struct device *dev; + u32 max_skb_frags; + u32 hw_rev; + u32 hw_id; + char hw_id_str[52]; + + u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; + + bool pm_support; + bool ltr_enabled; + + u8 num_rx_queues; + + /* The following fields are internal only */ + struct kmem_cache *dev_cmd_pool; + size_t dev_cmd_headroom; + char dev_cmd_pool_name[50]; + + struct dentry *dbgfs_dir; + +#ifdef CONFIG_LOCKDEP + struct lockdep_map sync_cmd_lockdep_map; +#endif + + u64 dflt_pwr_limit; + + const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; + struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; + u8 dbg_dest_reg_num; + + /* + * Paging parameters - All of the parameters should be set by the + * opmode when paging is enabled + */ + u32 paging_req_addr; + struct iwl_fw_paging *paging_db; + void *paging_download_buf; + + enum iwl_d0i3_mode d0i3_mode; + + bool wowlan_d0i3; + + /* pointer to trans specific struct */ + /*Ensure that this pointer will always be aligned to sizeof pointer */ + char trans_specific[0] __aligned(sizeof(void *)); +}; + +static inline void iwl_trans_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg) +{ + trans->op_mode = trans_cfg->op_mode; + + trans->ops->configure(trans, trans_cfg); +} + +static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power) +{ + might_sleep(); + + return trans->ops->start_hw(trans, low_power); +} + +static inline int iwl_trans_start_hw(struct iwl_trans *trans) +{ + return trans->ops->start_hw(trans, true); +} + +static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) +{ + might_sleep(); + + if (trans->ops->op_mode_leave) + trans->ops->op_mode_leave(trans); + + trans->op_mode = NULL; + + trans->state = IWL_TRANS_NO_FW; +} + +static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) +{ + might_sleep(); + + trans->state = IWL_TRANS_FW_ALIVE; + + trans->ops->fw_alive(trans, scd_addr); +} + +static inline int iwl_trans_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, + bool run_in_rfkill) +{ + might_sleep(); + + WARN_ON_ONCE(!trans->rx_mpdu_cmd); + + clear_bit(STATUS_FW_ERROR, &trans->status); + return trans->ops->start_fw(trans, fw, run_in_rfkill); +} + +static inline int iwl_trans_update_sf(struct iwl_trans *trans, + struct iwl_sf_region *st_fwrd_space) +{ + might_sleep(); + + if (trans->ops->update_sf) + return trans->ops->update_sf(trans, st_fwrd_space); + + return 0; +} + +static inline void _iwl_trans_stop_device(struct iwl_trans *trans, + bool low_power) +{ + might_sleep(); + + trans->ops->stop_device(trans, low_power); + + trans->state = IWL_TRANS_NO_FW; +} + +static inline void iwl_trans_stop_device(struct iwl_trans *trans) +{ + _iwl_trans_stop_device(trans, true); +} + +static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) +{ + might_sleep(); + if (trans->ops->d3_suspend) + trans->ops->d3_suspend(trans, test); +} + +static inline int iwl_trans_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status, + bool test) +{ + might_sleep(); + if (!trans->ops->d3_resume) + return 0; + + return trans->ops->d3_resume(trans, status, test); +} + +static inline void iwl_trans_ref(struct iwl_trans *trans) +{ + if (trans->ops->ref) + trans->ops->ref(trans); +} + +static inline void 
iwl_trans_unref(struct iwl_trans *trans) +{ + if (trans->ops->unref) + trans->ops->unref(trans); +} + +static inline int iwl_trans_suspend(struct iwl_trans *trans) +{ + if (!trans->ops->suspend) + return 0; + + return trans->ops->suspend(trans); +} + +static inline void iwl_trans_resume(struct iwl_trans *trans) +{ + if (trans->ops->resume) + trans->ops->resume(trans); +} + +static inline struct iwl_trans_dump_data * +iwl_trans_dump_data(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv *trigger) +{ + if (!trans->ops->dump_data) + return NULL; + return trans->ops->dump_data(trans, trigger); +} + +static inline int iwl_trans_send_cmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + int ret; + + if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status))) + return -ERFKILL; + + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return -EIO; + + if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) { + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return -EIO; + } + + if (!(cmd->flags & CMD_ASYNC)) + lock_map_acquire_read(&trans->sync_cmd_lockdep_map); + + ret = trans->ops->send_cmd(trans, cmd); + + if (!(cmd->flags & CMD_ASYNC)) + lock_map_release(&trans->sync_cmd_lockdep_map); + + return ret; +} + +static inline struct iwl_device_cmd * +iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) +{ + u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); + + if (unlikely(dev_cmd_ptr == NULL)) + return NULL; + + return (struct iwl_device_cmd *) + (dev_cmd_ptr + trans->dev_cmd_headroom); +} + +static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, + struct iwl_device_cmd *dev_cmd) +{ + u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom; + + kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr); +} + +static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_cmd *dev_cmd, int queue) +{ + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return -EIO; + + if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + + return trans->ops->tx(trans, skb, dev_cmd, queue); +} + +static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, + int ssn, struct sk_buff_head *skbs) +{ + if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + + trans->ops->reclaim(trans, queue, ssn, skbs); +} + +static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd) +{ + trans->ops->txq_disable(trans, queue, configure_scd); +} + +static inline void +iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int queue_wdg_timeout) +{ + might_sleep(); + + if (unlikely((trans->state != IWL_TRANS_FW_ALIVE))) + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + + trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout); +} + +static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn, + unsigned int queue_wdg_timeout) +{ + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = sta_id, + .tid = tid, + .frame_limit = frame_limit, + .aggregate = sta_id >= 0, + }; + + iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout); +} + +static inline +void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo, + unsigned int 
queue_wdg_timeout) +{ + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = -1, + .tid = IWL_MAX_TID_COUNT, + .frame_limit = IWL_FRAME_LIMIT, + .aggregate = false, + }; + + iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); +} + +static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, + bool freeze) +{ + if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + + if (trans->ops->freeze_txq_timer) + trans->ops->freeze_txq_timer(trans, txqs, freeze); +} + +static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, + u32 txqs) +{ + if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + + return trans->ops->wait_tx_queue_empty(trans, txqs); +} + +static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, + struct dentry *dir) +{ + return trans->ops->dbgfs_register(trans, dir); +} + +static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) +{ + trans->ops->write8(trans, ofs, val); +} + +static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) +{ + trans->ops->write32(trans, ofs, val); +} + +static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) +{ + return trans->ops->read32(trans, ofs); +} + +static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) +{ + return trans->ops->read_prph(trans, ofs); +} + +static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, + u32 val) +{ + return trans->ops->write_prph(trans, ofs, val); +} + +static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + return trans->ops->read_mem(trans, addr, buf, dwords); +} + +#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ + do { \ + if (__builtin_constant_p(bufsize)) \ + BUILD_BUG_ON((bufsize) % sizeof(u32)); \ + iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ + } while (0) + +static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) +{ + u32 value; + + if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1))) + return 0xa5a5a5a5; + + return value; +} + +static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords) +{ + return trans->ops->write_mem(trans, addr, buf, dwords); +} + +static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, + u32 val) +{ + return iwl_trans_write_mem(trans, addr, &val, 1); +} + +static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) +{ + if (trans->ops->set_pmi) + trans->ops->set_pmi(trans, state); +} + +static inline void +iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) +{ + trans->ops->set_bits_mask(trans, reg, mask, value); +} + +#define iwl_trans_grab_nic_access(trans, silent, flags) \ + __cond_lock(nic_access, \ + likely((trans)->ops->grab_nic_access(trans, silent, flags))) + +static inline void __releases(nic_access) +iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags) +{ + trans->ops->release_nic_access(trans, flags); + __release(nic_access); +} + +static inline void iwl_trans_fw_error(struct iwl_trans *trans) +{ + if (WARN_ON_ONCE(!trans->op_mode)) + return; + + /* prevent double restarts due to the same erroneous FW */ + if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) + iwl_op_mode_nic_error(trans->op_mode); +} + +/***************************************************** 
+ * transport helper functions + *****************************************************/ +struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, + struct device *dev, + const struct iwl_cfg *cfg, + const struct iwl_trans_ops *ops, + size_t dev_cmd_headroom); +void iwl_trans_free(struct iwl_trans *trans); + +/***************************************************** +* driver (transport) register/unregister functions +******************************************************/ +int __must_check iwl_pci_register_driver(void); +void iwl_pci_unregister_driver(void); + +#endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile new file mode 100644 index 000000000000..8c2c3d13b092 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -0,0 +1,12 @@ +obj-$(CONFIG_IWLMVM) += iwlmvm.o +iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o +iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o +iwlmvm-y += scan.o time-event.o rs.o +iwlmvm-y += power.o coex.o coex_legacy.o +iwlmvm-y += tt.o offloading.o tdls.o +iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o +iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o +iwlmvm-y += tof.o +iwlmvm-$(CONFIG_PM_SLEEP) += d3.o + +ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c new file mode 100644 index 000000000000..a1376539d2dc --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c @@ -0,0 +1,211 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include "fw-api.h" +#include "mvm.h" + +struct iwl_mvm_iface_iterator_data { + struct ieee80211_vif *ignore_vif; + int idx; + + struct iwl_mvm_phy_ctxt *phyctxt; + + u16 ids[MAX_MACS_IN_BINDING]; + u16 colors[MAX_MACS_IN_BINDING]; +}; + +static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, + struct iwl_mvm_iface_iterator_data *data) +{ + struct iwl_binding_cmd cmd; + struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt; + int i, ret; + u32 status; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, + phyctxt->color)); + cmd.action = cpu_to_le32(action); + cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, + phyctxt->color)); + + for (i = 0; i < MAX_MACS_IN_BINDING; i++) + cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); + for (i = 0; i < data->idx; i++) + cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i], + data->colors[i])); + + status = 0; + ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, + sizeof(cmd), &cmd, &status); + if (ret) { + IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", + action, ret); + return ret; + } + + if (status) { + IWL_ERR(mvm, "Binding command failed: %u\n", status); + ret = -EIO; + } + + return ret; +} + +static void iwl_mvm_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_iface_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif == data->ignore_vif) + return; + + if (mvmvif->phy_ctxt != data->phyctxt) + return; + + if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING)) + return; + + data->ids[data->idx] = mvmvif->id; + data->colors[data->idx] = mvmvif->color; + data->idx++; +} + +static int iwl_mvm_binding_update(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_phy_ctxt *phyctxt, + bool add) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_iface_iterator_data data = { + .ignore_vif = vif, + .phyctxt = phyctxt, + }; + u32 action = FW_CTXT_ACTION_MODIFY; + + lockdep_assert_held(&mvm->mutex); + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_iface_iterator, + &data); + + /* + * If there are no other interfaces yet we + * need to create a new binding. 
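+ * Otherwise the existing binding is just modified, which is the
+ * default action.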
+ */ + if (data.idx == 0) { + if (add) + action = FW_CTXT_ACTION_ADD; + else + action = FW_CTXT_ACTION_REMOVE; + } + + if (add) { + if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING)) + return -EINVAL; + + data.ids[data.idx] = mvmvif->id; + data.colors[data.idx] = mvmvif->color; + data.idx++; + } + + return iwl_mvm_binding_cmd(mvm, action, &data); +} + +int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + return -EINVAL; + + /* + * Update SF - Disable if needed. if this fails, SF might still be on + * while many macs are bound, which is forbidden - so fail the binding. + */ + if (iwl_mvm_sf_update(mvm, vif, false)) + return -EINVAL; + + return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true); +} + +int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + return -EINVAL; + + ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false); + + if (!ret) + if (iwl_mvm_sf_update(mvm, vif, true)) + IWL_ERR(mvm, "Failed to update SF state\n"); + + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c new file mode 100644 index 000000000000..e290ac67d975 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -0,0 +1,1005 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include + +#include "fw-api-coex.h" +#include "iwl-modparams.h" +#include "mvm.h" +#include "iwl-debug.h" + +/* 20MHz / 40MHz below / 40Mhz above*/ +static const __le64 iwl_ci_mask[][3] = { + /* dummy entry for channel 0 */ + {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)}, + { + cpu_to_le64(0x0000001FFFULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x00007FFFFFULL), + }, + { + cpu_to_le64(0x000000FFFFULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x0003FFFFFFULL), + }, + { + cpu_to_le64(0x000003FFFCULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x000FFFFFFCULL), + }, + { + cpu_to_le64(0x00001FFFE0ULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x007FFFFFE0ULL), + }, + { + cpu_to_le64(0x00007FFF80ULL), + cpu_to_le64(0x00007FFFFFULL), + cpu_to_le64(0x01FFFFFF80ULL), + }, + { + cpu_to_le64(0x0003FFFC00ULL), + cpu_to_le64(0x0003FFFFFFULL), + cpu_to_le64(0x0FFFFFFC00ULL), + }, + { + cpu_to_le64(0x000FFFF000ULL), + cpu_to_le64(0x000FFFFFFCULL), + cpu_to_le64(0x3FFFFFF000ULL), + }, + { + cpu_to_le64(0x007FFF8000ULL), + cpu_to_le64(0x007FFFFFE0ULL), + cpu_to_le64(0xFFFFFF8000ULL), + }, + { + cpu_to_le64(0x01FFFE0000ULL), + cpu_to_le64(0x01FFFFFF80ULL), + cpu_to_le64(0xFFFFFE0000ULL), + }, + { + cpu_to_le64(0x0FFFF00000ULL), + cpu_to_le64(0x0FFFFFFC00ULL), + cpu_to_le64(0x0ULL), + }, + { + cpu_to_le64(0x3FFFC00000ULL), + cpu_to_le64(0x3FFFFFF000ULL), + cpu_to_le64(0x0) + }, + { + cpu_to_le64(0xFFFE000000ULL), + cpu_to_le64(0xFFFFFF8000ULL), + cpu_to_le64(0x0) + }, + { + cpu_to_le64(0xFFF8000000ULL), + cpu_to_le64(0xFFFFFE0000ULL), + cpu_to_le64(0x0) + }, + { + cpu_to_le64(0xFFC0000000ULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x0ULL) + }, +}; + +struct corunning_block_luts { + u8 range; + __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; +}; + +/* + * Ranges for the antenna coupling calibration / co-running block LUT: + * LUT0: [ 0, 12[ + * LUT1: [12, 20[ + * LUT2: [20, 21[ + * LUT3: [21, 23[ + * LUT4: [23, 27[ + * LUT5: [27, 30[ + * LUT6: [30, 32[ + * LUT7: [32, 33[ + * LUT8: [33, - [ + */ +static const struct corunning_block_luts antenna_coupling_ranges[] = { + { + .range = 0, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 12, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 20, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 21, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 23, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 27, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 30, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 32, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 33, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, +}; + +static enum iwl_bt_coex_lut_type +iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + enum iwl_bt_coex_lut_type ret; + u16 phy_ctx_id; + u32 primary_ch_phy_id, secondary_ch_phy_id; + + /* + * Checking that we hold mvm->mutex is a good idea, but the rate + * control can't acquire the mutex since it runs in Tx path. + * So this is racy in that case, but in the worst case, the AMPDU + * size limit will be wrong for a short time which is not a big + * issue. + */ + + rcu_read_lock(); + + chanctx_conf = rcu_dereference(vif->chanctx_conf); + + if (!chanctx_conf || + chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { + rcu_read_unlock(); + return BT_COEX_INVALID_LUT; + } + + ret = BT_COEX_TX_DIS_LUT; + + if (mvm->cfg->bt_shared_single_ant) { + rcu_read_unlock(); + return ret; + } + + phy_ctx_id = *((u16 *)chanctx_conf->drv_priv); + primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id); + secondary_ch_phy_id = + le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id); + + if (primary_ch_phy_id == phy_ctx_id) + ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut); + else if (secondary_ch_phy_id == phy_ctx_id) + ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut); + /* else - default = TX TX disallowed */ + + rcu_read_unlock(); + + return ret; +} + +int iwl_send_bt_init_conf(struct iwl_mvm *mvm) +{ + struct iwl_bt_coex_cmd bt_cmd = {}; + u32 mode; + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + return iwl_send_bt_init_conf_old(mvm); + + lockdep_assert_held(&mvm->mutex); + + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { + switch (mvm->bt_force_ant_mode) { + case BT_FORCE_ANT_BT: + mode = BT_COEX_BT; + break; + case BT_FORCE_ANT_WIFI: + mode = BT_COEX_WIFI; + break; + default: + WARN_ON(1); + mode = 0; + } + + bt_cmd.mode = cpu_to_le32(mode); + goto send_cmd; + } + + mode = iwlwifi_mod_params.bt_coex_active ? 
BT_COEX_NW : BT_COEX_DISABLE; + bt_cmd.mode = cpu_to_le32(mode); + + if (IWL_MVM_BT_COEX_SYNC2SCO) + bt_cmd.enabled_modules |= + cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); + + if (iwl_mvm_bt_is_plcr_supported(mvm)) + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); + + if (IWL_MVM_BT_COEX_MPLUT) { + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); + bt_cmd.enabled_modules |= + cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED); + } + + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); + +send_cmd: + memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); + memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); + + return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd); +} + +static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, + bool enable) +{ + struct iwl_bt_coex_reduced_txp_update_cmd cmd = {}; + struct iwl_mvm_sta *mvmsta; + u32 value; + int ret; + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); + if (!mvmsta) + return 0; + + /* nothing to do */ + if (mvmsta->bt_reduced_txpower == enable) + return 0; + + value = mvmsta->sta_id; + + if (enable) + value |= BT_REDUCED_TX_POWER_BIT; + + IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", + enable ? "en" : "dis", sta_id); + + cmd.reduced_txp = cpu_to_le32(value); + mvmsta->bt_reduced_txpower = enable; + + ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC, + sizeof(cmd), &cmd); + + return ret; +} + +struct iwl_bt_iterator_data { + struct iwl_bt_coex_profile_notif *notif; + struct iwl_mvm *mvm; + struct ieee80211_chanctx_conf *primary; + struct ieee80211_chanctx_conf *secondary; + bool primary_ll; +}; + +static inline +void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool enable, int rssi) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->bf_data.last_bt_coex_event = rssi; + mvmvif->bf_data.bt_coex_max_thold = + enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; + mvmvif->bf_data.bt_coex_min_thold = + enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; +} + +/* must be called under rcu_read_lock */ +static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_bt_iterator_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + struct ieee80211_chanctx_conf *chanctx_conf; + /* default smps_mode is AUTOMATIC - only used for client modes */ + enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; + u32 bt_activity_grading; + int ave_rssi; + + lockdep_assert_held(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + break; + case NL80211_IFTYPE_AP: + if (!mvmvif->ap_ibss_active) + return; + break; + default: + return; + } + + chanctx_conf = rcu_dereference(vif->chanctx_conf); + + /* If channel context is invalid or not on 2.4GHz .. */ + if ((!chanctx_conf || + chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { + if (vif->type == NL80211_IFTYPE_STATION) { + /* ... 
relax constraints and disable rssi events */ + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, + smps_mode); + iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, + false); + iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); + } + return; + } + + bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); + if (bt_activity_grading >= BT_HIGH_TRAFFIC) + smps_mode = IEEE80211_SMPS_STATIC; + else if (bt_activity_grading >= BT_LOW_TRAFFIC) + smps_mode = IEEE80211_SMPS_DYNAMIC; + + /* relax SMPS constraints for next association */ + if (!vif->bss_conf.assoc) + smps_mode = IEEE80211_SMPS_AUTOMATIC; + + if (mvmvif->phy_ctxt && + IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, + mvmvif->phy_ctxt->id)) + smps_mode = IEEE80211_SMPS_AUTOMATIC; + + IWL_DEBUG_COEX(data->mvm, + "mac %d: bt_activity_grading %d smps_req %d\n", + mvmvif->id, bt_activity_grading, smps_mode); + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, + smps_mode); + + /* low latency is always primary */ + if (iwl_mvm_vif_low_latency(mvmvif)) { + data->primary_ll = true; + + data->secondary = data->primary; + data->primary = chanctx_conf; + } + + if (vif->type == NL80211_IFTYPE_AP) { + if (!mvmvif->ap_ibss_active) + return; + + if (chanctx_conf == data->primary) + return; + + if (!data->primary_ll) { + /* + * downgrade the current primary no matter what its + * type is. + */ + data->secondary = data->primary; + data->primary = chanctx_conf; + } else { + /* there is low latency vif - we will be secondary */ + data->secondary = chanctx_conf; + } + return; + } + + /* + * STA / P2P Client, try to be primary if first vif. If we are in low + * latency mode, we are already in primary and just don't do much + */ + if (!data->primary || data->primary == chanctx_conf) + data->primary = chanctx_conf; + else if (!data->secondary) + /* if secondary is not NULL, it might be a GO */ + data->secondary = chanctx_conf; + + /* + * don't reduce the Tx power if one of these is true: + * we are in LOOSE + * single share antenna product + * BT is active + * we are associated + */ + if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || + mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || + le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) { + iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); + iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); + return; + } + + /* try to get the avg rssi from fw */ + ave_rssi = mvmvif->bf_data.ave_beacon_signal; + + /* if the RSSI isn't valid, fake it is very low */ + if (!ave_rssi) + ave_rssi = -100; + if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { + if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) + IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); + } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { + if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) + IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); + } + + /* Begin to monitor the RSSI: it may influence the reduced Tx power */ + iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); +} + +static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) +{ + struct iwl_bt_iterator_data data = { + .mvm = mvm, + .notif = &mvm->last_bt_notif, + }; + struct iwl_bt_coex_ci_cmd cmd = {}; + u8 ci_bw_idx; + + /* Ignore updates if we are in force mode */ + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) + return; + + rcu_read_lock(); + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, 
IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_bt_notif_iterator, &data); + + if (data.primary) { + struct ieee80211_chanctx_conf *chan = data.primary; + if (WARN_ON(!chan->def.chan)) { + rcu_read_unlock(); + return; + } + + if (chan->def.width < NL80211_CHAN_WIDTH_40) { + ci_bw_idx = 0; + } else { + if (chan->def.center_freq1 > + chan->def.chan->center_freq) + ci_bw_idx = 2; + else + ci_bw_idx = 1; + } + + cmd.bt_primary_ci = + iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; + cmd.primary_ch_phy_id = + cpu_to_le32(*((u16 *)data.primary->drv_priv)); + } + + if (data.secondary) { + struct ieee80211_chanctx_conf *chan = data.secondary; + if (WARN_ON(!data.secondary->def.chan)) { + rcu_read_unlock(); + return; + } + + if (chan->def.width < NL80211_CHAN_WIDTH_40) { + ci_bw_idx = 0; + } else { + if (chan->def.center_freq1 > + chan->def.chan->center_freq) + ci_bw_idx = 2; + else + ci_bw_idx = 1; + } + + cmd.bt_secondary_ci = + iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; + cmd.secondary_ch_phy_id = + cpu_to_le32(*((u16 *)data.secondary->drv_priv)); + } + + rcu_read_unlock(); + + /* Don't spam the fw with the same command over and over */ + if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) { + if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, + sizeof(cmd), &cmd)) + IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); + memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd)); + } +} + +void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; + + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + iwl_mvm_rx_bt_coex_notif_old(mvm, rxb); + return; + } + + IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); + IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); + IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", + le32_to_cpu(notif->primary_ch_lut)); + IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", + le32_to_cpu(notif->secondary_ch_lut)); + IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", + le32_to_cpu(notif->bt_activity_grading)); + + /* remember this notification for future use: rssi fluctuations */ + memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); + + iwl_mvm_bt_coex_notif_handle(mvm); +} + +void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum ieee80211_rssi_event_data rssi_event) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event); + return; + } + + lockdep_assert_held(&mvm->mutex); + + /* Ignore updates if we are in force mode */ + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) + return; + + /* + * Rssi update while not associated - can happen since the statistics + * are handled asynchronously + */ + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) + return; + + /* No BT - reports should be disabled */ + if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) + return; + + IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, + rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); + + /* + * Check if rssi is good enough for reduced Tx power, but not in loose + * scheme. 
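+ * A low RSSI, a shared single antenna or a loose coex LUT keeps the
+ * reduced Tx power disabled; any other combination enables it.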
+ */ + if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || + iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) + ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, + false); + else + ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); + + if (ret) + IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); +} + +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) +#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) + +u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; + enum iwl_bt_coex_lut_type lut_type; + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + return iwl_mvm_coex_agg_time_limit_old(mvm, sta); + + if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) + return LINK_QUAL_AGG_TIME_LIMIT_DEF; + + if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < + BT_HIGH_TRAFFIC) + return LINK_QUAL_AGG_TIME_LIMIT_DEF; + + lut_type = iwl_get_coex_type(mvm, mvmsta->vif); + + if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) + return LINK_QUAL_AGG_TIME_LIMIT_DEF; + + /* tight coex, high bt traffic, reduce AGG time limit */ + return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; +} + +bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; + enum iwl_bt_coex_lut_type lut_type; + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta); + + if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) + return true; + + if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < + BT_HIGH_TRAFFIC) + return true; + + /* + * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas + * since BT is already killed. + * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while + * we Tx. + * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO. 
+ */ + lut_type = iwl_get_coex_type(mvm, mvmsta->vif); + return lut_type != BT_COEX_LOOSE_LUT; +} + +bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) +{ + /* there is no other antenna, shared antenna is always available */ + if (mvm->cfg->bt_shared_single_ant) + return true; + + if (ant & mvm->cfg->non_shared_ant) + return true; + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); + + return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < + BT_HIGH_TRAFFIC; +} + +bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) +{ + /* there is no other antenna, shared antenna is always available */ + if (mvm->cfg->bt_shared_single_ant) + return true; + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); + + return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; +} + +bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, + enum ieee80211_band band) +{ + u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band); + + if (band != IEEE80211_BAND_2GHZ) + return false; + + return bt_activity >= BT_LOW_TRAFFIC; +} + +u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *info, u8 ac) +{ + __le16 fc = hdr->frame_control; + + if (info->band != IEEE80211_BAND_2GHZ) + return 0; + + if (unlikely(mvm->bt_tx_prio)) + return mvm->bt_tx_prio - 1; + + /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */ + if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO || + is_multicast_ether_addr(hdr->addr1) || + ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) || + ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) + return 3; + + switch (ac) { + case IEEE80211_AC_BE: + return 1; + case IEEE80211_AC_VO: + return 3; + case IEEE80211_AC_VI: + return 2; + default: + break; + } + + return 0; +} + +void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) +{ + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + iwl_mvm_bt_coex_vif_change_old(mvm); + return; + } + + iwl_mvm_bt_coex_notif_handle(mvm); +} + +void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 ant_isolation = le32_to_cpup((void *)pkt->data); + struct iwl_bt_coex_corun_lut_update_cmd cmd = {}; + u8 __maybe_unused lower_bound, upper_bound; + u8 lut; + + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb); + return; + } + + if (!iwl_mvm_bt_is_plcr_supported(mvm)) + return; + + lockdep_assert_held(&mvm->mutex); + + /* Ignore updates if we are in force mode */ + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) + return; + + if (ant_isolation == mvm->last_ant_isol) + return; + + for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) + if (ant_isolation < antenna_coupling_ranges[lut + 1].range) + break; + + lower_bound = antenna_coupling_ranges[lut].range; + + if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1) + upper_bound = antenna_coupling_ranges[lut + 1].range; + else + upper_bound = antenna_coupling_ranges[lut].range; + + IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n", + ant_isolation, lower_bound, upper_bound, lut); + + 
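+ /* Cache the reported isolation and push a new co-running LUT to the firmware only when the selected range bucket changes. */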
mvm->last_ant_isol = ant_isolation; + + if (mvm->last_corun_lut == lut) + return; + + mvm->last_corun_lut = lut; + + /* For the moment, use the same LUT for 20GHz and 40GHz */ + memcpy(&cmd.corun_lut20, antenna_coupling_ranges[lut].lut20, + sizeof(cmd.corun_lut20)); + + memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20, + sizeof(cmd.corun_lut40)); + + if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0, + sizeof(cmd), &cmd)) + IWL_ERR(mvm, + "failed to send BT_COEX_UPDATE_CORUN_LUT command\n"); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c new file mode 100644 index 000000000000..61c07b05fcaa --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c @@ -0,0 +1,1315 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include + +#include "fw-api-coex.h" +#include "iwl-modparams.h" +#include "mvm.h" +#include "iwl-debug.h" + +#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \ + [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \ + ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS)) + +static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1, + BT_COEX_PRIO_TBL_PRIO_BYPASS, 0), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2, + BT_COEX_PRIO_TBL_PRIO_BYPASS, 1), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1, + BT_COEX_PRIO_TBL_PRIO_LOW, 0), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2, + BT_COEX_PRIO_TBL_PRIO_LOW, 1), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1, + BT_COEX_PRIO_TBL_PRIO_HIGH, 0), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2, + BT_COEX_PRIO_TBL_PRIO_HIGH, 1), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM, + BT_COEX_PRIO_TBL_DISABLED, 0), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52, + BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24, + BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0), + EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE, + BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0), + 0, 0, 0, 0, 0, 0, +}; + +#undef EVENT_PRIO_ANT + +static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) +{ + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) + return 0; + + return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0, + sizeof(struct iwl_bt_coex_prio_tbl_cmd), + &iwl_bt_prio_tbl); +} + +static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = { + cpu_to_le32(0xf0f0f0f0), /* 50% */ + cpu_to_le32(0xc0c0c0c0), /* 25% */ + cpu_to_le32(0xfcfcfcfc), /* 75% */ + cpu_to_le32(0xfefefefe), /* 87.5% */ +}; + +static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { + { + cpu_to_le32(0x40000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x44000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x40000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x44000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xf0005000), + }, + { + cpu_to_le32(0x40000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x44000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x40000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x44000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xf0005000), + }, + { + cpu_to_le32(0x40000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x44000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x40000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0x44000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xf0005000), + }, +}; + +static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { + { + 
/* Tight */ + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaeaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xcc00ff28), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xcc00aaaa), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xc0004000), + cpu_to_le32(0x00004000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xf0005000), + }, + { + /* Loose */ + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xcc00ff28), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xcc00aaaa), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xf0005000), + }, + { + /* Tx Tx disabled */ + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xeeaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xcc00ff28), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xcc00aaaa), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xf0005000), + }, +}; + +/* 20MHz / 40MHz below / 40Mhz above*/ +static const __le64 iwl_ci_mask[][3] = { + /* dummy entry for channel 0 */ + {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)}, + { + cpu_to_le64(0x0000001FFFULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x00007FFFFFULL), + }, + { + cpu_to_le64(0x000000FFFFULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x0003FFFFFFULL), + }, + { + cpu_to_le64(0x000003FFFCULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x000FFFFFFCULL), + }, + { + cpu_to_le64(0x00001FFFE0ULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x007FFFFFE0ULL), + }, + { + cpu_to_le64(0x00007FFF80ULL), + cpu_to_le64(0x00007FFFFFULL), + cpu_to_le64(0x01FFFFFF80ULL), + }, + { + cpu_to_le64(0x0003FFFC00ULL), + cpu_to_le64(0x0003FFFFFFULL), + cpu_to_le64(0x0FFFFFFC00ULL), + }, + { + cpu_to_le64(0x000FFFF000ULL), + cpu_to_le64(0x000FFFFFFCULL), + cpu_to_le64(0x3FFFFFF000ULL), + }, + { + cpu_to_le64(0x007FFF8000ULL), + cpu_to_le64(0x007FFFFFE0ULL), + cpu_to_le64(0xFFFFFF8000ULL), + }, + { + cpu_to_le64(0x01FFFE0000ULL), + cpu_to_le64(0x01FFFFFF80ULL), + cpu_to_le64(0xFFFFFE0000ULL), + }, + { + cpu_to_le64(0x0FFFF00000ULL), + cpu_to_le64(0x0FFFFFFC00ULL), + cpu_to_le64(0x0ULL), + }, + { + cpu_to_le64(0x3FFFC00000ULL), + cpu_to_le64(0x3FFFFFF000ULL), + cpu_to_le64(0x0) + }, + { + cpu_to_le64(0xFFFE000000ULL), + cpu_to_le64(0xFFFFFF8000ULL), + cpu_to_le64(0x0) + }, + { + cpu_to_le64(0xFFF8000000ULL), + cpu_to_le64(0xFFFFFE0000ULL), + cpu_to_le64(0x0) + }, + { + cpu_to_le64(0xFFC0000000ULL), + cpu_to_le64(0x0ULL), + cpu_to_le64(0x0ULL) + }, +}; + +enum iwl_bt_kill_msk { + BT_KILL_MSK_DEFAULT, + BT_KILL_MSK_NEVER, + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_MAX, +}; + +static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = { + [BT_KILL_MSK_DEFAULT] = 0xfffffc00, + [BT_KILL_MSK_NEVER] = 0xffffffff, + [BT_KILL_MSK_ALWAYS] = 0, +}; + +static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { + { + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_ALWAYS, + }, + { + BT_KILL_MSK_NEVER, + BT_KILL_MSK_NEVER, + BT_KILL_MSK_NEVER, + }, + { + BT_KILL_MSK_NEVER, + BT_KILL_MSK_NEVER, + BT_KILL_MSK_NEVER, + }, + { + BT_KILL_MSK_DEFAULT, + BT_KILL_MSK_NEVER, + BT_KILL_MSK_DEFAULT, + }, +}; + +static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { + { + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_ALWAYS, + }, + { + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_ALWAYS, + }, + { + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_ALWAYS, + }, + { + 
BT_KILL_MSK_DEFAULT, + BT_KILL_MSK_ALWAYS, + BT_KILL_MSK_DEFAULT, + }, +}; + +struct corunning_block_luts { + u8 range; + __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; +}; + +/* + * Ranges for the antenna coupling calibration / co-running block LUT: + * LUT0: [ 0, 12[ + * LUT1: [12, 20[ + * LUT2: [20, 21[ + * LUT3: [21, 23[ + * LUT4: [23, 27[ + * LUT5: [27, 30[ + * LUT6: [30, 32[ + * LUT7: [32, 33[ + * LUT8: [33, - [ + */ +static const struct corunning_block_luts antenna_coupling_ranges[] = { + { + .range = 0, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 12, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 20, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 21, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 23, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 27, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 30, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 32, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, + { + .range = 33, + .lut20 = { + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), + }, + }, +}; + +static enum iwl_bt_coex_lut_type +iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + enum iwl_bt_coex_lut_type ret; + u16 phy_ctx_id; + + /* + * Checking that we hold mvm->mutex is a good idea, but the rate + * control can't acquire the mutex since it runs in Tx path. + * So this is racy in that case, but in the worst case, the AMPDU + * size limit will be wrong for a short time which is not a big + * issue. + */ + + rcu_read_lock(); + + chanctx_conf = rcu_dereference(vif->chanctx_conf); + + if (!chanctx_conf || + chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { + rcu_read_unlock(); + return BT_COEX_INVALID_LUT; + } + + ret = BT_COEX_TX_DIS_LUT; + + if (mvm->cfg->bt_shared_single_ant) { + rcu_read_unlock(); + return ret; + } + + phy_ctx_id = *((u16 *)chanctx_conf->drv_priv); + + if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id) + ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut); + else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id) + ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut); + /* else - default = TX TX disallowed */ + + rcu_read_unlock(); + + return ret; +} + +int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) +{ + struct iwl_bt_coex_cmd_old *bt_cmd; + struct iwl_host_cmd cmd = { + .id = BT_CONFIG, + .len = { sizeof(*bt_cmd), }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + int ret; + u32 flags; + + ret = iwl_send_bt_prio_tbl(mvm); + if (ret) + return ret; + + bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); + if (!bt_cmd) + return -ENOMEM; + cmd.data[0] = bt_cmd; + + lockdep_assert_held(&mvm->mutex); + + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { + switch (mvm->bt_force_ant_mode) { + case BT_FORCE_ANT_AUTO: + flags = BT_COEX_AUTO_OLD; + break; + case BT_FORCE_ANT_BT: + flags = BT_COEX_BT_OLD; + break; + case BT_FORCE_ANT_WIFI: + flags = BT_COEX_WIFI_OLD; + break; + default: + WARN_ON(1); + flags = 0; + } + + bt_cmd->flags = cpu_to_le32(flags); + bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE); + goto send_cmd; + } + + bt_cmd->max_kill = 5; + bt_cmd->bt4_antenna_isolation_thr = + IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS; + bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling; 
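+ /* Fixed Tx-Tx delta and Tx-Rx frequency thresholds; no LUT override is requested here, the decision LUTs are picked below from the antenna configuration. */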
+ bt_cmd->bt4_tx_tx_delta_freq_thr = 15; + bt_cmd->bt4_tx_rx_max_freq0 = 15; + bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT; + bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT; + + flags = iwlwifi_mod_params.bt_coex_active ? + BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD; + bt_cmd->flags = cpu_to_le32(flags); + + bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE | + BT_VALID_BT_PRIO_BOOST | + BT_VALID_MAX_KILL | + BT_VALID_3W_TMRS | + BT_VALID_KILL_ACK | + BT_VALID_KILL_CTS | + BT_VALID_REDUCED_TX_POWER | + BT_VALID_LUT | + BT_VALID_WIFI_RX_SW_PRIO_BOOST | + BT_VALID_WIFI_TX_SW_PRIO_BOOST | + BT_VALID_ANT_ISOLATION | + BT_VALID_ANT_ISOLATION_THRS | + BT_VALID_TXTX_DELTA_FREQ_THRS | + BT_VALID_TXRX_MAX_FREQ_0 | + BT_VALID_SYNC_TO_SCO | + BT_VALID_TTC | + BT_VALID_RRC); + + if (IWL_MVM_BT_COEX_SYNC2SCO) + bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); + + if (iwl_mvm_bt_is_plcr_supported(mvm)) { + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | + BT_VALID_CORUN_LUT_40); + bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); + } + + if (IWL_MVM_BT_COEX_MPLUT) { + bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); + } + + if (IWL_MVM_BT_COEX_TTC) + bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC); + + if (iwl_mvm_bt_is_rrc_supported(mvm)) + bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC); + + if (mvm->cfg->bt_shared_single_ant) + memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant, + sizeof(iwl_single_shared_ant)); + else + memcpy(&bt_cmd->decision_lut, iwl_combined_lookup, + sizeof(iwl_combined_lookup)); + + /* Take first Co-running block LUT to get started */ + memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20, + sizeof(bt_cmd->bt4_corun_lut20)); + memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20, + sizeof(bt_cmd->bt4_corun_lut40)); + + memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost, + sizeof(iwl_bt_prio_boost)); + bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0); + bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1); + +send_cmd: + memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); + memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old)); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + + kfree(bt_cmd); + return ret; +} + +static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm) +{ + struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old; + u32 primary_lut = le32_to_cpu(notif->primary_ch_lut); + u32 ag = le32_to_cpu(notif->bt_activity_grading); + struct iwl_bt_coex_cmd_old *bt_cmd; + u8 ack_kill_msk, cts_kill_msk; + struct iwl_host_cmd cmd = { + .id = BT_CONFIG, + .data[0] = &bt_cmd, + .len = { sizeof(*bt_cmd), }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + int ret = 0; + + lockdep_assert_held(&mvm->mutex); + + ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut]; + cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut]; + + if (mvm->bt_ack_kill_msk[0] == ack_kill_msk && + mvm->bt_cts_kill_msk[0] == cts_kill_msk) + return 0; + + mvm->bt_ack_kill_msk[0] = ack_kill_msk; + mvm->bt_cts_kill_msk[0] = cts_kill_msk; + + bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); + if (!bt_cmd) + return -ENOMEM; + cmd.data[0] = bt_cmd; + bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); + + bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]); + bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE | + BT_VALID_KILL_ACK | + BT_VALID_KILL_CTS); + + ret = 
iwl_mvm_send_cmd(mvm, &cmd); + + kfree(bt_cmd); + return ret; +} + +static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, + bool enable) +{ + struct iwl_bt_coex_cmd_old *bt_cmd; + /* Send ASYNC since this can be sent from an atomic context */ + struct iwl_host_cmd cmd = { + .id = BT_CONFIG, + .len = { sizeof(*bt_cmd), }, + .dataflags = { IWL_HCMD_DFL_DUP, }, + .flags = CMD_ASYNC, + }; + struct iwl_mvm_sta *mvmsta; + int ret; + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); + if (!mvmsta) + return 0; + + /* nothing to do */ + if (mvmsta->bt_reduced_txpower == enable) + return 0; + + bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC); + if (!bt_cmd) + return -ENOMEM; + cmd.data[0] = bt_cmd; + bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); + + bt_cmd->valid_bit_msk = + cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER); + bt_cmd->bt_reduced_tx_power = sta_id; + + if (enable) + bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT; + + IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", + enable ? "en" : "dis", sta_id); + + mvmsta->bt_reduced_txpower = enable; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + + kfree(bt_cmd); + return ret; +} + +struct iwl_bt_iterator_data { + struct iwl_bt_coex_profile_notif_old *notif; + struct iwl_mvm *mvm; + struct ieee80211_chanctx_conf *primary; + struct ieee80211_chanctx_conf *secondary; + bool primary_ll; +}; + +static inline +void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool enable, int rssi) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->bf_data.last_bt_coex_event = rssi; + mvmvif->bf_data.bt_coex_max_thold = + enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; + mvmvif->bf_data.bt_coex_min_thold = + enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; +} + +/* must be called under rcu_read_lock */ +static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_bt_iterator_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + struct ieee80211_chanctx_conf *chanctx_conf; + enum ieee80211_smps_mode smps_mode; + u32 bt_activity_grading; + int ave_rssi; + + lockdep_assert_held(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + /* default smps_mode for BSS / P2P client is AUTOMATIC */ + smps_mode = IEEE80211_SMPS_AUTOMATIC; + break; + case NL80211_IFTYPE_AP: + if (!mvmvif->ap_ibss_active) + return; + break; + default: + return; + } + + chanctx_conf = rcu_dereference(vif->chanctx_conf); + + /* If channel context is invalid or not on 2.4GHz .. */ + if ((!chanctx_conf || + chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { + if (vif->type == NL80211_IFTYPE_STATION) { + /* ... relax constraints and disable rssi events */ + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, + smps_mode); + iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, + false); + iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); + } + return; + } + + bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); + if (bt_activity_grading >= BT_HIGH_TRAFFIC) + smps_mode = IEEE80211_SMPS_STATIC; + else if (bt_activity_grading >= BT_LOW_TRAFFIC) + smps_mode = vif->type == NL80211_IFTYPE_AP ? 
+ IEEE80211_SMPS_OFF : + IEEE80211_SMPS_DYNAMIC; + + /* relax SMPS contraints for next association */ + if (!vif->bss_conf.assoc) + smps_mode = IEEE80211_SMPS_AUTOMATIC; + + if (mvmvif->phy_ctxt && + data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) + smps_mode = IEEE80211_SMPS_AUTOMATIC; + + IWL_DEBUG_COEX(data->mvm, + "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n", + mvmvif->id, data->notif->bt_status, bt_activity_grading, + smps_mode); + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, + smps_mode); + + /* low latency is always primary */ + if (iwl_mvm_vif_low_latency(mvmvif)) { + data->primary_ll = true; + + data->secondary = data->primary; + data->primary = chanctx_conf; + } + + if (vif->type == NL80211_IFTYPE_AP) { + if (!mvmvif->ap_ibss_active) + return; + + if (chanctx_conf == data->primary) + return; + + if (!data->primary_ll) { + /* + * downgrade the current primary no matter what its + * type is. + */ + data->secondary = data->primary; + data->primary = chanctx_conf; + } else { + /* there is low latency vif - we will be secondary */ + data->secondary = chanctx_conf; + } + return; + } + + /* + * STA / P2P Client, try to be primary if first vif. If we are in low + * latency mode, we are already in primary and just don't do much + */ + if (!data->primary || data->primary == chanctx_conf) + data->primary = chanctx_conf; + else if (!data->secondary) + /* if secondary is not NULL, it might be a GO */ + data->secondary = chanctx_conf; + + /* + * don't reduce the Tx power if one of these is true: + * we are in LOOSE + * single share antenna product + * BT is active + * we are associated + */ + if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || + mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || + !data->notif->bt_status) { + iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); + iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); + return; + } + + /* try to get the avg rssi from fw */ + ave_rssi = mvmvif->bf_data.ave_beacon_signal; + + /* if the RSSI isn't valid, fake it is very low */ + if (!ave_rssi) + ave_rssi = -100; + if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { + if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) + IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); + } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { + if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) + IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); + } + + /* Begin to monitor the RSSI: it may influence the reduced Tx power */ + iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); +} + +static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) +{ + struct iwl_bt_iterator_data data = { + .mvm = mvm, + .notif = &mvm->last_bt_notif_old, + }; + struct iwl_bt_coex_ci_cmd_old cmd = {}; + u8 ci_bw_idx; + + /* Ignore updates if we are in force mode */ + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) + return; + + rcu_read_lock(); + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_bt_notif_iterator, &data); + + if (data.primary) { + struct ieee80211_chanctx_conf *chan = data.primary; + + if (WARN_ON(!chan->def.chan)) { + rcu_read_unlock(); + return; + } + + if (chan->def.width < NL80211_CHAN_WIDTH_40) { + ci_bw_idx = 0; + cmd.co_run_bw_primary = 0; + } else { + cmd.co_run_bw_primary = 1; + if (chan->def.center_freq1 > + chan->def.chan->center_freq) + ci_bw_idx = 2; + else + ci_bw_idx = 1; + } + + cmd.bt_primary_ci = + 
iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; + cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv); + } + + if (data.secondary) { + struct ieee80211_chanctx_conf *chan = data.secondary; + + if (WARN_ON(!data.secondary->def.chan)) { + rcu_read_unlock(); + return; + } + + if (chan->def.width < NL80211_CHAN_WIDTH_40) { + ci_bw_idx = 0; + cmd.co_run_bw_secondary = 0; + } else { + cmd.co_run_bw_secondary = 1; + if (chan->def.center_freq1 > + chan->def.chan->center_freq) + ci_bw_idx = 2; + else + ci_bw_idx = 1; + } + + cmd.bt_secondary_ci = + iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; + cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv); + } + + rcu_read_unlock(); + + /* Don't spam the fw with the same command over and over */ + if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) { + if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, + sizeof(cmd), &cmd)) + IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); + memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd)); + } + + if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm)) + IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); +} + +void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data; + + IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); + IWL_DEBUG_COEX(mvm, "\tBT status: %s\n", + notif->bt_status ? "ON" : "OFF"); + IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn); + IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); + IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", + le32_to_cpu(notif->primary_ch_lut)); + IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", + le32_to_cpu(notif->secondary_ch_lut)); + IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", + le32_to_cpu(notif->bt_activity_grading)); + IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n", + notif->bt_agg_traffic_load); + + /* remember this notification for future use: rssi fluctuations */ + memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old)); + + iwl_mvm_bt_coex_notif_handle(mvm); +} + +static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_bt_iterator_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + + struct ieee80211_chanctx_conf *chanctx_conf; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + /* If channel context is invalid or not on 2.4GHz - don't count it */ + if (!chanctx_conf || + chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + if (vif->type != NL80211_IFTYPE_STATION || + mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) + return; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], + lockdep_is_held(&mvm->mutex)); + + /* This can happen if the station has been removed right now */ + if (IS_ERR_OR_NULL(sta)) + return; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); +} + +void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum ieee80211_rssi_event_data rssi_event) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_bt_iterator_data data = { + .mvm = mvm, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* Ignore updates if we are in force mode */ + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) + return; + + /* 
+ * Rssi update while not associated - can happen since the statistics + * are handled asynchronously + */ + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) + return; + + /* No BT - reports should be disabled */ + if (!mvm->last_bt_notif_old.bt_status) + return; + + IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, + rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); + + /* + * Check if rssi is good enough for reduced Tx power, but not in loose + * scheme. + */ + if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || + iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) + ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, + false); + else + ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); + + if (ret) + IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_bt_rssi_iterator, &data); + + if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm)) + IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); +} + +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) +#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) + +u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + enum iwl_bt_coex_lut_type lut_type; + + if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) < + BT_HIGH_TRAFFIC) + return LINK_QUAL_AGG_TIME_LIMIT_DEF; + + if (mvm->last_bt_notif_old.ttc_enabled) + return LINK_QUAL_AGG_TIME_LIMIT_DEF; + + lut_type = iwl_get_coex_type(mvm, mvmsta->vif); + + if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) + return LINK_QUAL_AGG_TIME_LIMIT_DEF; + + /* tight coex, high bt traffic, reduce AGG time limit */ + return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; +} + +bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + enum iwl_bt_coex_lut_type lut_type; + + if (mvm->last_bt_notif_old.ttc_enabled) + return true; + + if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) < + BT_HIGH_TRAFFIC) + return true; + + /* + * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas + * since BT is already killed. + * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while + * we Tx. + * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO. 
+ */ + lut_type = iwl_get_coex_type(mvm, mvmsta->vif); + return lut_type != BT_COEX_LOOSE_LUT; +} + +bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm) +{ + u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); + return ag < BT_HIGH_TRAFFIC; +} + +bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, + enum ieee80211_band band) +{ + u32 bt_activity = + le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); + + if (band != IEEE80211_BAND_2GHZ) + return false; + + return bt_activity >= BT_LOW_TRAFFIC; +} + +void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm) +{ + iwl_mvm_bt_coex_notif_handle(mvm); +} + +void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 ant_isolation = le32_to_cpup((void *)pkt->data); + u8 __maybe_unused lower_bound, upper_bound; + u8 lut; + + struct iwl_bt_coex_cmd_old *bt_cmd; + struct iwl_host_cmd cmd = { + .id = BT_CONFIG, + .len = { sizeof(*bt_cmd), }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + + if (!iwl_mvm_bt_is_plcr_supported(mvm)) + return; + + lockdep_assert_held(&mvm->mutex); + + /* Ignore updates if we are in force mode */ + if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) + return; + + if (ant_isolation == mvm->last_ant_isol) + return; + + for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) + if (ant_isolation < antenna_coupling_ranges[lut + 1].range) + break; + + lower_bound = antenna_coupling_ranges[lut].range; + + if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1) + upper_bound = antenna_coupling_ranges[lut + 1].range; + else + upper_bound = antenna_coupling_ranges[lut].range; + + IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n", + ant_isolation, lower_bound, upper_bound, lut); + + mvm->last_ant_isol = ant_isolation; + + if (mvm->last_corun_lut == lut) + return; + + mvm->last_corun_lut = lut; + + bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); + if (!bt_cmd) + return; + cmd.data[0] = bt_cmd; + + bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE | + BT_VALID_CORUN_LUT_20 | + BT_VALID_CORUN_LUT_40); + + /* For the moment, use the same LUT for 20GHz and 40GHz */ + memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20, + sizeof(bt_cmd->bt4_corun_lut20)); + + memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, + sizeof(bt_cmd->bt4_corun_lut40)); + + if (iwl_mvm_send_cmd(mvm, &cmd)) + IWL_ERR(mvm, "failed to send BT_CONFIG command\n"); + + kfree(bt_cmd); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h new file mode 100644 index 000000000000..5c21231e195d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -0,0 +1,139 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef __MVM_CONSTANTS_H +#define __MVM_CONSTANTS_H + +#include + +#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) +#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) +#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) +#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) +#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ +#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ +#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 +#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) +#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) +#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) +#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20 +#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8 +#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30 +#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20 +#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50 +#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50 +#define IWL_MVM_PS_SNOOZE_INTERVAL 25 +#define IWL_MVM_PS_SNOOZE_WINDOW 50 +#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25 +#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64 +#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62 +#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65 +#define IWL_MVM_BT_COEX_SYNC2SCO 1 +#define IWL_MVM_BT_COEX_CORUNNING 0 +#define IWL_MVM_BT_COEX_MPLUT 1 +#define IWL_MVM_BT_COEX_RRC 1 +#define IWL_MVM_BT_COEX_TTC 1 +#define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200 +#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451 +#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30 +#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0 +#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0 +#define IWL_MVM_QUOTA_THRESHOLD 4 +#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 +#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 +#define IWL_MVM_TOF_IS_RESPONDER 0 +#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 +#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 +#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 +#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3 +#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3 +#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2 +#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2 +#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1 +#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16 +#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3 +#define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1 +#define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3 +#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8 +#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */ +#define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */ +#define IWL_MVM_RS_MISSED_RATE_MAX 15 +#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160 +#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480 +#define IWL_MVM_RS_LEGACY_TABLE_COUNT 160 +#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400 +#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500 +#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500 +#define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */ +#define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */ +#define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. 
valid 100-8000 */ +#define IWL_MVM_RS_AGG_DISABLE_START 3 +#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */ +#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ +#define IWL_MVM_RS_TPC_TX_POWER_STEP 3 + +#endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c new file mode 100644 index 000000000000..85ae902df7c0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -0,0 +1,2104 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include "iwl-modparams.h" +#include "fw-api.h" +#include "mvm.h" + +void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (iwlwifi_mod_params.sw_crypto) + return; + + mutex_lock(&mvm->mutex); + + memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); + memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN); + mvmvif->rekey_data.replay_ctr = + cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); + mvmvif->rekey_data.valid = true; + + mutex_unlock(&mvm->mutex); +} + +#if IS_ENABLED(CONFIG_IPV6) +void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct inet6_ifaddr *ifa; + int idx = 0; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + mvmvif->target_ipv6_addrs[idx] = ifa->addr; + idx++; + if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) + break; + } + read_unlock_bh(&idev->lock); + + mvmvif->num_target_ipv6_addrs = idx; +} +#endif + +void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int idx) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->tx_key_idx = idx; +} + +static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) +{ + int i; + + for (i = 0; i < IWL_P1K_SIZE; i++) + out[i] = cpu_to_le16(p1k[i]); +} + +struct wowlan_key_data { + struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; + struct iwl_wowlan_tkip_params_cmd *tkip; + bool error, use_rsc_tsc, use_tkip; + int wep_key_idx; +}; + +static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct wowlan_key_data *data = _data; + struct aes_sc *aes_sc, *aes_tx_sc = NULL; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct iwl_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWL_P1K_SIZE]; + int ret, i; + + mutex_lock(&mvm->mutex); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ + struct { + struct iwl_mvm_wep_key_cmd wep_key_cmd; + struct iwl_mvm_wep_key wep_key; + } __packed wkc = { + .wep_key_cmd.mac_id_n_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + .wep_key_cmd.num_keys = 1, + /* firmware sets STA_KEY_FLG_WEP_13BYTES */ + .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, + .wep_key.key_index = key->keyidx, + .wep_key.key_size = key->keylen, + }; + + /* + * This will fail -- the key functions don't set support + * pairwise WEP keys. However, that's better than silently + * failing WoWLAN. Or maybe not? 
+ */ + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + break; + + memcpy(&wkc.wep_key.key[3], key->key, key->keylen); + if (key->keyidx == mvmvif->tx_key_idx) { + /* TX key must be at offset 0 */ + wkc.wep_key.key_offset = 0; + } else { + /* others start at 1 */ + data->wep_key_idx++; + wkc.wep_key.key_offset = data->wep_key_idx; + } + + ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); + data->error = ret != 0; + + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; + + /* don't upload key again */ + goto out_unlock; + } + default: + data->error = true; + goto out_unlock; + case WLAN_CIPHER_SUITE_AES_CMAC: + /* + * Ignore CMAC keys -- the WoWLAN firmware doesn't support them + * but we also shouldn't abort suspend due to that. It does have + * support for the IGTK key renewal, but doesn't really use the + * IGTK for anything. This means we could spuriously wake up or + * be deauthenticated, but that was considered acceptable. + */ + goto out_unlock; + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; + tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; + + rx_p1ks = data->tkip->rx_uni; + + ieee80211_get_key_tx_seq(key, &seq); + tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + + ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); + + memcpy(data->tkip->mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + rx_mic_key = data->tkip->mic_keys.rx_unicast; + } else { + tkip_sc = + data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; + rx_p1ks = data->tkip->rx_multi; + rx_mic_key = data->tkip->mic_keys.rx_mcast; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. + */ + for (i = 0; i < IWL_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); + /* wrapping isn't allowed, AP must rekey */ + if (seq.tkip.iv32 > cur_rx_iv32) + cur_rx_iv32 = seq.tkip.iv32; + } + + ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, + cur_rx_iv32, p1k); + iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); + ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, + cur_rx_iv32 + 1, p1k); + iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); + + memcpy(rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + data->use_tkip = true; + data->use_rsc_tsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + if (sta) { + u64 pn64; + + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; + aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; + + pn64 = atomic64_read(&key->tx_pn); + aes_tx_sc->pn = cpu_to_le64(pn64); + } else { + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 for checking the IV in the frames. + */ + for (i = 0; i < IWL_NUM_RSC; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + data->use_rsc_tsc = true; + break; + } + + /* + * The D3 firmware hardcodes the key offset 0 as the key it uses + * to transmit packets to the AP, i.e. the PTK. 
+ */ + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + key->hw_key_idx = 0; + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + } else { + /* + * firmware only supports TSC/RSC for a single key, + * so if there are multiple keep overwriting them + * with new ones -- this relies on mac80211 doing + * list_add_tail(). + */ + key->hw_key_idx = 1; + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; + } + + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true); + data->error = ret != 0; +out_unlock: + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan) +{ + struct iwl_wowlan_patterns_cmd *pattern_cmd; + struct iwl_host_cmd cmd = { + .id = WOWLAN_PATTERNS, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + int i, err; + + if (!wowlan->n_patterns) + return 0; + + cmd.len[0] = sizeof(*pattern_cmd) + + wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern); + + pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); + if (!pattern_cmd) + return -ENOMEM; + + pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); + + for (i = 0; i < wowlan->n_patterns; i++) { + int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); + + memcpy(&pattern_cmd->patterns[i].mask, + wowlan->patterns[i].mask, mask_len); + memcpy(&pattern_cmd->patterns[i].pattern, + wowlan->patterns[i].pattern, + wowlan->patterns[i].pattern_len); + pattern_cmd->patterns[i].mask_size = mask_len; + pattern_cmd->patterns[i].pattern_size = + wowlan->patterns[i].pattern_len; + } + + cmd.data[0] = pattern_cmd; + err = iwl_mvm_send_cmd(mvm, &cmd); + kfree(pattern_cmd); + return err; +} + +enum iwl_mvm_tcp_packet_type { + MVM_TCP_TX_SYN, + MVM_TCP_RX_SYNACK, + MVM_TCP_TX_DATA, + MVM_TCP_RX_ACK, + MVM_TCP_RX_WAKE, + MVM_TCP_TX_FIN, +}; + +static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr) +{ + __sum16 check = tcp_v4_check(len, saddr, daddr, 0); + return cpu_to_le16(be16_to_cpu((__force __be16)check)); +} + +static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif, + struct cfg80211_wowlan_tcp *tcp, + void *_pkt, u8 *mask, + __le16 *pseudo_hdr_csum, + enum iwl_mvm_tcp_packet_type ptype) +{ + struct { + struct ethhdr eth; + struct iphdr ip; + struct tcphdr tcp; + u8 data[]; + } __packed *pkt = _pkt; + u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr); + int i; + + pkt->eth.h_proto = cpu_to_be16(ETH_P_IP), + pkt->ip.version = 4; + pkt->ip.ihl = 5; + pkt->ip.protocol = IPPROTO_TCP; + + switch (ptype) { + case MVM_TCP_TX_SYN: + case MVM_TCP_TX_DATA: + case MVM_TCP_TX_FIN: + memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN); + memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN); + pkt->ip.ttl = 128; + pkt->ip.saddr = tcp->src; + pkt->ip.daddr = tcp->dst; + pkt->tcp.source = cpu_to_be16(tcp->src_port); + pkt->tcp.dest = cpu_to_be16(tcp->dst_port); + /* overwritten for TX SYN later */ + pkt->tcp.doff = sizeof(struct tcphdr) / 4; + pkt->tcp.window = cpu_to_be16(65000); + break; + case MVM_TCP_RX_SYNACK: + case MVM_TCP_RX_ACK: + case MVM_TCP_RX_WAKE: + memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN); + memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN); + pkt->ip.saddr = tcp->dst; + pkt->ip.daddr = tcp->src; + pkt->tcp.source = cpu_to_be16(tcp->dst_port); + pkt->tcp.dest = cpu_to_be16(tcp->src_port); + break; + default: + WARN_ON(1); + return; + } + + switch (ptype) { + case MVM_TCP_TX_SYN: + /* firmware assumes 8 option bytes - 8 NOPs for now */ + memset(pkt->data, 0x01, 8); + ip_tot_len += 8; + pkt->tcp.doff = (sizeof(struct tcphdr) + 8) 
/ 4; + pkt->tcp.syn = 1; + break; + case MVM_TCP_TX_DATA: + ip_tot_len += tcp->payload_len; + memcpy(pkt->data, tcp->payload, tcp->payload_len); + pkt->tcp.psh = 1; + pkt->tcp.ack = 1; + break; + case MVM_TCP_TX_FIN: + pkt->tcp.fin = 1; + pkt->tcp.ack = 1; + break; + case MVM_TCP_RX_SYNACK: + pkt->tcp.syn = 1; + pkt->tcp.ack = 1; + break; + case MVM_TCP_RX_ACK: + pkt->tcp.ack = 1; + break; + case MVM_TCP_RX_WAKE: + ip_tot_len += tcp->wake_len; + pkt->tcp.psh = 1; + pkt->tcp.ack = 1; + memcpy(pkt->data, tcp->wake_data, tcp->wake_len); + break; + } + + switch (ptype) { + case MVM_TCP_TX_SYN: + case MVM_TCP_TX_DATA: + case MVM_TCP_TX_FIN: + pkt->ip.tot_len = cpu_to_be16(ip_tot_len); + pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl); + break; + case MVM_TCP_RX_WAKE: + for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) { + u8 tmp = tcp->wake_mask[i]; + mask[i + 6] |= tmp << 6; + if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8)) + mask[i + 7] = tmp >> 2; + } + /* fall through for ethernet/IP/TCP headers mask */ + case MVM_TCP_RX_SYNACK: + case MVM_TCP_RX_ACK: + mask[0] = 0xff; /* match ethernet */ + /* + * match ethernet, ip.version, ip.ihl + * the ip.ihl half byte is really masked out by firmware + */ + mask[1] = 0x7f; + mask[2] = 0x80; /* match ip.protocol */ + mask[3] = 0xfc; /* match ip.saddr, ip.daddr */ + mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */ + mask[5] = 0x80; /* match tcp flags */ + /* leave rest (0 or set for MVM_TCP_RX_WAKE) */ + break; + }; + + *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr), + pkt->ip.saddr, pkt->ip.daddr); +} + +static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_wowlan_tcp *tcp) +{ + struct iwl_wowlan_remote_wake_config *cfg; + struct iwl_host_cmd cmd = { + .id = REMOTE_WAKE_CONFIG_CMD, + .len = { sizeof(*cfg), }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + int ret; + + if (!tcp) + return 0; + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return -ENOMEM; + cmd.data[0] = cfg; + + cfg->max_syn_retries = 10; + cfg->max_data_retries = 10; + cfg->tcp_syn_ack_timeout = 1; /* seconds */ + cfg->tcp_ack_timeout = 1; /* seconds */ + + /* SYN (TX) */ + iwl_mvm_build_tcp_packet( + vif, tcp, cfg->syn_tx.data, NULL, + &cfg->syn_tx.info.tcp_pseudo_header_checksum, + MVM_TCP_TX_SYN); + cfg->syn_tx.info.tcp_payload_length = 0; + + /* SYN/ACK (RX) */ + iwl_mvm_build_tcp_packet( + vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask, + &cfg->synack_rx.info.tcp_pseudo_header_checksum, + MVM_TCP_RX_SYNACK); + cfg->synack_rx.info.tcp_payload_length = 0; + + /* KEEPALIVE/ACK (TX) */ + iwl_mvm_build_tcp_packet( + vif, tcp, cfg->keepalive_tx.data, NULL, + &cfg->keepalive_tx.info.tcp_pseudo_header_checksum, + MVM_TCP_TX_DATA); + cfg->keepalive_tx.info.tcp_payload_length = + cpu_to_le16(tcp->payload_len); + cfg->sequence_number_offset = tcp->payload_seq.offset; + /* length must be 0..4, the field is little endian */ + cfg->sequence_number_length = tcp->payload_seq.len; + cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start); + cfg->keepalive_interval = cpu_to_le16(tcp->data_interval); + if (tcp->payload_tok.len) { + cfg->token_offset = tcp->payload_tok.offset; + cfg->token_length = tcp->payload_tok.len; + cfg->num_tokens = + cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len); + memcpy(cfg->tokens, tcp->payload_tok.token_stream, + tcp->tokens_size); + } else { + /* set tokens to max value to almost never run out */ + cfg->num_tokens = cpu_to_le16(65535); + } + + /* 
ACK (RX) */ + iwl_mvm_build_tcp_packet( + vif, tcp, cfg->keepalive_ack_rx.data, + cfg->keepalive_ack_rx.rx_mask, + &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum, + MVM_TCP_RX_ACK); + cfg->keepalive_ack_rx.info.tcp_payload_length = 0; + + /* WAKEUP (RX) */ + iwl_mvm_build_tcp_packet( + vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask, + &cfg->wake_rx.info.tcp_pseudo_header_checksum, + MVM_TCP_RX_WAKE); + cfg->wake_rx.info.tcp_payload_length = + cpu_to_le16(tcp->wake_len); + + /* FIN */ + iwl_mvm_build_tcp_packet( + vif, tcp, cfg->fin_tx.data, NULL, + &cfg->fin_tx.info.tcp_pseudo_header_checksum, + MVM_TCP_TX_FIN); + cfg->fin_tx.info.tcp_payload_length = 0; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + kfree(cfg); + + return ret; +} + +static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *ap_sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_chanctx_conf *ctx; + u8 chains_static, chains_dynamic; + struct cfg80211_chan_def chandef; + int ret, i; + struct iwl_binding_cmd binding_cmd = {}; + struct iwl_time_quota_cmd quota_cmd = {}; + u32 status; + + /* add back the PHY */ + if (WARN_ON(!mvmvif->phy_ctxt)) + return -EINVAL; + + rcu_read_lock(); + ctx = rcu_dereference(vif->chanctx_conf); + if (WARN_ON(!ctx)) { + rcu_read_unlock(); + return -EINVAL; + } + chandef = ctx->def; + chains_static = ctx->rx_chains_static; + chains_dynamic = ctx->rx_chains_dynamic; + rcu_read_unlock(); + + ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, + chains_static, chains_dynamic); + if (ret) + return ret; + + /* add back the MAC */ + mvmvif->uploaded = false; + + if (WARN_ON(!vif->bss_conf.assoc)) + return -EINVAL; + + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) + return ret; + + /* add back binding - XXX refactor? 
*/ + binding_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, + mvmvif->phy_ctxt->color)); + binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); + binding_cmd.phy = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, + mvmvif->phy_ctxt->color)); + binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + for (i = 1; i < MAX_MACS_IN_BINDING; i++) + binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); + + status = 0; + ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, + sizeof(binding_cmd), &binding_cmd, + &status); + if (ret) { + IWL_ERR(mvm, "Failed to add binding: %d\n", ret); + return ret; + } + + if (status) { + IWL_ERR(mvm, "Binding command failed: %u\n", status); + return -EIO; + } + + ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false); + if (ret) + return ret; + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); + + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (ret) + return ret; + + /* and some quota */ + quota_cmd.quotas[0].id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, + mvmvif->phy_ctxt->color)); + quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA); + quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); + + for (i = 1; i < MAX_BINDINGS; i++) + quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); + + ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, + sizeof(quota_cmd), "a_cmd); + if (ret) + IWL_ERR(mvm, "Failed to send quota: %d\n", ret); + + if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) + IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); + + return 0; +} + +static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_nonqos_seq_query_cmd query_cmd = { + .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET), + .mac_id_n_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + }; + struct iwl_host_cmd cmd = { + .id = NON_QOS_TX_COUNTER_CMD, + .flags = CMD_WANT_SKB, + }; + int err; + u32 size; + + cmd.data[0] = &query_cmd; + cmd.len[0] = sizeof(query_cmd); + + err = iwl_mvm_send_cmd(mvm, &cmd); + if (err) + return err; + + size = iwl_rx_packet_payload_len(cmd.resp_pkt); + if (size < sizeof(__le16)) { + err = -EINVAL; + } else { + err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); + /* firmware returns next, not last-used seqno */ + err = (u16) (err - 0x10); + } + + iwl_free_resp(&cmd); + return err; +} + +void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_nonqos_seq_query_cmd query_cmd = { + .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET), + .mac_id_n_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + .value = cpu_to_le16(mvmvif->seqno), + }; + + /* return if called during restart, not resume from D3 */ + if (!mvmvif->seqno_valid) + return; + + mvmvif->seqno_valid = false; + + if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0, + sizeof(query_cmd), &query_cmd)) + IWL_ERR(mvm, "failed to set non-QoS seqno\n"); +} + +static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) +{ + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); + + iwl_trans_stop_device(mvm->trans); + + /* + * Set the HW restart bit -- this is mostly true as we're + * going to load new firmware and reprogram that, though + * the reprogramming is going to be manual to avoid adding + * 
all the MACs that aren't support. + * We don't have to clear up everything though because the + * reprogramming is manual. When we resume, we'll actually + * go through a proper restart sequence again to switch + * back to the runtime firmware image. + */ + set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + + /* We reprogram keys and shouldn't allocate new key indices */ + memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); + + mvm->ptk_ivlen = 0; + mvm->ptk_icvlen = 0; + mvm->ptk_ivlen = 0; + mvm->ptk_icvlen = 0; + + return iwl_mvm_load_d3_fw(mvm); +} + +static int +iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan, + struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, + struct ieee80211_sta *ap_sta) +{ + int ret; + struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); + + /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ + + wowlan_config_cmd->is_11n_connection = + ap_sta->ht_cap.ht_supported; + + /* Query the last used seqno and set it */ + ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); + if (ret < 0) + return ret; + + wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret); + + iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); + + if (wowlan->disconnect) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | + IWL_WOWLAN_WAKEUP_LINK_CHANGE); + if (wowlan->magic_pkt) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); + if (wowlan->gtk_rekey_failure) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); + if (wowlan->eap_identity_req) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); + if (wowlan->four_way_handshake) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); + if (wowlan->n_patterns) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); + + if (wowlan->rfkill_release) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); + + if (wowlan->tcp) { + /* + * Set the "link change" (really "link lost") flag as well + * since that implies losing the TCP connection. + */ + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | + IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | + IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | + IWL_WOWLAN_WAKEUP_LINK_CHANGE); + } + + return 0; +} + +static int +iwl_mvm_wowlan_config(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan, + struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, + struct ieee80211_sta *ap_sta) +{ + struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; + struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; + struct wowlan_key_data key_data = { + .use_rsc_tsc = false, + .tkip = &tkip_cmd, + .use_tkip = false, + }; + int ret; + + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; + + ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); + if (ret) + return ret; + + key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); + if (!key_data.rsc_tsc) + return -ENOMEM; + + if (!iwlwifi_mod_params.sw_crypto) { + /* + * This needs to be unlocked due to lock ordering + * constraints. Since we're in the suspend path + * that isn't really a problem though. 
+ */ + mutex_unlock(&mvm->mutex); + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_program_keys, + &key_data); + mutex_lock(&mvm->mutex); + if (key_data.error) { + ret = -EIO; + goto out; + } + + if (key_data.use_rsc_tsc) { + struct iwl_host_cmd rsc_tsc_cmd = { + .id = WOWLAN_TSC_RSC_PARAM, + .data[0] = key_data.rsc_tsc, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .len[0] = sizeof(*key_data.rsc_tsc), + }; + + ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd); + if (ret) + goto out; + } + + if (key_data.use_tkip) { + ret = iwl_mvm_send_cmd_pdu(mvm, + WOWLAN_TKIP_PARAM, + 0, sizeof(tkip_cmd), + &tkip_cmd); + if (ret) + goto out; + } + + if (mvmvif->rekey_data.valid) { + memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); + memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, + NL80211_KCK_LEN); + kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); + memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, + NL80211_KEK_LEN); + kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); + kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; + + ret = iwl_mvm_send_cmd_pdu(mvm, + WOWLAN_KEK_KCK_MATERIAL, 0, + sizeof(kek_kck_cmd), + &kek_kck_cmd); + if (ret) + goto out; + } + } + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(*wowlan_config_cmd), + wowlan_config_cmd); + if (ret) + goto out; + + ret = iwl_mvm_send_patterns(mvm, wowlan); + if (ret) + goto out; + + ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0); + if (ret) + goto out; + + ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp); + +out: + kfree(key_data.rsc_tsc); + return ret; +} + +static int +iwl_mvm_netdetect_config(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan, + struct cfg80211_sched_scan_request *nd_config, + struct ieee80211_vif *vif) +{ + struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; + int ret; + + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; + + /* rfkill release can be either for wowlan or netdetect */ + if (wowlan->rfkill_release) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(wowlan_config_cmd), + &wowlan_config_cmd); + if (ret) + return ret; + + ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, + IWL_MVM_SCAN_NETDETECT); + if (ret) + return ret; + + if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) + return -EBUSY; + + /* save the sched scan matchsets... 
*/ + if (nd_config->n_match_sets) { + mvm->nd_match_sets = kmemdup(nd_config->match_sets, + sizeof(*nd_config->match_sets) * + nd_config->n_match_sets, + GFP_KERNEL); + if (mvm->nd_match_sets) + mvm->n_nd_match_sets = nd_config->n_match_sets; + } + + /* ...and the sched scan channels for later reporting */ + mvm->nd_channels = kmemdup(nd_config->channels, + sizeof(*nd_config->channels) * + nd_config->n_channels, + GFP_KERNEL); + if (mvm->nd_channels) + mvm->n_nd_channels = nd_config->n_channels; + + return 0; +} + +static void iwl_mvm_free_nd(struct iwl_mvm *mvm) +{ + kfree(mvm->nd_match_sets); + mvm->nd_match_sets = NULL; + mvm->n_nd_match_sets = 0; + kfree(mvm->nd_channels); + mvm->nd_channels = NULL; + mvm->n_nd_channels = 0; +} + +static int __iwl_mvm_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan, + bool test) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *vif = NULL; + struct iwl_mvm_vif *mvmvif = NULL; + struct ieee80211_sta *ap_sta = NULL; + struct iwl_d3_manager_config d3_cfg_cmd_data = { + /* + * Program the minimum sleep time to 10 seconds, as many + * platforms have issues processing a wakeup signal while + * still being in the process of suspending. + */ + .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), + }; + struct iwl_host_cmd d3_cfg_cmd = { + .id = D3_CONFIG_CMD, + .flags = CMD_WANT_SKB, + .data[0] = &d3_cfg_cmd_data, + .len[0] = sizeof(d3_cfg_cmd_data), + }; + int ret; + int len __maybe_unused; + + if (!wowlan) { + /* + * mac80211 shouldn't get here, but for D3 test + * it doesn't warrant a warning + */ + WARN_ON(!test); + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + + vif = iwl_mvm_get_bss_vif(mvm); + if (IS_ERR_OR_NULL(vif)) { + ret = 1; + goto out_noreset; + } + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) { + /* if we're not associated, this must be netdetect */ + if (!wowlan->nd_config && !mvm->nd_config) { + ret = 1; + goto out_noreset; + } + + ret = iwl_mvm_netdetect_config( + mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif); + if (ret) + goto out; + + mvm->net_detect = true; + } else { + struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; + + ap_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(ap_sta)) { + ret = -EINVAL; + goto out_noreset; + } + + ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, + vif, mvmvif, ap_sta); + if (ret) + goto out_noreset; + ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, + vif, mvmvif, ap_sta); + if (ret) + goto out; + + mvm->net_detect = false; + } + + ret = iwl_mvm_power_update_device(mvm); + if (ret) + goto out; + + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + goto out; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->d3_wake_sysassert) + d3_cfg_cmd_data.wakeup_flags |= + cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); +#endif + + /* must be last -- this switches firmware state */ + ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); + if (ret) + goto out; +#ifdef CONFIG_IWLWIFI_DEBUGFS + len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); + if (len >= sizeof(u32)) { + mvm->d3_test_pme_ptr = + le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); + } +#endif + iwl_free_resp(&d3_cfg_cmd); + + clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + + iwl_trans_d3_suspend(mvm->trans, test); + out: + if (ret < 0) { + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + ieee80211_restart_hw(mvm->hw); + iwl_mvm_free_nd(mvm); + } + 
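
Both iwl_mvm_get_last_nonqos_seq() and the D3_CONFIG_CMD exchange above set CMD_WANT_SKB so the firmware's reply can be read back, and such a response must always be released with iwl_free_resp(). Below is a minimal sketch of that read-back pattern, assuming some command whose reply starts with a little-endian 32-bit value; the command id and payload arguments are placeholders for illustration, not a specific firmware API.

static int iwl_mvm_read_fw_u32_sketch(struct iwl_mvm *mvm, u32 id,
				      void *payload, u16 payload_len, u32 *out)
{
	struct iwl_host_cmd cmd = {
		.id = id,			/* placeholder command id */
		.flags = CMD_WANT_SKB,		/* keep the response packet for the caller */
		.data[0] = payload,
		.len[0] = payload_len,
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	/* validate the payload length before dereferencing the response */
	if (iwl_rx_packet_payload_len(cmd.resp_pkt) < sizeof(__le32))
		ret = -EINVAL;
	else
		*out = le32_to_cpup((__le32 *)cmd.resp_pkt->data);

	iwl_free_resp(&cmd);			/* mandatory whenever CMD_WANT_SKB was set */
	return ret;
}
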
out_noreset: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm) +{ + struct iwl_notification_wait wait_d3; + static const u16 d3_notif[] = { D3_CONFIG_CMD }; + int ret; + + iwl_init_notification_wait(&mvm->notif_wait, &wait_d3, + d3_notif, ARRAY_SIZE(d3_notif), + NULL, NULL); + + ret = iwl_mvm_enter_d0i3(mvm->hw->priv); + if (ret) + goto remove_notif; + + ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ); + WARN_ON_ONCE(ret); + return ret; + +remove_notif: + iwl_remove_notification(&mvm->notif_wait, &wait_d3); + return ret; +} + +int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + /* make sure the d0i3 exit work is not pending */ + flush_work(&mvm->d0i3_exit_work); + + ret = iwl_trans_suspend(mvm->trans); + if (ret) + return ret; + + mvm->trans->wowlan_d0i3 = wowlan->any; + if (mvm->trans->wowlan_d0i3) { + /* 'any' trigger means d0i3 usage */ + if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { + ret = iwl_mvm_enter_d0i3_sync(mvm); + + if (ret) + return ret; + } + + mutex_lock(&mvm->d0i3_suspend_mutex); + __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); + mutex_unlock(&mvm->d0i3_suspend_mutex); + + iwl_trans_d3_suspend(mvm->trans, false); + + return 0; + } + + return __iwl_mvm_suspend(hw, wowlan, false); +} + +/* converted data from the different status responses */ +struct iwl_wowlan_status_data { + u16 pattern_number; + u16 qos_seq_ctr[8]; + u32 wakeup_reasons; + u32 wake_packet_length; + u32 wake_packet_bufsize; + const u8 *wake_packet; +}; + +static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_wowlan_status_data *status) +{ + struct sk_buff *pkt = NULL; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; + struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; + u32 reasons = status->wakeup_reasons; + + if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { + wakeup_report = NULL; + goto report; + } + + if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) + wakeup.magic_pkt = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) + wakeup.pattern_idx = + status->pattern_number; + + if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) + wakeup.disconnect = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) + wakeup.gtk_rekey_failure = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) + wakeup.rfkill_release = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) + wakeup.eap_identity_req = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) + wakeup.four_way_handshake = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) + wakeup.tcp_connlost = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) + wakeup.tcp_nomoretokens = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) + wakeup.tcp_match = true; + + if (status->wake_packet_bufsize) { + int pktsize = status->wake_packet_bufsize; + int pktlen = status->wake_packet_length; + const u8 *pktdata = status->wake_packet; + struct ieee80211_hdr *hdr = (void *)pktdata; + int truncated = pktlen - pktsize; + + /* this would be a firmware bug */ + if (WARN_ON_ONCE(truncated < 0)) + truncated = 0; + + if (ieee80211_is_data(hdr->frame_control)) { + int hdrlen = ieee80211_hdrlen(hdr->frame_control); + int ivlen = 0, icvlen = 4; /* also FCS */ + + pkt = 
alloc_skb(pktsize, GFP_KERNEL); + if (!pkt) + goto report; + + memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen); + pktdata += hdrlen; + pktsize -= hdrlen; + + if (ieee80211_has_protected(hdr->frame_control)) { + /* + * This is unlocked and using gtk_i(c)vlen, + * but since everything is under RTNL still + * that's not really a problem - changing + * it would be difficult. + */ + if (is_multicast_ether_addr(hdr->addr1)) { + ivlen = mvm->gtk_ivlen; + icvlen += mvm->gtk_icvlen; + } else { + ivlen = mvm->ptk_ivlen; + icvlen += mvm->ptk_icvlen; + } + } + + /* if truncated, FCS/ICV is (partially) gone */ + if (truncated >= icvlen) { + icvlen = 0; + truncated -= icvlen; + } else { + icvlen -= truncated; + truncated = 0; + } + + pktsize -= ivlen + icvlen; + pktdata += ivlen; + + memcpy(skb_put(pkt, pktsize), pktdata, pktsize); + + if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) + goto report; + wakeup.packet = pkt->data; + wakeup.packet_present_len = pkt->len; + wakeup.packet_len = pkt->len - truncated; + wakeup.packet_80211 = false; + } else { + int fcslen = 4; + + if (truncated >= 4) { + truncated -= 4; + fcslen = 0; + } else { + fcslen -= truncated; + truncated = 0; + } + pktsize -= fcslen; + wakeup.packet = status->wake_packet; + wakeup.packet_present_len = pktsize; + wakeup.packet_len = pktlen - truncated; + wakeup.packet_80211 = true; + } + } + + report: + ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); + kfree_skb(pkt); +} + +static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, + struct ieee80211_key_seq *seq) +{ + u64 pn; + + pn = le64_to_cpu(sc->pn); + seq->ccmp.pn[0] = pn >> 40; + seq->ccmp.pn[1] = pn >> 32; + seq->ccmp.pn[2] = pn >> 24; + seq->ccmp.pn[3] = pn >> 16; + seq->ccmp.pn[4] = pn >> 8; + seq->ccmp.pn[5] = pn; +} + +static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc, + struct ieee80211_key_seq *seq) +{ + seq->tkip.iv32 = le32_to_cpu(sc->iv32); + seq->tkip.iv16 = le16_to_cpu(sc->iv16); +} + +static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs, + struct ieee80211_key_conf *key) +{ + int tid; + + BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); + + for (tid = 0; tid < IWL_NUM_RSC; tid++) { + struct ieee80211_key_seq seq = {}; + + iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); + ieee80211_set_key_rx_seq(key, tid, &seq); + } +} + +static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs, + struct ieee80211_key_conf *key) +{ + int tid; + + BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); + + for (tid = 0; tid < IWL_NUM_RSC; tid++) { + struct ieee80211_key_seq seq = {}; + + iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq); + ieee80211_set_key_rx_seq(key, tid, &seq); + } +} + +static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, + struct iwl_wowlan_status *status) +{ + union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key); + break; + case WLAN_CIPHER_SUITE_TKIP: + iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key); + break; + default: + WARN_ON(1); + } +} + +struct iwl_mvm_d3_gtk_iter_data { + struct iwl_wowlan_status *status; + void *last_gtk; + u32 cipher; + bool find_phase, unhandled_cipher; + int num_keys; +}; + +static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm_d3_gtk_iter_data *data = _data; + + if (data->unhandled_cipher) + return; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case 
WLAN_CIPHER_SUITE_WEP104: + /* ignore WEP completely, nothing to do */ + return; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_TKIP: + /* we support these */ + break; + default: + /* everything else (even CMAC for MFP) - disconnect from AP */ + data->unhandled_cipher = true; + return; + } + + data->num_keys++; + + /* + * pairwise key - update sequence counters only; + * note that this assumes no TDLS sessions are active + */ + if (sta) { + struct ieee80211_key_seq seq = {}; + union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc; + + if (data->find_phase) + return; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key); + atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); + break; + case WLAN_CIPHER_SUITE_TKIP: + iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); + iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); + ieee80211_set_key_tx_seq(key, &seq); + break; + } + + /* that's it for this key */ + return; + } + + if (data->find_phase) { + data->last_gtk = key; + data->cipher = key->cipher; + return; + } + + if (data->status->num_of_gtk_rekeys) + ieee80211_remove_key(key); + else if (data->last_gtk == key) + iwl_mvm_set_key_rx_seq(key, data->status); +} + +static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_wowlan_status *status) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_d3_gtk_iter_data gtkdata = { + .status = status, + }; + u32 disconnection_reasons = + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; + + if (!status || !vif->bss_conf.bssid) + return false; + + if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) + return false; + + /* find last GTK that we used initially, if any */ + gtkdata.find_phase = true; + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_d3_update_gtks, >kdata); + /* not trying to keep connections with MFP/unhandled ciphers */ + if (gtkdata.unhandled_cipher) + return false; + if (!gtkdata.num_keys) + goto out; + if (!gtkdata.last_gtk) + return false; + + /* + * invalidate all other GTKs that might still exist and update + * the one that we used + */ + gtkdata.find_phase = false; + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_d3_update_gtks, >kdata); + + if (status->num_of_gtk_rekeys) { + struct ieee80211_key_conf *key; + struct { + struct ieee80211_key_conf conf; + u8 key[32]; + } conf = { + .conf.cipher = gtkdata.cipher, + .conf.keyidx = status->gtk.key_index, + }; + + switch (gtkdata.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + conf.conf.keylen = WLAN_KEY_LEN_CCMP; + memcpy(conf.conf.key, status->gtk.decrypt_key, + WLAN_KEY_LEN_CCMP); + break; + case WLAN_CIPHER_SUITE_TKIP: + conf.conf.keylen = WLAN_KEY_LEN_TKIP; + memcpy(conf.conf.key, status->gtk.decrypt_key, 16); + /* leave TX MIC key zeroed, we don't use it anyway */ + memcpy(conf.conf.key + + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + status->gtk.tkip_mic_key, 8); + break; + } + + key = ieee80211_gtk_rekey_add(vif, &conf.conf); + if (IS_ERR(key)) + return false; + iwl_mvm_set_key_rx_seq(key, status); + } + + if (status->num_of_gtk_rekeys) { + __be64 replay_ctr = + cpu_to_be64(le64_to_cpu(status->replay_ctr)); + ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, + (void *)&replay_ctr, GFP_KERNEL); + } + +out: + mvmvif->seqno_valid = true; + /* +0x10 because the set API expects next-to-use, not last-used */ + mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; + + return true; +} 
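+/*
+ * iwl_mvm_get_wakeup_status - retrieve the WoWLAN status from the D3 firmware
+ *
+ * Checks the firmware error table first (a WoWLAN RF-kill error is reported
+ * to mac80211 as an rfkill wakeup), then queries WOWLAN_GET_STATUSES and
+ * returns a kmemdup()'d copy of the response that the caller must kfree(),
+ * or an ERR_PTR() on failure.
+ */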
+ +static struct iwl_wowlan_status * +iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + u32 base = mvm->error_event_table; + struct error_table_start { + /* cf. struct iwl_error_event_table */ + u32 valid; + u32 error_id; + } err_info; + struct iwl_host_cmd cmd = { + .id = WOWLAN_GET_STATUSES, + .flags = CMD_WANT_SKB, + }; + struct iwl_wowlan_status *status, *fw_status; + int ret, len, status_size; + + iwl_trans_read_mem_bytes(mvm->trans, base, + &err_info, sizeof(err_info)); + + if (err_info.valid) { + IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n", + err_info.valid, err_info.error_id); + if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + struct cfg80211_wowlan_wakeup wakeup = { + .rfkill_release = true, + }; + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + } + return ERR_PTR(-EIO); + } + + /* only for tracing for now */ + ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL); + if (ret) + IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, "failed to query status (%d)\n", ret); + return ERR_PTR(ret); + } + + /* RF-kill already asserted again... */ + if (!cmd.resp_pkt) { + fw_status = ERR_PTR(-ERFKILL); + goto out_free_resp; + } + + status_size = sizeof(*fw_status); + + len = iwl_rx_packet_payload_len(cmd.resp_pkt); + if (len < status_size) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + fw_status = ERR_PTR(-EIO); + goto out_free_resp; + } + + status = (void *)cmd.resp_pkt->data; + if (len != (status_size + + ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + fw_status = ERR_PTR(-EIO); + goto out_free_resp; + } + + fw_status = kmemdup(status, len, GFP_KERNEL); + +out_free_resp: + iwl_free_resp(&cmd); + return fw_status; +} + +/* releases the MVM mutex */ +static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_wowlan_status_data status; + struct iwl_wowlan_status *fw_status; + int i; + bool keep; + struct ieee80211_sta *ap_sta; + struct iwl_mvm_sta *mvm_ap_sta; + + fw_status = iwl_mvm_get_wakeup_status(mvm, vif); + if (IS_ERR_OR_NULL(fw_status)) + goto out_unlock; + + status.pattern_number = le16_to_cpu(fw_status->pattern_number); + for (i = 0; i < 8; i++) + status.qos_seq_ctr[i] = + le16_to_cpu(fw_status->qos_seq_ctr[i]); + status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons); + status.wake_packet_length = + le32_to_cpu(fw_status->wake_packet_length); + status.wake_packet_bufsize = + le32_to_cpu(fw_status->wake_packet_bufsize); + status.wake_packet = fw_status->wake_packet; + + /* still at hard-coded place 0 for D3 image */ + ap_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[0], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(ap_sta)) + goto out_free; + + mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + u16 seq = status.qos_seq_ctr[i]; + /* firmware stores last-used value, we store next value */ + seq += 0x10; + mvm_ap_sta->tid_data[i].seq_number = seq; + } + + /* now we have all the data we need, unlock to avoid mac80211 issues */ + mutex_unlock(&mvm->mutex); + + iwl_mvm_report_wakeup_reasons(mvm, vif, &status); + + keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status); + + kfree(fw_status); + return keep; + +out_free: + kfree(fw_status); +out_unlock: + mutex_unlock(&mvm->mutex); + return false; +} + +struct iwl_mvm_nd_query_results { + u32 
matched_profiles; + struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; +}; + +static int +iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, + struct iwl_mvm_nd_query_results *results) +{ + struct iwl_scan_offload_profiles_query *query; + struct iwl_host_cmd cmd = { + .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, + .flags = CMD_WANT_SKB, + }; + int ret, len; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); + return ret; + } + + /* RF-kill already asserted again... */ + if (!cmd.resp_pkt) { + ret = -ERFKILL; + goto out_free_resp; + } + + len = iwl_rx_packet_payload_len(cmd.resp_pkt); + if (len < sizeof(*query)) { + IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); + ret = -EIO; + goto out_free_resp; + } + + query = (void *)cmd.resp_pkt->data; + + results->matched_profiles = le32_to_cpu(query->matched_profiles); + memcpy(results->matches, query->matches, sizeof(results->matches)); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); +#endif + +out_free_resp: + iwl_free_resp(&cmd); + return ret; +} + +static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct cfg80211_wowlan_nd_info *net_detect = NULL; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; + struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; + struct iwl_mvm_nd_query_results query; + struct iwl_wowlan_status *fw_status; + unsigned long matched_profiles; + u32 reasons = 0; + int i, j, n_matches, ret; + + fw_status = iwl_mvm_get_wakeup_status(mvm, vif); + if (!IS_ERR_OR_NULL(fw_status)) { + reasons = le32_to_cpu(fw_status->wakeup_reasons); + kfree(fw_status); + } + + if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) + wakeup.rfkill_release = true; + + if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) + goto out; + + ret = iwl_mvm_netdetect_query_results(mvm, &query); + if (ret || !query.matched_profiles) { + wakeup_report = NULL; + goto out; + } + + matched_profiles = query.matched_profiles; + if (mvm->n_nd_match_sets) { + n_matches = hweight_long(matched_profiles); + } else { + IWL_ERR(mvm, "no net detect match information available\n"); + n_matches = 0; + } + + net_detect = kzalloc(sizeof(*net_detect) + + (n_matches * sizeof(net_detect->matches[0])), + GFP_KERNEL); + if (!net_detect || !n_matches) + goto out_report_nd; + + for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { + struct iwl_scan_offload_profile_match *fw_match; + struct cfg80211_wowlan_nd_match *match; + int idx, n_channels = 0; + + fw_match = &query.matches[i]; + + for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++) + n_channels += hweight8(fw_match->matching_channels[j]); + + match = kzalloc(sizeof(*match) + + (n_channels * sizeof(*match->channels)), + GFP_KERNEL); + if (!match) + goto out_report_nd; + + net_detect->matches[net_detect->n_matches++] = match; + + /* We inverted the order of the SSIDs in the scan + * request, so invert the index here. 
+ */ + idx = mvm->n_nd_match_sets - i - 1; + match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; + memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, + match->ssid.ssid_len); + + if (mvm->n_nd_channels < n_channels) + continue; + + for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++) + if (fw_match->matching_channels[j / 8] & (BIT(j % 8))) + match->channels[match->n_channels++] = + mvm->nd_channels[j]->center_freq; + } + +out_report_nd: + wakeup.net_detect = net_detect; +out: + iwl_mvm_free_nd(mvm); + + mutex_unlock(&mvm->mutex); + ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); + + if (net_detect) { + for (i = 0; i < net_detect->n_matches; i++) + kfree(net_detect->matches[i]); + kfree(net_detect); + } +} + +static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) +{ +#ifdef CONFIG_IWLWIFI_DEBUGFS + const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN]; + u32 len = img->sec[IWL_UCODE_SECTION_DATA].len; + u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset; + + if (!mvm->store_d3_resume_sram) + return; + + if (!mvm->d3_resume_sram) { + mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL); + if (!mvm->d3_resume_sram) + return; + } + + iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len); +#endif +} + +static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + /* skip the one we keep connection on */ + if (data == vif) + return; + + if (vif->type == NL80211_IFTYPE_STATION) + ieee80211_resume_disconnect(vif); +} + +static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) +{ + struct ieee80211_vif *vif = NULL; + int ret; + enum iwl_d3_status d3_status; + bool keep = false; + + mutex_lock(&mvm->mutex); + + /* get the BSS vif pointer again */ + vif = iwl_mvm_get_bss_vif(mvm); + if (IS_ERR_OR_NULL(vif)) + goto err; + + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); + if (ret) + goto err; + + if (d3_status != IWL_D3_STATUS_ALIVE) { + IWL_INFO(mvm, "Device was reset during suspend\n"); + goto err; + } + + /* query SRAM first in case we want event logging */ + iwl_mvm_read_d3_sram(mvm); + + /* + * Query the current location and source from the D3 firmware so we + * can play it back when we re-intiailize the D0 firmware + */ + iwl_mvm_update_changed_regdom(mvm); + + if (mvm->net_detect) { + iwl_mvm_query_netdetect_reasons(mvm, vif); + /* has unlocked the mutex, so skip that */ + goto out; + } else { + keep = iwl_mvm_query_wakeup_reasons(mvm, vif); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (keep) + mvm->keep_vif = vif; +#endif + /* has unlocked the mutex, so skip that */ + goto out_iterate; + } + +err: + iwl_mvm_free_nd(mvm); + mutex_unlock(&mvm->mutex); + +out_iterate: + if (!test) + ieee80211_iterate_active_interfaces_rtnl(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); + +out: + /* return 1 to reconfigure the device */ + set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); + + /* We always return 1, which causes mac80211 to do a reconfig + * with IEEE80211_RECONFIG_TYPE_RESTART. This type of + * reconfig calls iwl_mvm_restart_complete(), where we unref + * the IWL_MVM_REF_UCODE_DOWN, so we need to take the + * reference here. 
+ */ + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + return 1; +} + +static int iwl_mvm_resume_d3(struct iwl_mvm *mvm) +{ + iwl_trans_resume(mvm->trans); + + return __iwl_mvm_resume(mvm, false); +} + +static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) +{ + bool exit_now; + enum iwl_d3_status d3_status; + + iwl_trans_d3_resume(mvm->trans, &d3_status, false); + + /* + * make sure to clear D0I3_DEFER_WAKEUP before + * calling iwl_trans_resume(), which might wait + * for d0i3 exit completion. + */ + mutex_lock(&mvm->d0i3_suspend_mutex); + __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); + exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, + &mvm->d0i3_suspend_flags); + mutex_unlock(&mvm->d0i3_suspend_mutex); + if (exit_now) { + IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n"); + _iwl_mvm_exit_d0i3(mvm); + } + + iwl_trans_resume(mvm->trans); + + if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { + int ret = iwl_mvm_exit_d0i3(mvm->hw->priv); + + if (ret) + return ret; + /* + * d0i3 exit will be deferred until reconfig_complete. + * make sure there we are out of d0i3. + */ + } + return 0; +} + +int iwl_mvm_resume(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* 'any' trigger means d0i3 was used */ + if (hw->wiphy->wowlan_config->any) + return iwl_mvm_resume_d0i3(mvm); + else + return iwl_mvm_resume_d3(mvm); +} + +void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + device_set_wakeup_enable(mvm->trans->dev, enabled); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) +{ + struct iwl_mvm *mvm = inode->i_private; + int err; + + if (mvm->d3_test_active) + return -EBUSY; + + file->private_data = inode->i_private; + + ieee80211_stop_queues(mvm->hw); + synchronize_net(); + + /* start pseudo D3 */ + rtnl_lock(); + err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); + rtnl_unlock(); + if (err > 0) + err = -EINVAL; + if (err) { + ieee80211_wake_queues(mvm->hw); + return err; + } + mvm->d3_test_active = true; + mvm->keep_vif = NULL; + return 0; +} + +static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + u32 pme_asserted; + + while (true) { + /* read pme_ptr if available */ + if (mvm->d3_test_pme_ptr) { + pme_asserted = iwl_trans_read_mem32(mvm->trans, + mvm->d3_test_pme_ptr); + if (pme_asserted) + break; + } + + if (msleep_interruptible(100)) + break; + } + + return 0; +} + +static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + /* skip the one we keep connection on */ + if (_data == vif) + return; + + if (vif->type == NL80211_IFTYPE_STATION) + ieee80211_connection_loss(vif); +} + +static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) +{ + struct iwl_mvm *mvm = inode->i_private; + int remaining_time = 10; + + mvm->d3_test_active = false; + rtnl_lock(); + __iwl_mvm_resume(mvm, true); + rtnl_unlock(); + iwl_abort_notification_waits(&mvm->notif_wait); + ieee80211_restart_hw(mvm->hw); + + /* wait for restart and disconnect all interfaces */ + while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + remaining_time > 0) { + remaining_time--; + msleep(1000); + } + + if (remaining_time == 0) + IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + 
iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); + + ieee80211_wake_queues(mvm->hw); + + return 0; +} + +const struct file_operations iwl_dbgfs_d3_test_ops = { + .llseek = no_llseek, + .open = iwl_mvm_d3_test_open, + .read = iwl_mvm_d3_test_read, + .release = iwl_mvm_d3_test_release, +}; +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c new file mode 100644 index 000000000000..7904b41a04c6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -0,0 +1,1483 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "mvm.h" +#include "fw-api-tof.h" +#include "debugfs.h" + +static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_dbgfs_pm_mask param, int val) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm; + + dbgfs_pm->mask |= param; + + switch (param) { + case MVM_DEBUGFS_PM_KEEP_ALIVE: { + int dtimper = vif->bss_conf.dtim_period ?: 1; + int dtimper_msec = dtimper * vif->bss_conf.beacon_int; + + IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); + if (val * MSEC_PER_SEC < 3 * dtimper_msec) + IWL_WARN(mvm, + "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n", + val * MSEC_PER_SEC, 3 * dtimper_msec); + dbgfs_pm->keep_alive_seconds = val; + break; + } + case MVM_DEBUGFS_PM_SKIP_OVER_DTIM: + IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", + val ? "enabled" : "disabled"); + dbgfs_pm->skip_over_dtim = val; + break; + case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS: + IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val); + dbgfs_pm->skip_dtim_periods = val; + break; + case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT: + IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val); + dbgfs_pm->rx_data_timeout = val; + break; + case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT: + IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val); + dbgfs_pm->tx_data_timeout = val; + break; + case MVM_DEBUGFS_PM_LPRX_ENA: + IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? 
"enabled" : "disabled"); + dbgfs_pm->lprx_ena = val; + break; + case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD: + IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val); + dbgfs_pm->lprx_rssi_threshold = val; + break; + case MVM_DEBUGFS_PM_SNOOZE_ENABLE: + IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val); + dbgfs_pm->snooze_ena = val; + break; + case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING: + IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val); + dbgfs_pm->uapsd_misbehaving = val; + break; + case MVM_DEBUGFS_PM_USE_PS_POLL: + IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val); + dbgfs_pm->use_ps_poll = val; + break; + } +} + +static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + enum iwl_dbgfs_pm_mask param; + int val, ret; + + if (!strncmp("keep_alive=", buf, 11)) { + if (sscanf(buf + 11, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_KEEP_ALIVE; + } else if (!strncmp("skip_over_dtim=", buf, 15)) { + if (sscanf(buf + 15, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM; + } else if (!strncmp("skip_dtim_periods=", buf, 18)) { + if (sscanf(buf + 18, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS; + } else if (!strncmp("rx_data_timeout=", buf, 16)) { + if (sscanf(buf + 16, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT; + } else if (!strncmp("tx_data_timeout=", buf, 16)) { + if (sscanf(buf + 16, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; + } else if (!strncmp("lprx=", buf, 5)) { + if (sscanf(buf + 5, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_LPRX_ENA; + } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) { + if (sscanf(buf + 20, "%d", &val) != 1) + return -EINVAL; + if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val < + POWER_LPRX_RSSI_THRESHOLD_MIN) + return -EINVAL; + param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD; + } else if (!strncmp("snooze_enable=", buf, 14)) { + if (sscanf(buf + 14, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SNOOZE_ENABLE; + } else if (!strncmp("uapsd_misbehaving=", buf, 18)) { + if (sscanf(buf + 18, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING; + } else if (!strncmp("use_ps_poll=", buf, 12)) { + if (sscanf(buf + 12, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_USE_PS_POLL; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + iwl_dbgfs_update_pm(mvm, vif, param, val); + ret = iwl_mvm_power_update_mac(mvm); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + char buf[64]; + int bufsz = sizeof(buf); + int pos; + + pos = scnprintf(buf, bufsz, "bss limit = %d\n", + vif->bss_conf.txpower); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_pm_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[512]; + int bufsz = sizeof(buf); + int pos; + + pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t 
iwl_dbgfs_mac_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u8 ap_sta_id; + struct ieee80211_chanctx_conf *chanctx_conf; + char buf[512]; + int bufsz = sizeof(buf); + int pos = 0; + int i; + + mutex_lock(&mvm->mutex); + + ap_sta_id = mvmvif->ap_sta_id; + + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_ADHOC: + pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n"); + break; + case NL80211_IFTYPE_STATION: + pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n"); + break; + case NL80211_IFTYPE_AP: + pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n"); + break; + case NL80211_IFTYPE_P2P_CLIENT: + pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n"); + break; + case NL80211_IFTYPE_P2P_GO: + pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n"); + break; + case NL80211_IFTYPE_P2P_DEVICE: + pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n"); + break; + default: + break; + } + + pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n", + mvmvif->id, mvmvif->color); + pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n", + vif->bss_conf.bssid); + pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n"); + for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) + pos += scnprintf(buf+pos, bufsz-pos, + "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n", + i, mvmvif->queue_params[i].txop, + mvmvif->queue_params[i].cw_min, + mvmvif->queue_params[i].cw_max, + mvmvif->queue_params[i].aifs, + mvmvif->queue_params[i].uapsd); + + if (vif->type == NL80211_IFTYPE_STATION && + ap_sta_id != IWL_MVM_STATION_COUNT) { + struct ieee80211_sta *sta; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id], + lockdep_is_held(&mvm->mutex)); + if (!IS_ERR_OR_NULL(sta)) { + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + pos += scnprintf(buf+pos, bufsz-pos, + "ap_sta_id %d - reduced Tx power %d\n", + ap_sta_id, + mvm_sta->bt_reduced_txpower); + } + } + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + if (chanctx_conf) + pos += scnprintf(buf+pos, bufsz-pos, + "idle rx chains %d, active rx chains: %d\n", + chanctx_conf->rx_chains_static, + chanctx_conf->rx_chains_dynamic); + rcu_read_unlock(); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, + enum iwl_dbgfs_bf_mask param, int value) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; + + dbgfs_bf->mask |= param; + + switch (param) { + case MVM_DEBUGFS_BF_ENERGY_DELTA: + dbgfs_bf->bf_energy_delta = value; + break; + case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA: + dbgfs_bf->bf_roaming_energy_delta = value; + break; + case MVM_DEBUGFS_BF_ROAMING_STATE: + dbgfs_bf->bf_roaming_state = value; + break; + case MVM_DEBUGFS_BF_TEMP_THRESHOLD: + dbgfs_bf->bf_temp_threshold = value; + break; + case MVM_DEBUGFS_BF_TEMP_FAST_FILTER: + dbgfs_bf->bf_temp_fast_filter = value; + break; + case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER: + dbgfs_bf->bf_temp_slow_filter = value; + break; + case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: + dbgfs_bf->bf_enable_beacon_filter = value; + break; + case MVM_DEBUGFS_BF_DEBUG_FLAG: + dbgfs_bf->bf_debug_flag = value; + break; + case MVM_DEBUGFS_BF_ESCAPE_TIMER: + dbgfs_bf->bf_escape_timer = value; + break; + case 
MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT: + dbgfs_bf->ba_enable_beacon_abort = value; + break; + case MVM_DEBUGFS_BA_ESCAPE_TIMER: + dbgfs_bf->ba_escape_timer = value; + break; + } +} + +static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + enum iwl_dbgfs_bf_mask param; + int value, ret = 0; + + if (!strncmp("bf_energy_delta=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ENERGY_DELTA_MIN || + value > IWL_BF_ENERGY_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ENERGY_DELTA; + } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) { + if (sscanf(buf+24, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || + value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA; + } else if (!strncmp("bf_roaming_state=", buf, 17)) { + if (sscanf(buf+17, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ROAMING_STATE_MIN || + value > IWL_BF_ROAMING_STATE_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ROAMING_STATE; + } else if (!strncmp("bf_temp_threshold=", buf, 18)) { + if (sscanf(buf+18, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMP_THRESHOLD_MIN || + value > IWL_BF_TEMP_THRESHOLD_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMP_THRESHOLD; + } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) { + if (sscanf(buf+20, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMP_FAST_FILTER_MIN || + value > IWL_BF_TEMP_FAST_FILTER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER; + } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) { + if (sscanf(buf+20, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMP_SLOW_FILTER_MIN || + value > IWL_BF_TEMP_SLOW_FILTER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER; + } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { + if (sscanf(buf+24, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER; + } else if (!strncmp("bf_debug_flag=", buf, 14)) { + if (sscanf(buf+14, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BF_DEBUG_FLAG; + } else if (!strncmp("bf_escape_timer=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ESCAPE_TIMER_MIN || + value > IWL_BF_ESCAPE_TIMER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ESCAPE_TIMER; + } else if (!strncmp("ba_escape_timer=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BA_ESCAPE_TIMER_MIN || + value > IWL_BA_ESCAPE_TIMER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BA_ESCAPE_TIMER; + } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) { + if (sscanf(buf+23, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + iwl_dbgfs_update_bf(vif, param, value); + if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + else + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_bf_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t 
*ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = + cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT), + .ba_enable_beacon_abort = + cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT), + }; + + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); + if (mvmvif->bf_data.bf_enabled) + cmd.bf_enable_beacon_filter = cpu_to_le32(1); + else + cmd.bf_enable_beacon_filter = 0; + + pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", + le32_to_cpu(cmd.bf_energy_delta)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", + le32_to_cpu(cmd.bf_roaming_energy_delta)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", + le32_to_cpu(cmd.bf_roaming_state)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n", + le32_to_cpu(cmd.bf_temp_threshold)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n", + le32_to_cpu(cmd.bf_temp_fast_filter)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n", + le32_to_cpu(cmd.bf_temp_slow_filter)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", + le32_to_cpu(cmd.bf_enable_beacon_filter)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", + le32_to_cpu(cmd.bf_debug_flag)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", + le32_to_cpu(cmd.bf_escape_timer)); + pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", + le32_to_cpu(cmd.ba_escape_timer)); + pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", + le32_to_cpu(cmd.ba_enable_beacon_abort)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static inline char *iwl_dbgfs_is_match(char *name, char *buf) +{ + int len = strlen(name); + + return !strncmp(name, buf, len) ? 
buf + len : NULL; +} + +static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 value; + int ret = -EINVAL; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("tof_disabled=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.tof_disabled = value; + goto out; + } + + data = iwl_dbgfs_is_match("one_sided_disabled=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.one_sided_disabled = value; + goto out; + } + + data = iwl_dbgfs_is_match("is_debug_mode=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.is_debug_mode = value; + goto out; + } + + data = iwl_dbgfs_is_match("is_buf=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.is_buf_required = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_tof_cfg=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_config_cmd(mvm); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_enable_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_config_cmd *cmd; + + cmd = &mvm->tof_data.tof_cfg; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n", + cmd->tof_disabled); + pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n", + cmd->one_sided_disabled); + pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n", + cmd->is_debug_mode); + pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n", + cmd->is_buf_required); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 value; + int ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("burst_period=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (!ret) + mvm->tof_data.responder_cfg.burst_period = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("min_delta_ftm=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.min_delta_ftm = value; + goto out; + } + + data = iwl_dbgfs_is_match("burst_duration=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.burst_duration = value; + goto out; + } + + data = iwl_dbgfs_is_match("num_of_burst_exp=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.num_of_burst_exp = value; + goto out; + } + + data = iwl_dbgfs_is_match("abort_responder=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.abort_responder = value; + goto out; + } + + data = iwl_dbgfs_is_match("get_ch_est=", buf); + if (data) 
{ + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.get_ch_est = value; + goto out; + } + + data = iwl_dbgfs_is_match("recv_sta_req_params=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.recv_sta_req_params = value; + goto out; + } + + data = iwl_dbgfs_is_match("channel_num=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.channel_num = value; + goto out; + } + + data = iwl_dbgfs_is_match("bandwidth=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.bandwidth = value; + goto out; + } + + data = iwl_dbgfs_is_match("rate=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.rate = value; + goto out; + } + + data = iwl_dbgfs_is_match("bssid=", buf); + if (data) { + u8 *mac = mvm->tof_data.responder_cfg.bssid; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + } + + data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.tsf_timer_offset_msecs = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("toa_offset=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.toa_offset = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("center_freq=", buf); + if (data) { + struct iwl_tof_responder_config_cmd *cmd = + &mvm->tof_data.responder_cfg; + + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + enum ieee80211_band band = (cmd->channel_num <= 14) ? + IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ; + struct ieee80211_channel chn = { + .band = band, + .center_freq = ieee80211_channel_to_frequency( + cmd->channel_num, band), + }; + struct cfg80211_chan_def chandef = { + .chan = &chn, + .center_freq1 = + ieee80211_channel_to_frequency(value, + band), + }; + + cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef); + } + goto out; + } + + data = iwl_dbgfs_is_match("ftm_per_burst=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.ftm_per_burst = value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value; + goto out; + } + + data = iwl_dbgfs_is_match("asap_mode=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.asap_mode = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_responder_cfg=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_responder_cmd(mvm, vif); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_responder_config_cmd *cmd; + + cmd = &mvm->tof_data.responder_cfg; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n", + le16_to_cpu(cmd->burst_period)); + pos += scnprintf(buf + pos, bufsz - 
pos, "burst_duration = %d\n", + cmd->burst_duration); + pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n", + cmd->bandwidth); + pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n", + cmd->channel_num); + pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n", + cmd->ctrl_ch_position); + pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n", + cmd->bssid); + pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n", + cmd->min_delta_ftm); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n", + cmd->num_of_burst_exp); + pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate); + pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n", + cmd->abort_responder); + pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n", + cmd->get_ch_est); + pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n", + cmd->recv_sta_req_params); + pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n", + cmd->ftm_per_burst); + pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n", + cmd->ftm_resp_ts_avail); + pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n", + cmd->asap_mode); + pos += scnprintf(buf + pos, bufsz - pos, + "tsf_timer_offset_msecs = %d\n", + le16_to_cpu(cmd->tsf_timer_offset_msecs)); + pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n", + le16_to_cpu(cmd->toa_offset)); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 value; + int ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("request_id=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.request_id = value; + goto out; + } + + data = iwl_dbgfs_is_match("initiator=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.initiator = value; + goto out; + } + + data = iwl_dbgfs_is_match("one_sided_los_disable=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.one_sided_los_disable = value; + goto out; + } + + data = iwl_dbgfs_is_match("req_timeout=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.req_timeout = value; + goto out; + } + + data = iwl_dbgfs_is_match("report_policy=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.report_policy = value; + goto out; + } + + data = iwl_dbgfs_is_match("macaddr_random=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.macaddr_random = value; + goto out; + } + + data = iwl_dbgfs_is_match("num_of_ap=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.num_of_ap = value; + goto out; + } + + data = iwl_dbgfs_is_match("macaddr_template=", buf); + if (data) { + u8 mac[ETH_ALEN]; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN); + goto out; + } + + data = iwl_dbgfs_is_match("macaddr_mask=", buf); + if (data) { + u8 mac[ETH_ALEN]; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + memcpy(mvm->tof_data.range_req.macaddr_mask, 
mac, ETH_ALEN); + goto out; + } + + data = iwl_dbgfs_is_match("ap=", buf); + if (data) { + struct iwl_tof_range_req_ap_entry ap = {}; + int size = sizeof(struct iwl_tof_range_req_ap_entry); + u16 burst_period; + u8 *mac = ap.bssid; + unsigned int i; + + if (sscanf(data, "%u %hhd %hhd %hhd" + "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx" + "%hhd %hhd %hd" + "%hhd %hhd %d" + "%hhx %hhd %hhd %hhd", + &i, &ap.channel_num, &ap.bandwidth, + &ap.ctrl_ch_position, + mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, + &ap.measure_type, &ap.num_of_bursts, + &burst_period, + &ap.samples_per_burst, &ap.retries_per_sample, + &ap.tsf_delta, &ap.location_req, &ap.asap_mode, + &ap.enable_dyn_ack, &ap.rssi) != 20) { + ret = -EINVAL; + goto out; + } + if (i >= IWL_MVM_TOF_MAX_APS) { + IWL_ERR(mvm, "Invalid AP index %d\n", i); + ret = -EINVAL; + goto out; + } + + ap.burst_period = cpu_to_le16(burst_period); + + memcpy(&mvm->tof_data.range_req.ap[i], &ap, size); + goto out; + } + + data = iwl_dbgfs_is_match("send_range_request=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) + ret = iwl_mvm_tof_range_request_cmd(mvm, vif); + goto out; + } + + ret = -EINVAL; +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_range_req_cmd *cmd; + int i; + + cmd = &mvm->tof_data.range_req; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n", + cmd->request_id); + pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n", + cmd->initiator); + pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n", + cmd->one_sided_los_disable); + pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n", + cmd->req_timeout); + pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n", + cmd->report_policy); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n", + cmd->macaddr_random); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n", + cmd->macaddr_template); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n", + cmd->macaddr_mask); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n", + cmd->num_of_ap); + for (i = 0; i < cmd->num_of_ap; i++) { + struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i]; + + pos += scnprintf(buf + pos, bufsz - pos, + "ap %.2d: channel_num=%hhd bw=%hhd" + " control=%hhd bssid=%pM type=%hhd" + " num_of_bursts=%hhd burst_period=%hd ftm=%hhd" + " retries=%hhd tsf_delta=%d" + " tsf_delta_direction=%hhd location_req=0x%hhx " + " asap=%hhd enable=%hhd rssi=%hhd\n", + i, ap->channel_num, ap->bandwidth, + ap->ctrl_ch_position, ap->bssid, + ap->measure_type, ap->num_of_bursts, + ap->burst_period, ap->samples_per_burst, + ap->retries_per_sample, ap->tsf_delta, + ap->tsf_delta_direction, + ap->location_req, ap->asap_mode, + ap->enable_dyn_ack, ap->rssi); + } + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 value; + int ret = 0; + char *data; + + 
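+	/*
+	 * Each write is a single "name=value" setting: the matching field of
+	 * the cached range_req_ext command is updated, and a non-zero
+	 * "send_range_req_ext=" value sends the accumulated command to the
+	 * firmware.
+	 */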
mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.tsf_timer_offset_msec = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("min_delta_ftm=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.min_delta_ftm = value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.ftm_format_and_bw20M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.ftm_format_and_bw40M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.ftm_format_and_bw80M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("send_range_req_ext=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) + ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif); + goto out; + } + + ret = -EINVAL; +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_range_req_ext_cmd *cmd; + + cmd = &mvm->tof_data.range_req_ext; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, + "tsf_timer_offset_msec = %hd\n", + cmd->tsf_timer_offset_msec); + pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n", + cmd->min_delta_ftm); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw20M = %hhd\n", + cmd->ftm_format_and_bw20M); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw40M = %hhd\n", + cmd->ftm_format_and_bw40M); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw80M = %hhd\n", + cmd->ftm_format_and_bw80M); + + mutex_unlock(&mvm->mutex); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 value; + int abort_id, ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("abort_id=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.last_abort_id = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_range_abort=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + abort_id = mvm->tof_data.last_abort_id; + ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[32]; + int pos = 0; + const size_t bufsz = sizeof(buf); + 
int last_abort_id; + + mutex_lock(&mvm->mutex); + last_abort_id = mvm->tof_data.last_abort_id; + mutex_unlock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n", + last_abort_id); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char *buf; + int pos = 0; + const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256; + struct iwl_tof_range_rsp_ntfy *cmd; + int i, ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + cmd = &mvm->tof_data.range_resp; + + pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n", + cmd->request_id); + pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n", + cmd->request_status); + pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n", + cmd->last_in_batch); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n", + cmd->num_of_aps); + for (i = 0; i < cmd->num_of_aps; i++) { + struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i]; + + pos += scnprintf(buf + pos, bufsz - pos, + "ap %.2d: bssid=%pM status=%hhd bw=%hhd" + " rtt=%d rtt_var=%d rtt_spread=%d" + " rssi=%hhd rssi_spread=%hhd" + " range=%d range_var=%d" + " time_stamp=%d\n", + i, ap->bssid, ap->measure_status, + ap->measure_bw, + ap->rtt, ap->rtt_variance, ap->rtt_spread, + ap->rssi, ap->rssi_spread, ap->range, + ap->range_variance, ap->timestamp); + } + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u8 value; + int ret; + + ret = kstrtou8(buf, 0, &value); + if (ret) + return ret; + if (value > 1) + return -EINVAL; + + mutex_lock(&mvm->mutex); + iwl_mvm_update_low_latency(mvm, vif, value); + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_low_latency_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[2]; + + buf[0] = mvmvif->low_latency ? '1' : '0'; + buf[1] = '\n'; + return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); +} + +static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[20]; + int len; + + len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + bool ret; + + mutex_lock(&mvm->mutex); + ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid); + mutex_unlock(&mvm->mutex); + + return ret ? 
count : -EINVAL; +} + +static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_chanctx_conf *chanctx_conf; + struct iwl_mvm_phy_ctxt *phy_ctxt; + u16 value; + int ret; + + ret = kstrtou16(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + rcu_read_lock(); + + chanctx_conf = rcu_dereference(vif->chanctx_conf); + /* make sure the channel context is assigned */ + if (!chanctx_conf) { + rcu_read_unlock(); + mutex_unlock(&mvm->mutex); + return -EINVAL; + } + + phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv]; + rcu_read_unlock(); + + mvm->dbgfs_rx_phyinfo = value; + + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def, + chanctx_conf->rx_chains_static, + chanctx_conf->rx_chains_dynamic); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[8]; + + snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo); + + return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); +} + +#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) +#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) +#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, vif, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ + } while (0) + +MVM_DEBUGFS_READ_FILE_OPS(mac_params); +MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); +MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); + +void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct dentry *dbgfs_dir = vif->debugfs_dir; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[100]; + + /* + * Check if debugfs directory already exist before creating it. 
+ * This may happen when, for example, resetting hw or suspend-resume + */ + if (!dbgfs_dir || mvmvif->dbgfs_dir) + return; + + mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); + + if (!mvmvif->dbgfs_dir) { + IWL_ERR(mvm, "Failed to create debugfs directory under %s\n", + dbgfs_dir->d_name.name); + return; + } + + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && + ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) || + (vif->type == NL80211_IFTYPE_STATION && vif->p2p && + mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))) + MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR | + S_IRUSR); + + MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + + if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + mvmvif == mvm->bf_allowed_vif) + MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && + !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) { + if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) + MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, + mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, + S_IRUSR); + } + + /* + * Create symlink for convenience pointing to interface specific + * debugfs entries for the driver. For example, under + * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/ + * find + * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/ + */ + snprintf(buf, 100, "../../../%s/%s/%s/%s", + dbgfs_dir->d_parent->d_parent->d_name.name, + dbgfs_dir->d_parent->d_name.name, + dbgfs_dir->d_name.name, + mvmvif->dbgfs_dir->d_name.name); + + mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, + mvm->debugfs_dir, buf); + if (!mvmvif->dbgfs_slink) + IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n", + dbgfs_dir->d_name.name); + return; +err: + IWL_ERR(mvm, "Can't create debugfs entity\n"); +} + +void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + debugfs_remove(mvmvif->dbgfs_slink); + mvmvif->dbgfs_slink = NULL; + + debugfs_remove_recursive(mvmvif->dbgfs_dir); + mvmvif->dbgfs_dir = NULL; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c new file mode 100644 index 000000000000..05928fb4021d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -0,0 +1,1516 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include + +#include "mvm.h" +#include "sta.h" +#include "iwl-io.h" +#include "debugfs.h" +#include "iwl-fw-error-dump.h" + +static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret; + u32 scd_q_msk; + + if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) + return -EIO; + + if (sscanf(buf, "%x", &scd_q_msk) != 1) + return -EINVAL; + + IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? 
: count; + mutex_unlock(&mvm->mutex); + + return ret; +} + +static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_sta *mvmsta; + int sta_id, drain, ret; + + if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) + return -EIO; + + if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) + return -EINVAL; + if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT) + return -EINVAL; + if (drain < 0 || drain > 1) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); + + if (!mvmsta) + ret = -ENOENT; + else + ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count; + + mutex_unlock(&mvm->mutex); + + return ret; +} + +static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + const struct fw_img *img; + unsigned int ofs, len; + size_t ret; + u8 *ptr; + + if (!mvm->ucode_loaded) + return -EINVAL; + + /* default is to dump the entire data segment */ + img = &mvm->fw->img[mvm->cur_ucode]; + ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; + len = img->sec[IWL_UCODE_SECTION_DATA].len; + + if (mvm->dbgfs_sram_len) { + ofs = mvm->dbgfs_sram_offset; + len = mvm->dbgfs_sram_len; + } + + ptr = kzalloc(len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); + + ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len); + + kfree(ptr); + + return ret; +} + +static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + const struct fw_img *img; + u32 offset, len; + u32 img_offset, img_len; + + if (!mvm->ucode_loaded) + return -EINVAL; + + img = &mvm->fw->img[mvm->cur_ucode]; + img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; + img_len = img->sec[IWL_UCODE_SECTION_DATA].len; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + if ((offset & 0x3) || (len & 0x3)) + return -EINVAL; + + if (offset + len > img_offset + img_len) + return -EINVAL; + + mvm->dbgfs_sram_offset = offset; + mvm->dbgfs_sram_len = len; + } else { + mvm->dbgfs_sram_offset = 0; + mvm->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos; + + if (!mvm->temperature_test) + pos = scnprintf(buf , sizeof(buf), "disabled\n"); + else + pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +/* + * Set NIC Temperature + * Cause the driver to ignore the actual NIC temperature reported by the FW + * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN - + * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX + * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE + */ +static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + int temperature; + + if (!mvm->ucode_loaded && !mvm->temperature_test) + return -EIO; + + if (kstrtoint(buf, 10, &temperature)) + return -EINVAL; + /* not a legal temperature */ + if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && + temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || + temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) + return -EINVAL; + + mutex_lock(&mvm->mutex); + if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { + if (!mvm->temperature_test) + goto out; + + 
mvm->temperature_test = false; + /* Since we can't read the temp while awake, just set + * it to zero until we get the next RX stats from the + * firmware. + */ + mvm->temperature = 0; + } else { + mvm->temperature_test = true; + mvm->temperature = temperature; + } + IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", + mvm->temperature_test ? "En" : "Dis" , + mvm->temperature); + /* handle the temperature change */ + iwl_mvm_tt_handler(mvm); + +out: + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos, temp; + + if (!mvm->ucode_loaded) + return -EIO; + + mutex_lock(&mvm->mutex); + temp = iwl_mvm_get_temp(mvm); + mutex_unlock(&mvm->mutex); + + if (temp < 0) + return temp; + + pos = scnprintf(buf , sizeof(buf), "%d\n", temp); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct ieee80211_sta *sta; + char buf[400]; + int i, pos = 0, bufsz = sizeof(buf); + + mutex_lock(&mvm->mutex); + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (!sta) + pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); + else if (IS_ERR(sta)) + pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", + PTR_ERR(sta)); + else + pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", + sta->addr); + } + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[64]; + int bufsz = sizeof(buf); + int pos = 0; + + pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n", + mvm->disable_power_off); + pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n", + mvm->disable_power_off_d3); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret, val; + + if (!mvm->ucode_loaded) + return -EIO; + + if (!strncmp("disable_power_off_d0=", buf, 21)) { + if (sscanf(buf + 21, "%d", &val) != 1) + return -EINVAL; + mvm->disable_power_off = val; + } else if (!strncmp("disable_power_off_d3=", buf, 21)) { + if (sscanf(buf + 21, "%d", &val) != 1) + return -EINVAL; + mvm->disable_power_off_d3 = val; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_power_update_device(mvm); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +#define BT_MBOX_MSG(_notif, _num, _field) \ + ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ + >> BT_MBOX##_num##_##_field##_POS) + + +#define BT_MBOX_PRINT(_num, _field, _end) \ + pos += scnprintf(buf + pos, bufsz - pos, \ + "\t%s: %d%s", \ + #_field, \ + BT_MBOX_MSG(notif, _num, _field), \ + true ? 
"\n" : ", "); + +static +int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, + int pos, int bufsz) +{ + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); + + BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); + BT_MBOX_PRINT(0, LE_PROF1, false); + BT_MBOX_PRINT(0, LE_PROF2, false); + BT_MBOX_PRINT(0, LE_PROF_OTHER, false); + BT_MBOX_PRINT(0, CHL_SEQ_N, false); + BT_MBOX_PRINT(0, INBAND_S, false); + BT_MBOX_PRINT(0, LE_MIN_RSSI, false); + BT_MBOX_PRINT(0, LE_SCAN, false); + BT_MBOX_PRINT(0, LE_ADV, false); + BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); + BT_MBOX_PRINT(0, OPEN_CON_1, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); + + BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); + BT_MBOX_PRINT(1, IP_SR, false); + BT_MBOX_PRINT(1, LE_MSTR, false); + BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); + BT_MBOX_PRINT(1, MSG_TYPE, false); + BT_MBOX_PRINT(1, SSN, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); + + BT_MBOX_PRINT(2, SNIFF_ACT, false); + BT_MBOX_PRINT(2, PAG, false); + BT_MBOX_PRINT(2, INQUIRY, false); + BT_MBOX_PRINT(2, CONN, false); + BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); + BT_MBOX_PRINT(2, DISC, false); + BT_MBOX_PRINT(2, SCO_TX_ACT, false); + BT_MBOX_PRINT(2, SCO_RX_ACT, false); + BT_MBOX_PRINT(2, ESCO_RE_TX, false); + BT_MBOX_PRINT(2, SCO_DURATION, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); + + BT_MBOX_PRINT(3, SCO_STATE, false); + BT_MBOX_PRINT(3, SNIFF_STATE, false); + BT_MBOX_PRINT(3, A2DP_STATE, false); + BT_MBOX_PRINT(3, ACL_STATE, false); + BT_MBOX_PRINT(3, MSTR_STATE, false); + BT_MBOX_PRINT(3, OBX_STATE, false); + BT_MBOX_PRINT(3, OPEN_CON_2, false); + BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); + BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); + BT_MBOX_PRINT(3, INBAND_P, false); + BT_MBOX_PRINT(3, MSG_TYPE_2, false); + BT_MBOX_PRINT(3, SSN_2, false); + BT_MBOX_PRINT(3, UPDATE_REQUEST, true); + + return pos; +} + +static +int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif, + char *buf, int pos, int bufsz) +{ + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); + + BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); + BT_MBOX_PRINT(0, LE_PROF1, false); + BT_MBOX_PRINT(0, LE_PROF2, false); + BT_MBOX_PRINT(0, LE_PROF_OTHER, false); + BT_MBOX_PRINT(0, CHL_SEQ_N, false); + BT_MBOX_PRINT(0, INBAND_S, false); + BT_MBOX_PRINT(0, LE_MIN_RSSI, false); + BT_MBOX_PRINT(0, LE_SCAN, false); + BT_MBOX_PRINT(0, LE_ADV, false); + BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); + BT_MBOX_PRINT(0, OPEN_CON_1, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); + + BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); + BT_MBOX_PRINT(1, IP_SR, false); + BT_MBOX_PRINT(1, LE_MSTR, false); + BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); + BT_MBOX_PRINT(1, MSG_TYPE, false); + BT_MBOX_PRINT(1, SSN, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); + + BT_MBOX_PRINT(2, SNIFF_ACT, false); + BT_MBOX_PRINT(2, PAG, false); + BT_MBOX_PRINT(2, INQUIRY, false); + BT_MBOX_PRINT(2, CONN, false); + BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); + BT_MBOX_PRINT(2, DISC, false); + BT_MBOX_PRINT(2, SCO_TX_ACT, false); + BT_MBOX_PRINT(2, SCO_RX_ACT, false); + BT_MBOX_PRINT(2, ESCO_RE_TX, false); + BT_MBOX_PRINT(2, SCO_DURATION, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); + + BT_MBOX_PRINT(3, SCO_STATE, false); + BT_MBOX_PRINT(3, SNIFF_STATE, false); + BT_MBOX_PRINT(3, A2DP_STATE, false); + BT_MBOX_PRINT(3, ACL_STATE, false); + BT_MBOX_PRINT(3, MSTR_STATE, false); + BT_MBOX_PRINT(3, OBX_STATE, false); + BT_MBOX_PRINT(3, OPEN_CON_2, false); 
+ BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); + BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); + BT_MBOX_PRINT(3, INBAND_P, false); + BT_MBOX_PRINT(3, MSG_TYPE_2, false); + BT_MBOX_PRINT(3, SSN_2, false); + BT_MBOX_PRINT(3, UPDATE_REQUEST, true); + + return pos; +} + +static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char *buf; + int ret, pos = 0, bufsz = sizeof(char) * 1024; + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + struct iwl_bt_coex_profile_notif_old *notif = + &mvm->last_bt_notif_old; + + pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz); + + pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", + notif->bt_ci_compliance); + pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", + le32_to_cpu(notif->primary_ch_lut)); + pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", + le32_to_cpu(notif->secondary_ch_lut)); + pos += scnprintf(buf+pos, + bufsz-pos, "bt_activity_grading = %d\n", + le32_to_cpu(notif->bt_activity_grading)); + pos += scnprintf(buf+pos, bufsz-pos, + "antenna isolation = %d CORUN LUT index = %d\n", + mvm->last_ant_isol, mvm->last_corun_lut); + } else { + struct iwl_bt_coex_profile_notif *notif = + &mvm->last_bt_notif; + + pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); + + pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", + notif->bt_ci_compliance); + pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", + le32_to_cpu(notif->primary_ch_lut)); + pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", + le32_to_cpu(notif->secondary_ch_lut)); + pos += scnprintf(buf+pos, + bufsz-pos, "bt_activity_grading = %d\n", + le32_to_cpu(notif->bt_activity_grading)); + pos += scnprintf(buf+pos, bufsz-pos, + "antenna isolation = %d CORUN LUT index = %d\n", + mvm->last_ant_isol, mvm->last_corun_lut); + } + + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; +} +#undef BT_MBOX_PRINT + +static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[256]; + int bufsz = sizeof(buf); + int pos = 0; + + mutex_lock(&mvm->mutex); + + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old; + + pos += scnprintf(buf+pos, bufsz-pos, + "Channel inhibition CMD\n"); + pos += scnprintf(buf+pos, bufsz-pos, + "\tPrimary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_primary_ci)); + pos += scnprintf(buf+pos, bufsz-pos, + "\tSecondary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_secondary_ci)); + + pos += scnprintf(buf+pos, bufsz-pos, + "BT Configuration CMD - 0=default, 1=never, 2=always\n"); + pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n", + mvm->bt_ack_kill_msk[0]); + pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n", + mvm->bt_cts_kill_msk[0]); + + } else { + struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; + + pos += scnprintf(buf+pos, bufsz-pos, + "Channel inhibition CMD\n"); + pos += scnprintf(buf+pos, bufsz-pos, + "\tPrimary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_primary_ci)); + pos += scnprintf(buf+pos, bufsz-pos, + "\tSecondary Channel Bitmap 0x%016llx\n", + 
le64_to_cpu(cmd->bt_secondary_ci)); + } + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + u32 bt_tx_prio; + + if (sscanf(buf, "%u", &bt_tx_prio) != 1) + return -EINVAL; + if (bt_tx_prio > 4) + return -EINVAL; + + mvm->bt_tx_prio = bt_tx_prio; + + return count; +} + +static ssize_t +iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + static const char * const modes_str[BT_FORCE_ANT_MAX] = { + [BT_FORCE_ANT_DIS] = "dis", + [BT_FORCE_ANT_AUTO] = "auto", + [BT_FORCE_ANT_BT] = "bt", + [BT_FORCE_ANT_WIFI] = "wifi", + }; + int ret, bt_force_ant_mode; + + for (bt_force_ant_mode = 0; + bt_force_ant_mode < ARRAY_SIZE(modes_str); + bt_force_ant_mode++) { + if (!strcmp(buf, modes_str[bt_force_ant_mode])) + break; + } + + if (bt_force_ant_mode >= ARRAY_SIZE(modes_str)) + return -EINVAL; + + ret = 0; + mutex_lock(&mvm->mutex); + if (mvm->bt_force_ant_mode == bt_force_ant_mode) + goto out; + + mvm->bt_force_ant_mode = bt_force_ant_mode; + IWL_DEBUG_COEX(mvm, "Force mode: %s\n", + modes_str[mvm->bt_force_ant_mode]); + ret = iwl_send_bt_init_conf(mvm); + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +#define PRINT_STATS_LE32(_struct, _memb) \ + pos += scnprintf(buf + pos, bufsz - pos, \ + fmt_table, #_memb, \ + le32_to_cpu(_struct->_memb)) + +static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + static const char *fmt_table = "\t%-30s %10u\n"; + static const char *fmt_header = "%-32s\n"; + int pos = 0; + char *buf; + int ret; + /* 43 is the size of each data line, 33 is the size of each header */ + size_t bufsz = + ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) + + (4 * 33) + 1; + + struct mvm_statistics_rx_phy *ofdm; + struct mvm_statistics_rx_phy *cck; + struct mvm_statistics_rx_non_phy *general; + struct mvm_statistics_rx_ht_phy *ht; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + + ofdm = &mvm->rx_stats.ofdm; + cck = &mvm->rx_stats.cck; + general = &mvm->rx_stats.general; + ht = &mvm->rx_stats.ofdm_ht; + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - OFDM"); + PRINT_STATS_LE32(ofdm, ina_cnt); + PRINT_STATS_LE32(ofdm, fina_cnt); + PRINT_STATS_LE32(ofdm, plcp_err); + PRINT_STATS_LE32(ofdm, crc32_err); + PRINT_STATS_LE32(ofdm, overrun_err); + PRINT_STATS_LE32(ofdm, early_overrun_err); + PRINT_STATS_LE32(ofdm, crc32_good); + PRINT_STATS_LE32(ofdm, false_alarm_cnt); + PRINT_STATS_LE32(ofdm, fina_sync_err_cnt); + PRINT_STATS_LE32(ofdm, sfd_timeout); + PRINT_STATS_LE32(ofdm, fina_timeout); + PRINT_STATS_LE32(ofdm, unresponded_rts); + PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); + PRINT_STATS_LE32(ofdm, sent_ack_cnt); + PRINT_STATS_LE32(ofdm, sent_cts_cnt); + PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); + PRINT_STATS_LE32(ofdm, dsp_self_kill); + PRINT_STATS_LE32(ofdm, mh_format_err); + PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum); + PRINT_STATS_LE32(ofdm, reserved); + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - CCK"); + PRINT_STATS_LE32(cck, ina_cnt); + PRINT_STATS_LE32(cck, fina_cnt); + PRINT_STATS_LE32(cck, plcp_err); + PRINT_STATS_LE32(cck, crc32_err); + PRINT_STATS_LE32(cck, overrun_err); + PRINT_STATS_LE32(cck, early_overrun_err); + PRINT_STATS_LE32(cck, 
crc32_good); + PRINT_STATS_LE32(cck, false_alarm_cnt); + PRINT_STATS_LE32(cck, fina_sync_err_cnt); + PRINT_STATS_LE32(cck, sfd_timeout); + PRINT_STATS_LE32(cck, fina_timeout); + PRINT_STATS_LE32(cck, unresponded_rts); + PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); + PRINT_STATS_LE32(cck, sent_ack_cnt); + PRINT_STATS_LE32(cck, sent_cts_cnt); + PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); + PRINT_STATS_LE32(cck, dsp_self_kill); + PRINT_STATS_LE32(cck, mh_format_err); + PRINT_STATS_LE32(cck, re_acq_main_rssi_sum); + PRINT_STATS_LE32(cck, reserved); + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - GENERAL"); + PRINT_STATS_LE32(general, bogus_cts); + PRINT_STATS_LE32(general, bogus_ack); + PRINT_STATS_LE32(general, non_bssid_frames); + PRINT_STATS_LE32(general, filtered_frames); + PRINT_STATS_LE32(general, non_channel_beacons); + PRINT_STATS_LE32(general, channel_beacons); + PRINT_STATS_LE32(general, num_missed_bcon); + PRINT_STATS_LE32(general, adc_rx_saturation_time); + PRINT_STATS_LE32(general, ina_detection_search_time); + PRINT_STATS_LE32(general, beacon_silence_rssi_a); + PRINT_STATS_LE32(general, beacon_silence_rssi_b); + PRINT_STATS_LE32(general, beacon_silence_rssi_c); + PRINT_STATS_LE32(general, interference_data_flag); + PRINT_STATS_LE32(general, channel_load); + PRINT_STATS_LE32(general, dsp_false_alarms); + PRINT_STATS_LE32(general, beacon_rssi_a); + PRINT_STATS_LE32(general, beacon_rssi_b); + PRINT_STATS_LE32(general, beacon_rssi_c); + PRINT_STATS_LE32(general, beacon_energy_a); + PRINT_STATS_LE32(general, beacon_energy_b); + PRINT_STATS_LE32(general, beacon_energy_c); + PRINT_STATS_LE32(general, num_bt_kills); + PRINT_STATS_LE32(general, mac_id); + PRINT_STATS_LE32(general, directed_data_mpdu); + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - HT"); + PRINT_STATS_LE32(ht, plcp_err); + PRINT_STATS_LE32(ht, overrun_err); + PRINT_STATS_LE32(ht, early_overrun_err); + PRINT_STATS_LE32(ht, crc32_good); + PRINT_STATS_LE32(ht, crc32_err); + PRINT_STATS_LE32(ht, mh_format_err); + PRINT_STATS_LE32(ht, agg_crc32_good); + PRINT_STATS_LE32(ht, agg_mpdu_cnt); + PRINT_STATS_LE32(ht, agg_cnt); + PRINT_STATS_LE32(ht, unsupport_mcs); + + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; +} +#undef PRINT_STAT_LE32 + +static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, + char __user *user_buf, size_t count, + loff_t *ppos, + struct iwl_mvm_frame_stats *stats) +{ + char *buff, *pos, *endpos; + int idx, i; + int ret; + static const size_t bufsz = 1024; + + buff = kmalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + spin_lock_bh(&mvm->drv_stats_lock); + + pos = buff; + endpos = pos + bufsz; + + pos += scnprintf(pos, endpos - pos, + "Legacy/HT/VHT\t:\t%d/%d/%d\n", + stats->legacy_frames, + stats->ht_frames, + stats->vht_frames); + pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", + stats->bw_20_frames, + stats->bw_40_frames, + stats->bw_80_frames); + pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", + stats->ngi_frames, + stats->sgi_frames); + pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", + stats->siso_frames, + stats->mimo2_frames); + pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", + stats->fail_frames, + stats->success_frames); + pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", + stats->agg_frames); + pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", + stats->ampdu_count); + pos += 
scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n", + stats->ampdu_count > 0 ? + (stats->agg_frames / stats->ampdu_count) : 0); + + pos += scnprintf(pos, endpos - pos, "Last Rates\n"); + + idx = stats->last_frame_idx - 1; + for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) { + idx = (idx + 1) % ARRAY_SIZE(stats->last_rates); + if (stats->last_rates[idx] == 0) + continue; + pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", + (int)(ARRAY_SIZE(stats->last_rates) - i)); + pos += rs_pretty_print_rate(pos, stats->last_rates[idx]); + } + spin_unlock_bh(&mvm->drv_stats_lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + + return ret; +} + +static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + + return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, + &mvm->drv_rx_stats); +} + +static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret; + + mutex_lock(&mvm->mutex); + + /* allow one more restart that we're provoking here */ + if (mvm->restart_fw >= 0) + mvm->restart_fw++; + + /* take the return value to make compiler happy - it will fail anyway */ + ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL); + + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI); + if (ret) + return ret; + + iwl_force_nmi(mvm->trans); + + iwl_mvm_unref(mvm, IWL_MVM_REF_NMI); + + return count; +} + +static ssize_t +iwl_dbgfs_scan_ant_rxchain_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int pos = 0; + char buf[32]; + const size_t bufsz = sizeof(buf); + + /* print which antennas were set for the scan command by the user */ + pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: "); + if (mvm->scan_rx_ant & ANT_A) + pos += scnprintf(buf + pos, bufsz - pos, "A"); + if (mvm->scan_rx_ant & ANT_B) + pos += scnprintf(buf + pos, bufsz - pos, "B"); + if (mvm->scan_rx_ant & ANT_C) + pos += scnprintf(buf + pos, bufsz - pos, "C"); + pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + u8 scan_rx_ant; + + if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) + return -EINVAL; + if (scan_rx_ant > ANT_ABC) + return -EINVAL; + if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) + return -EINVAL; + + if (mvm->scan_rx_ant != scan_rx_ant) { + mvm->scan_rx_ant = scan_rx_ant; + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_config_scan(mvm); + } + + return count; +} + +static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int conf; + char buf[8]; + const size_t bufsz = sizeof(buf); + int pos = 0; + + mutex_lock(&mvm->mutex); + conf = mvm->fw_dbg_conf; + mutex_unlock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + unsigned int 
conf_id; + int ret; + + ret = kstrtouint(buf, 0, &conf_id); + if (ret) + return ret; + + if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) + return -EINVAL; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); + + if (ret) + return ret; + + iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL); + + iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); + + return count; +} + +#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING +static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_bcast_filter_cmd cmd; + const struct iwl_fw_bcast_filter *filter; + char *buf; + int bufsz = 1024; + int i, j, pos = 0; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { + ADD_TEXT("None\n"); + mutex_unlock(&mvm->mutex); + goto out; + } + mutex_unlock(&mvm->mutex); + + for (i = 0; cmd.filters[i].attrs[0].mask; i++) { + filter = &cmd.filters[i]; + + ADD_TEXT("Filter [%d]:\n", i); + ADD_TEXT("\tDiscard=%d\n", filter->discard); + ADD_TEXT("\tFrame Type: %s\n", + filter->frame_type ? "IPv4" : "Generic"); + + for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) { + const struct iwl_fw_bcast_filter_attr *attr; + + attr = &filter->attrs[j]; + if (!attr->mask) + break; + + ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", + j, attr->offset, + attr->offset_type ? 
"IP End" : + "Payload Start", + be32_to_cpu(attr->mask), + be32_to_cpu(attr->val), + le16_to_cpu(attr->reserved1)); + } + } +out: + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int pos, next_pos; + struct iwl_fw_bcast_filter filter = {}; + struct iwl_bcast_filter_cmd cmd; + u32 filter_id, attr_id, mask, value; + int err = 0; + + if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, + &filter.frame_type, &pos) != 3) + return -EINVAL; + + if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) || + filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) + return -EINVAL; + + for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); + attr_id++) { + struct iwl_fw_bcast_filter_attr *attr = + &filter.attrs[attr_id]; + + if (pos >= count) + break; + + if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", + &attr->offset, &attr->offset_type, + &mask, &value, &next_pos) != 4) + return -EINVAL; + + attr->mask = cpu_to_be32(mask); + attr->val = cpu_to_be32(value); + if (mask) + filter.num_attrs++; + + pos += next_pos; + } + + mutex_lock(&mvm->mutex); + memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], + &filter, sizeof(filter)); + + /* send updated bcast filtering configuration */ + if (mvm->dbgfs_bcast_filtering.override && + iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) + err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, + sizeof(cmd), &cmd); + mutex_unlock(&mvm->mutex); + + return err ?: count; +} + +static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_bcast_filter_cmd cmd; + char *buf; + int bufsz = 1024; + int i, pos = 0; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { + ADD_TEXT("None\n"); + mutex_unlock(&mvm->mutex); + goto out; + } + mutex_unlock(&mvm->mutex); + + for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) { + const struct iwl_fw_bcast_mac *mac = &cmd.macs[i]; + + ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", + i, mac->default_discard, mac->attached_filters); + } +out: + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_bcast_filter_cmd cmd; + struct iwl_fw_bcast_mac mac = {}; + u32 mac_id, attached_filters; + int err = 0; + + if (!mvm->bcast_filters) + return -ENOENT; + + if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, + &attached_filters) != 3) + return -EINVAL; + + if (mac_id >= ARRAY_SIZE(cmd.macs) || + mac.default_discard > 1 || + attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) + return -EINVAL; + + mac.attached_filters = cpu_to_le16(attached_filters); + + mutex_lock(&mvm->mutex); + memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], + &mac, sizeof(mac)); + + /* send updated bcast filtering configuration */ + if (mvm->dbgfs_bcast_filtering.override && + iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) + err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, + sizeof(cmd), &cmd); + mutex_unlock(&mvm->mutex); + + return err ?: count; +} +#endif + +#ifdef CONFIG_PM_SLEEP +static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int 
store; + + if (sscanf(buf, "%d", &store) != 1) + return -EINVAL; + + mvm->store_d3_resume_sram = store; + + return count; +} + +static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + const struct fw_img *img; + int ofs, len, pos = 0; + size_t bufsz, ret; + char *buf; + u8 *ptr = mvm->d3_resume_sram; + + img = &mvm->fw->img[IWL_UCODE_WOWLAN]; + len = img->sec[IWL_UCODE_SECTION_DATA].len; + + bufsz = len * 4 + 256; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n", + mvm->store_d3_resume_sram ? "en" : "dis"); + + if (ptr) { + for (ofs = 0; ofs < len; ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x %16ph\n", ofs, ptr + ofs); + } + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "(no data captured)\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + + kfree(buf); + + return ret; +} +#endif + +#define PRINT_MVM_REF(ref) do { \ + if (mvm->refs[ref]) \ + pos += scnprintf(buf + pos, bufsz - pos, \ + "\t(0x%lx): %d %s\n", \ + BIT(ref), mvm->refs[ref], #ref); \ +} while (0) + +static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int i, pos = 0; + char buf[256]; + const size_t bufsz = sizeof(buf); + u32 refs = 0; + + for (i = 0; i < IWL_MVM_REF_COUNT; i++) + if (mvm->refs[i]) + refs |= BIT(i); + + pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n", + refs); + + PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN); + PRINT_MVM_REF(IWL_MVM_REF_SCAN); + PRINT_MVM_REF(IWL_MVM_REF_ROC); + PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX); + PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT); + PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS); + PRINT_MVM_REF(IWL_MVM_REF_USER); + PRINT_MVM_REF(IWL_MVM_REF_TX); + PRINT_MVM_REF(IWL_MVM_REF_TX_AGG); + PRINT_MVM_REF(IWL_MVM_REF_ADD_IF); + PRINT_MVM_REF(IWL_MVM_REF_START_AP); + PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED); + PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX); + PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS); + PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL); + PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ); + PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE); + PRINT_MVM_REF(IWL_MVM_REF_NMI); + PRINT_MVM_REF(IWL_MVM_REF_TM_CMD); + PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK); + PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA); + PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + unsigned long value; + int ret; + bool taken; + + ret = kstrtoul(buf, 10, &value); + if (ret < 0) + return ret; + + mutex_lock(&mvm->mutex); + + taken = mvm->refs[IWL_MVM_REF_USER]; + if (value == 1 && !taken) + iwl_mvm_ref(mvm, IWL_MVM_REF_USER); + else if (value == 0 && taken) + iwl_mvm_unref(mvm, IWL_MVM_REF_USER); + else + ret = -EINVAL; + + mutex_unlock(&mvm->mutex); + + if (ret < 0) + return ret; + return count; +} + +#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) +#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) +#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ + if (!debugfs_create_file(alias, mode, parent, mvm, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ + } while (0) +#define MVM_DEBUGFS_ADD_FILE(name, parent, 
mode) \ + MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) + +static ssize_t +iwl_dbgfs_prph_reg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int pos = 0; + char buf[32]; + const size_t bufsz = sizeof(buf); + int ret; + + if (!mvm->dbgfs_prph_reg_addr) + return -EINVAL; + + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ); + if (ret) + return ret; + + pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", + mvm->dbgfs_prph_reg_addr, + iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr)); + + iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + u8 args; + u32 value; + int ret; + + args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value); + /* if we only want to set the reg address - nothing more to do */ + if (args == 1) + goto out; + + /* otherwise, make sure we have both address and value */ + if (args != 2) + return -EINVAL; + + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); + if (ret) + return ret; + + iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value); + + iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); +out: + return count; +} + +static ssize_t +iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); + +/* Device wide debugfs entries */ +MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); +MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); +MVM_DEBUGFS_READ_FILE_OPS(nic_temp); +MVM_DEBUGFS_READ_FILE_OPS(stations); +MVM_DEBUGFS_READ_FILE_OPS(bt_notif); +MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); +MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); +MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); +MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8); + +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING +MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); +#endif + +#ifdef CONFIG_PM_SLEEP +MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8); +#endif + +int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) +{ + struct dentry *bcast_dir __maybe_unused; + char buf[100]; + + spin_lock_init(&mvm->drv_stats_lock); + + mvm->debugfs_dir = dbgfs_dir; + + MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, + S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); + 
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, + S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); + if (!debugfs_create_bool("enable_scan_iteration_notif", + S_IRUSR | S_IWUSR, + mvm->debugfs_dir, + &mvm->scan_iter_notif_enabled)) + goto err; + +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { + bcast_dir = debugfs_create_dir("bcast_filtering", + mvm->debugfs_dir); + if (!bcast_dir) + goto err; + + if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR, + bcast_dir, + &mvm->dbgfs_bcast_filtering.override)) + goto err; + + MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, + bcast_dir, S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, + bcast_dir, S_IWUSR | S_IRUSR); + } +#endif + +#ifdef CONFIG_PM_SLEEP + MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR); + if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR, + mvm->debugfs_dir, &mvm->d3_wake_sysassert)) + goto err; + if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR, + mvm->debugfs_dir, &mvm->last_netdetect_scans)) + goto err; +#endif + + if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR, + mvm->debugfs_dir, + &mvm->low_latency_agg_frame_limit)) + goto err; + if (!debugfs_create_u8("ps_disabled", S_IRUSR, + mvm->debugfs_dir, &mvm->ps_disabled)) + goto err; + if (!debugfs_create_blob("nvm_hw", S_IRUSR, + mvm->debugfs_dir, &mvm->nvm_hw_blob)) + goto err; + if (!debugfs_create_blob("nvm_sw", S_IRUSR, + mvm->debugfs_dir, &mvm->nvm_sw_blob)) + goto err; + if (!debugfs_create_blob("nvm_calib", S_IRUSR, + mvm->debugfs_dir, &mvm->nvm_calib_blob)) + goto err; + if (!debugfs_create_blob("nvm_prod", S_IRUSR, + mvm->debugfs_dir, &mvm->nvm_prod_blob)) + goto err; + if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR, + mvm->debugfs_dir, &mvm->nvm_phy_sku_blob)) + goto err; + + /* + * Create a symlink with mac80211. It will be removed when mac80211 + * exists (before the opmode exists which removes the target.) 
+ */ + snprintf(buf, 100, "../../%s/%s", + dbgfs_dir->d_parent->d_parent->d_name.name, + dbgfs_dir->d_parent->d_name.name); + if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf)) + goto err; + + return 0; +err: + IWL_ERR(mvm, "Can't create the mvm debugfs directory\n"); + return -ENOMEM; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h new file mode 100644 index 000000000000..8c4190e7e027 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h @@ -0,0 +1,103 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#define MVM_DEBUGFS_READ_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ +static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + argtype *arg = file->private_data; \ + char buf[buflen] = {}; \ + size_t buf_size = min(count, sizeof(buf) - 1); \ + \ + if (copy_from_user(buf, user_buf, buf_size)) \ + return -EFAULT; \ + \ + return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \ +} \ + +#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \ +MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = _iwl_dbgfs_##name##_write, \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \ +MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = _iwl_dbgfs_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h new file mode 100644 index 000000000000..d398a6102805 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h @@ -0,0 +1,476 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. 
All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_bt_coex_h__
+#define __fw_api_bt_coex_h__
+
+#include
+#include
+
+#define BITS(nb) (BIT(nb) - 1)
+
+/**
+ * enum iwl_bt_coex_flags - flags for BT_COEX command
+ * @BT_COEX_MODE_POS:
+ * @BT_COEX_MODE_MSK:
+ * @BT_COEX_DISABLE_OLD:
+ * @BT_COEX_2W_OLD:
+ * @BT_COEX_3W_OLD:
+ * @BT_COEX_NW_OLD:
+ * @BT_COEX_AUTO_OLD:
+ * @BT_COEX_BT_OLD: Antenna is for BT (manufacturing tests)
+ * @BT_COEX_WIFI_OLD: Antenna is for WiFi (manufacturing tests)
+ * @BT_COEX_SYNC2SCO:
+ * @BT_COEX_CORUNNING:
+ * @BT_COEX_MPLUT:
+ * @BT_COEX_TTC:
+ * @BT_COEX_RRC:
+ *
+ * The COEX_MODE must be set for each command. Even if it is not changed.
+ */
+enum iwl_bt_coex_flags {
+ BT_COEX_MODE_POS = 3,
+ BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
+ BT_COEX_DISABLE_OLD = 0x0 << BT_COEX_MODE_POS,
+ BT_COEX_2W_OLD = 0x1 << BT_COEX_MODE_POS,
+ BT_COEX_3W_OLD = 0x2 << BT_COEX_MODE_POS,
+ BT_COEX_NW_OLD = 0x3 << BT_COEX_MODE_POS,
+ BT_COEX_AUTO_OLD = 0x5 << BT_COEX_MODE_POS,
+ BT_COEX_BT_OLD = 0x6 << BT_COEX_MODE_POS,
+ BT_COEX_WIFI_OLD = 0x7 << BT_COEX_MODE_POS,
+ BT_COEX_SYNC2SCO = BIT(7),
+ BT_COEX_CORUNNING = BIT(8),
+ BT_COEX_MPLUT = BIT(9),
+ BT_COEX_TTC = BIT(20),
+ BT_COEX_RRC = BIT(21),
+};
+
+/*
+ * indicates what has changed in the BT_COEX command.
+ * BT_VALID_ENABLE must be set for each command. Commands without this bit will
+ * be discarded by the firmware
+ */
+enum iwl_bt_coex_valid_bit_msk {
+ BT_VALID_ENABLE = BIT(0),
+ BT_VALID_BT_PRIO_BOOST = BIT(1),
+ BT_VALID_MAX_KILL = BIT(2),
+ BT_VALID_3W_TMRS = BIT(3),
+ BT_VALID_KILL_ACK = BIT(4),
+ BT_VALID_KILL_CTS = BIT(5),
+ BT_VALID_REDUCED_TX_POWER = BIT(6),
+ BT_VALID_LUT = BIT(7),
+ BT_VALID_WIFI_RX_SW_PRIO_BOOST = BIT(8),
+ BT_VALID_WIFI_TX_SW_PRIO_BOOST = BIT(9),
+ BT_VALID_MULTI_PRIO_LUT = BIT(10),
+ BT_VALID_TRM_KICK_FILTER = BIT(11),
+ BT_VALID_CORUN_LUT_20 = BIT(12),
+ BT_VALID_CORUN_LUT_40 = BIT(13),
+ BT_VALID_ANT_ISOLATION = BIT(14),
+ BT_VALID_ANT_ISOLATION_THRS = BIT(15),
+ BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
+ BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
+ BT_VALID_SYNC_TO_SCO = BIT(18),
+ BT_VALID_TTC = BIT(20),
+ BT_VALID_RRC = BIT(21),
+};
+
+/**
+ * enum iwl_bt_reduced_tx_power - allows to reduce txpower for WiFi frames.
+ * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
+ * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
+ *
+ * This mechanism allows to have BT and WiFi run concurrently. Since WiFi
+ * reduces its Tx power, it can work along with BT, hence reducing the amount
+ * of WiFi frames being killed by BT.
+ */
+enum iwl_bt_reduced_tx_power {
+ BT_REDUCED_TX_POWER_CTL = BIT(0),
+ BT_REDUCED_TX_POWER_DATA = BIT(1),
+};
+
+enum iwl_bt_coex_lut_type {
+ BT_COEX_TIGHT_LUT = 0,
+ BT_COEX_LOOSE_LUT,
+ BT_COEX_TX_DIS_LUT,
+
+ BT_COEX_MAX_LUT,
+ BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
+
+#define BT_COEX_LUT_SIZE (12)
+#define BT_COEX_CORUN_LUT_SIZE (32)
+#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
+#define BT_COEX_BOOST_SIZE (4)
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
+
+/**
+ * struct iwl_bt_coex_cmd_old - bt coex configuration command
+ * @flags: &enum iwl_bt_coex_flags
+ * @max_kill:
+ * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
+ * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ * should be set by default
+ * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ * should be set by default
+ * @bt4_antenna_isolation: antenna isolation
+ * @bt4_antenna_isolation_thr: antenna threshold value
+ * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
+ * @bt4_tx_rx_max_freq0: TxRx max frequency
+ * @bt_prio_boost: BT priority boost registers
+ * @wifi_tx_prio_boost: SW boost of wifi tx priority
+ * @wifi_rx_prio_boost: SW boost of wifi rx priority
+ * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
+ * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
+ * @decision_lut: PTA decision LUT, per Prio-Ch
+ * @bt4_multiprio_lut: multi priority LUT configuration
+ * @bt4_corun_lut20: co-running 20 MHz LUT configuration
+ * @bt4_corun_lut40: co-running 40 MHz LUT configuration
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
+ *
+ * The structure is used for the BT_COEX command.
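+ *
+ * Editor's illustration (not part of the original patch): a caller building
+ * this command typically ORs one of the mode values into @flags and marks
+ * every field it actually filled in @valid_bit_msk, e.g. (hypothetical
+ * values, the real configuration lives in the driver's coex code):
+ *
+ *	cmd.flags = cpu_to_le32(BT_COEX_NW_OLD | BT_COEX_SYNC2SCO);
+ *	cmd.valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE | BT_VALID_SYNC_TO_SCO);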
+ */ +struct iwl_bt_coex_cmd_old { + __le32 flags; + u8 max_kill; + u8 bt_reduced_tx_power; + u8 override_primary_lut; + u8 override_secondary_lut; + + u8 bt4_antenna_isolation; + u8 bt4_antenna_isolation_thr; + u8 bt4_tx_tx_delta_freq_thr; + u8 bt4_tx_rx_max_freq0; + + __le32 bt_prio_boost[BT_COEX_BOOST_SIZE]; + __le32 wifi_tx_prio_boost; + __le32 wifi_rx_prio_boost; + __le32 kill_ack_msk; + __le32 kill_cts_msk; + + __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE]; + __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE]; + __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE]; + __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE]; + + __le32 valid_bit_msk; +} __packed; /* BT_COEX_CMD_API_S_VER_5 */ + +enum iwl_bt_coex_mode { + BT_COEX_DISABLE = 0x0, + BT_COEX_NW = 0x1, + BT_COEX_BT = 0x2, + BT_COEX_WIFI = 0x3, +}; /* BT_COEX_MODES_E */ + +enum iwl_bt_coex_enabled_modules { + BT_COEX_MPLUT_ENABLED = BIT(0), + BT_COEX_MPLUT_BOOST_ENABLED = BIT(1), + BT_COEX_SYNC2SCO_ENABLED = BIT(2), + BT_COEX_CORUN_ENABLED = BIT(3), + BT_COEX_HIGH_BAND_RET = BIT(4), +}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */ + +/** + * struct iwl_bt_coex_cmd - bt coex configuration command + * @mode: enum %iwl_bt_coex_mode + * @enabled_modules: enum %iwl_bt_coex_enabled_modules + * + * The structure is used for the BT_COEX command. + */ +struct iwl_bt_coex_cmd { + __le32 mode; + __le32 enabled_modules; +} __packed; /* BT_COEX_CMD_API_S_VER_6 */ + +/** + * struct iwl_bt_coex_corun_lut_update - bt coex update the corun lut + * @corun_lut20: co-running 20 MHz LUT configuration + * @corun_lut40: co-running 40 MHz LUT configuration + * + * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command. + */ +struct iwl_bt_coex_corun_lut_update_cmd { + __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE]; + __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE]; +} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */ + +/** + * struct iwl_bt_coex_reduced_txp_update_cmd + * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the + * bits are the sta_id (value) + */ +struct iwl_bt_coex_reduced_txp_update_cmd { + __le32 reduced_txp; +} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */ + +/** + * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command + * @bt_primary_ci: + * @primary_ch_phy_id: + * @bt_secondary_ci: + * @secondary_ch_phy_id: + * + * Used for BT_COEX_CI command + */ +struct iwl_bt_coex_ci_cmd { + __le64 bt_primary_ci; + __le32 primary_ch_phy_id; + + __le64 bt_secondary_ci; + __le32 secondary_ch_phy_id; +} __packed; /* BT_CI_MSG_API_S_VER_2 */ + +#define BT_MBOX(n_dw, _msg, _pos, _nbits) \ + BT_MBOX##n_dw##_##_msg##_POS = (_pos), \ + BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS + +enum iwl_bt_mxbox_dw0 { + BT_MBOX(0, LE_SLAVE_LAT, 0, 3), + BT_MBOX(0, LE_PROF1, 3, 1), + BT_MBOX(0, LE_PROF2, 4, 1), + BT_MBOX(0, LE_PROF_OTHER, 5, 1), + BT_MBOX(0, CHL_SEQ_N, 8, 4), + BT_MBOX(0, INBAND_S, 13, 1), + BT_MBOX(0, LE_MIN_RSSI, 16, 4), + BT_MBOX(0, LE_SCAN, 20, 1), + BT_MBOX(0, LE_ADV, 21, 1), + BT_MBOX(0, LE_MAX_TX_POWER, 24, 4), + BT_MBOX(0, OPEN_CON_1, 28, 2), +}; + +enum iwl_bt_mxbox_dw1 { + BT_MBOX(1, BR_MAX_TX_POWER, 0, 4), + BT_MBOX(1, IP_SR, 4, 1), + BT_MBOX(1, LE_MSTR, 5, 1), + BT_MBOX(1, AGGR_TRFC_LD, 8, 6), + BT_MBOX(1, MSG_TYPE, 16, 3), + BT_MBOX(1, SSN, 19, 2), +}; + +enum iwl_bt_mxbox_dw2 { + BT_MBOX(2, SNIFF_ACT, 0, 3), + BT_MBOX(2, PAG, 3, 1), + BT_MBOX(2, INQUIRY, 4, 1), + BT_MBOX(2, CONN, 5, 1), + BT_MBOX(2, SNIFF_INTERVAL, 8, 5), + BT_MBOX(2, DISC, 13, 1), + BT_MBOX(2, 
SCO_TX_ACT, 16, 2), + BT_MBOX(2, SCO_RX_ACT, 18, 2), + BT_MBOX(2, ESCO_RE_TX, 20, 2), + BT_MBOX(2, SCO_DURATION, 24, 6), +}; + +enum iwl_bt_mxbox_dw3 { + BT_MBOX(3, SCO_STATE, 0, 1), + BT_MBOX(3, SNIFF_STATE, 1, 1), + BT_MBOX(3, A2DP_STATE, 2, 1), + BT_MBOX(3, ACL_STATE, 3, 1), + BT_MBOX(3, MSTR_STATE, 4, 1), + BT_MBOX(3, OBX_STATE, 5, 1), + BT_MBOX(3, OPEN_CON_2, 8, 2), + BT_MBOX(3, TRAFFIC_LOAD, 10, 2), + BT_MBOX(3, CHL_SEQN_LSB, 12, 1), + BT_MBOX(3, INBAND_P, 13, 1), + BT_MBOX(3, MSG_TYPE_2, 16, 3), + BT_MBOX(3, SSN_2, 19, 2), + BT_MBOX(3, UPDATE_REQUEST, 21, 1), +}; + +#define BT_MBOX_MSG(_notif, _num, _field) \ + ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ + >> BT_MBOX##_num##_##_field##_POS) + +enum iwl_bt_activity_grading { + BT_OFF = 0, + BT_ON_NO_CONNECTION = 1, + BT_LOW_TRAFFIC = 2, + BT_HIGH_TRAFFIC = 3, + + BT_MAX_AG, +}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */ + +enum iwl_bt_ci_compliance { + BT_CI_COMPLIANCE_NONE = 0, + BT_CI_COMPLIANCE_PRIMARY = 1, + BT_CI_COMPLIANCE_SECONDARY = 2, + BT_CI_COMPLIANCE_BOTH = 3, +}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ + +#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id) \ + (_ttc_rrc_status & BIT(_phy_id)) + +#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id) \ + ((_ttc_rrc_status >> 4) & BIT(_phy_id)) + +/** + * struct iwl_bt_coex_profile_notif - notification about BT coex + * @mbox_msg: message from BT to WiFi + * @msg_idx: the index of the message + * @bt_ci_compliance: enum %iwl_bt_ci_compliance + * @primary_ch_lut: LUT used for primary channel enum %iwl_bt_coex_lut_type + * @secondary_ch_lut: LUT used for secondary channel enume %iwl_bt_coex_lut_type + * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading + * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY + */ +struct iwl_bt_coex_profile_notif { + __le32 mbox_msg[4]; + __le32 msg_idx; + __le32 bt_ci_compliance; + + __le32 primary_ch_lut; + __le32 secondary_ch_lut; + __le32 bt_activity_grading; + u8 ttc_rrc_status; + u8 reserved[3]; +} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ + +enum iwl_bt_coex_prio_table_event { + BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0, + BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1, + BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2, + BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, + BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4, + BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5, + BT_COEX_PRIO_TBL_EVT_DTIM = 6, + BT_COEX_PRIO_TBL_EVT_SCAN52 = 7, + BT_COEX_PRIO_TBL_EVT_SCAN24 = 8, + BT_COEX_PRIO_TBL_EVT_IDLE = 9, + BT_COEX_PRIO_TBL_EVT_MAX = 16, +}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */ + +enum iwl_bt_coex_prio_table_prio { + BT_COEX_PRIO_TBL_DISABLED = 0, + BT_COEX_PRIO_TBL_PRIO_LOW = 1, + BT_COEX_PRIO_TBL_PRIO_HIGH = 2, + BT_COEX_PRIO_TBL_PRIO_BYPASS = 3, + BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4, + BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5, + BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6, + BT_COEX_PRIO_TBL_MAX = 8, +}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */ + +#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0) +#define BT_COEX_PRIO_TBL_PRIO_POS (1) +#define BT_COEX_PRIO_TBL_RESERVED_POS (4) + +/** + * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex + * @prio_tbl: + */ +struct iwl_bt_coex_prio_tbl_cmd { + u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX]; +} __packed; + +/** + * struct iwl_bt_coex_ci_cmd_old - bt coex channel inhibition command + * @bt_primary_ci: + * @bt_secondary_ci: + * @co_run_bw_primary: + * @co_run_bw_secondary: + * @primary_ch_phy_id: + * @secondary_ch_phy_id: + * + * Used for BT_COEX_CI command + */ 
+struct iwl_bt_coex_ci_cmd_old { + __le64 bt_primary_ci; + __le64 bt_secondary_ci; + + u8 co_run_bw_primary; + u8 co_run_bw_secondary; + u8 primary_ch_phy_id; + u8 secondary_ch_phy_id; +} __packed; /* BT_CI_MSG_API_S_VER_1 */ + +/** + * struct iwl_bt_coex_profile_notif_old - notification about BT coex + * @mbox_msg: message from BT to WiFi + * @msg_idx: the index of the message + * @bt_status: 0 - off, 1 - on + * @bt_open_conn: number of BT connections open + * @bt_traffic_load: load of BT traffic + * @bt_agg_traffic_load: aggregated load of BT traffic + * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant + * @primary_ch_lut: LUT used for primary channel + * @secondary_ch_lut: LUT used for secondary channel + * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading + */ +struct iwl_bt_coex_profile_notif_old { + __le32 mbox_msg[4]; + __le32 msg_idx; + u8 bt_status; + u8 bt_open_conn; + u8 bt_traffic_load; + u8 bt_agg_traffic_load; + u8 bt_ci_compliance; + u8 ttc_enabled; + u8 rrc_enabled; + u8 reserved; + + __le32 primary_ch_lut; + __le32 secondary_ch_lut; + __le32 bt_activity_grading; +} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */ + +#endif /* __fw_api_bt_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h new file mode 100644 index 000000000000..20521bebb0b1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h @@ -0,0 +1,425 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_api_d3_h__ +#define __fw_api_d3_h__ + +/** + * enum iwl_d3_wakeup_flags - D3 manager wakeup flags + * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert + */ +enum iwl_d3_wakeup_flags { + IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0), +}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */ + +/** + * struct iwl_d3_manager_config - D3 manager configuration command + * @min_sleep_time: minimum sleep time (in usec) + * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags + * @wakeup_host_timer: force wakeup after this many seconds + * + * The structure is used for the D3_CONFIG_CMD command. + */ +struct iwl_d3_manager_config { + __le32 min_sleep_time; + __le32 wakeup_flags; + __le32 wakeup_host_timer; +} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ + + +/* TODO: OFFLOADS_QUERY_API_S_VER_1 */ + +/** + * enum iwl_d3_proto_offloads - enabled protocol offloads + * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled + * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled + */ +enum iwl_proto_offloads { + IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), + IWL_D3_PROTO_OFFLOAD_NS = BIT(1), +}; + +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12 + +#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4 +#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2 + +/** + * struct iwl_proto_offload_cmd_common - ARP/NS offload common part + * @enabled: enable flags + * @remote_ipv4_addr: remote address to answer to (or zero if all) + * @host_ipv4_addr: our IPv4 address to respond to queries for + * @arp_mac_addr: our MAC address for ARP responses + * @reserved: unused + */ +struct iwl_proto_offload_cmd_common { + __le32 enabled; + __be32 remote_ipv4_addr; + __be32 host_ipv4_addr; + u8 arp_mac_addr[ETH_ALEN]; + __le16 reserved; +} __packed; + +/** + * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @remote_ipv6_addr: remote address to answer to (or zero if all) + * @solicited_node_ipv6_addr: broken -- solicited node address exists + * for each target address + * @target_ipv6_addr: our target addresses + * @ndp_mac_addr: neighbor solicitation response MAC address + */ +struct iwl_proto_offload_cmd_v1 { + struct iwl_proto_offload_cmd_common common; + u8 remote_ipv6_addr[16]; + u8 solicited_node_ipv6_addr[16]; + u8 
target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16]; + u8 ndp_mac_addr[ETH_ALEN]; + __le16 reserved2; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ + +/** + * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @remote_ipv6_addr: remote address to answer to (or zero if all) + * @solicited_node_ipv6_addr: broken -- solicited node address exists + * for each target address + * @target_ipv6_addr: our target addresses + * @ndp_mac_addr: neighbor solicitation response MAC address + */ +struct iwl_proto_offload_cmd_v2 { + struct iwl_proto_offload_cmd_common common; + u8 remote_ipv6_addr[16]; + u8 solicited_node_ipv6_addr[16]; + u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16]; + u8 ndp_mac_addr[ETH_ALEN]; + u8 numValidIPv6Addresses; + u8 reserved2[3]; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */ + +struct iwl_ns_config { + struct in6_addr source_ipv6_addr; + struct in6_addr dest_ipv6_addr; + u8 target_mac_addr[ETH_ALEN]; + __le16 reserved; +} __packed; /* NS_OFFLOAD_CONFIG */ + +struct iwl_targ_addr { + struct in6_addr addr; + __le32 config_num; +} __packed; /* TARGET_IPV6_ADDRESS */ + +/** + * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @target_ipv6_addr: target IPv6 addresses + * @ns_config: NS offload configurations + */ +struct iwl_proto_offload_cmd_v3_small { + struct iwl_proto_offload_cmd_common common; + __le32 num_valid_ipv6_addrs; + struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S]; + struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S]; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ + +/** + * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @target_ipv6_addr: target IPv6 addresses + * @ns_config: NS offload configurations + */ +struct iwl_proto_offload_cmd_v3_large { + struct iwl_proto_offload_cmd_common common; + __le32 num_valid_ipv6_addrs; + struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L]; + struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L]; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ + +/* + * WOWLAN_PATTERNS + */ +#define IWL_WOWLAN_MIN_PATTERN_LEN 16 +#define IWL_WOWLAN_MAX_PATTERN_LEN 128 + +struct iwl_wowlan_pattern { + u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; + u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN]; + u8 mask_size; + u8 pattern_size; + __le16 reserved; +} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */ + +#define IWL_WOWLAN_MAX_PATTERNS 20 + +struct iwl_wowlan_patterns_cmd { + __le32 n_patterns; + struct iwl_wowlan_pattern patterns[]; +} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ + +enum iwl_wowlan_wakeup_filters { + IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0), + IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1), + IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), + IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), + IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), + IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), + IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), + IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7), + IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8), + IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9), + IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10), + IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11), + IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12), + IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13), + IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14), + IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15), + 
IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16), +}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ + +struct iwl_wowlan_config_cmd { + __le32 wakeup_filter; + __le16 non_qos_seq; + __le16 qos_seq[8]; + u8 wowlan_ba_teardown_tids; + u8 is_11n_connection; + u8 offloading_tid; + u8 reserved[3]; +} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */ + +/* + * WOWLAN_TSC_RSC_PARAMS + */ +#define IWL_NUM_RSC 16 + +struct tkip_sc { + __le16 iv16; + __le16 pad; + __le32 iv32; +} __packed; /* TKIP_SC_API_U_VER_1 */ + +struct iwl_tkip_rsc_tsc { + struct tkip_sc unicast_rsc[IWL_NUM_RSC]; + struct tkip_sc multicast_rsc[IWL_NUM_RSC]; + struct tkip_sc tsc; +} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */ + +struct aes_sc { + __le64 pn; +} __packed; /* TKIP_AES_SC_API_U_VER_1 */ + +struct iwl_aes_rsc_tsc { + struct aes_sc unicast_rsc[IWL_NUM_RSC]; + struct aes_sc multicast_rsc[IWL_NUM_RSC]; + struct aes_sc tsc; +} __packed; /* AES_TSC_RSC_API_S_VER_1 */ + +union iwl_all_tsc_rsc { + struct iwl_tkip_rsc_tsc tkip; + struct iwl_aes_rsc_tsc aes; +}; /* ALL_TSC_RSC_API_S_VER_2 */ + +struct iwl_wowlan_rsc_tsc_params_cmd { + union iwl_all_tsc_rsc all_tsc_rsc; +} __packed; /* ALL_TSC_RSC_API_S_VER_2 */ + +#define IWL_MIC_KEY_SIZE 8 +struct iwl_mic_keys { + u8 tx[IWL_MIC_KEY_SIZE]; + u8 rx_unicast[IWL_MIC_KEY_SIZE]; + u8 rx_mcast[IWL_MIC_KEY_SIZE]; +} __packed; /* MIC_KEYS_API_S_VER_1 */ + +#define IWL_P1K_SIZE 5 +struct iwl_p1k_cache { + __le16 p1k[IWL_P1K_SIZE]; +} __packed; + +#define IWL_NUM_RX_P1K_CACHE 2 + +struct iwl_wowlan_tkip_params_cmd { + struct iwl_mic_keys mic_keys; + struct iwl_p1k_cache tx; + struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; + struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; +} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */ + +#define IWL_KCK_MAX_SIZE 32 +#define IWL_KEK_MAX_SIZE 32 + +struct iwl_wowlan_kek_kck_material_cmd { + u8 kck[IWL_KCK_MAX_SIZE]; + u8 kek[IWL_KEK_MAX_SIZE]; + __le16 kck_len; + __le16 kek_len; + __le64 replay_ctr; +} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */ + +#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 + +enum iwl_wowlan_rekey_status { + IWL_WOWLAN_REKEY_POST_REKEY = 0, + IWL_WOWLAN_REKEY_WHILE_REKEY = 1, +}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */ + +enum iwl_wowlan_wakeup_reason { + IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0, + IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0), + IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1), + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2), + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3), + IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4), + IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5), + IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6), + IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7), + IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), + IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13), + IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14), + IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15), + IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16), + +}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ + +struct iwl_wowlan_gtk_status { + u8 key_index; + u8 reserved[3]; + u8 decrypt_key[16]; + u8 tkip_mic_key[8]; + struct iwl_wowlan_rsc_tsc_params_cmd rsc; +} __packed; + +struct iwl_wowlan_status { + struct iwl_wowlan_gtk_status gtk; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 
wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 wake_packet[]; /* can be truncated from _length to _bufsize */ +} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ + +#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64 +#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128 +#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048 + +struct iwl_tcp_packet_info { + __le16 tcp_pseudo_header_checksum; + __le16 tcp_payload_length; +} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */ + +struct iwl_tcp_packet { + struct iwl_tcp_packet_info info; + u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; + u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN]; +} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ + +struct iwl_remote_wake_packet { + struct iwl_tcp_packet_info info; + u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; + u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN]; +} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ + +struct iwl_wowlan_remote_wake_config { + __le32 connection_max_time; /* unused */ + /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */ + u8 max_syn_retries; + u8 max_data_retries; + u8 tcp_syn_ack_timeout; + u8 tcp_ack_timeout; + + struct iwl_tcp_packet syn_tx; + struct iwl_tcp_packet synack_rx; + struct iwl_tcp_packet keepalive_ack_rx; + struct iwl_tcp_packet fin_tx; + + struct iwl_remote_wake_packet keepalive_tx; + struct iwl_remote_wake_packet wake_rx; + + /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */ + u8 sequence_number_offset; + u8 sequence_number_length; + u8 token_offset; + u8 token_length; + /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */ + __le32 initial_sequence_number; + __le16 keepalive_interval; + __le16 num_tokens; + u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS]; +} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */ + +/* TODO: NetDetect API */ + +#endif /* __fw_api_d3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h new file mode 100644 index 000000000000..f3f3ee0a766b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h @@ -0,0 +1,387 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_api_mac_h__ +#define __fw_api_mac_h__ + +/* + * The first MAC indices (starting from 0) + * are available to the driver, AUX follows + */ +#define MAC_INDEX_AUX 4 +#define MAC_INDEX_MIN_DRIVER 0 +#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX +#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1) + +enum iwl_ac { + AC_BK, + AC_BE, + AC_VI, + AC_VO, + AC_NUM, +}; + +/** + * enum iwl_mac_protection_flags - MAC context flags + * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, + * this will require CCK RTS/CTS2self. + * RTS/CTS will protect full burst time. + * @MAC_PROT_FLG_HT_PROT: enable HT protection + * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions + * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self + */ +enum iwl_mac_protection_flags { + MAC_PROT_FLG_TGG_PROTECT = BIT(3), + MAC_PROT_FLG_HT_PROT = BIT(23), + MAC_PROT_FLG_FAT_PROT = BIT(24), + MAC_PROT_FLG_SELF_CTS_EN = BIT(30), +}; + +#define MAC_FLG_SHORT_SLOT BIT(4) +#define MAC_FLG_SHORT_PREAMBLE BIT(5) + +/** + * enum iwl_mac_types - Supported MAC types + * @FW_MAC_TYPE_FIRST: lowest supported MAC type + * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal) + * @FW_MAC_TYPE_LISTENER: monitor MAC type (?) + * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS + * @FW_MAC_TYPE_IBSS: IBSS + * @FW_MAC_TYPE_BSS_STA: BSS (managed) station + * @FW_MAC_TYPE_P2P_DEVICE: P2P Device + * @FW_MAC_TYPE_P2P_STA: P2P client + * @FW_MAC_TYPE_GO: P2P GO + * @FW_MAC_TYPE_TEST: ? 
+ * @FW_MAC_TYPE_MAX: highest support MAC type + */ +enum iwl_mac_types { + FW_MAC_TYPE_FIRST = 1, + FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST, + FW_MAC_TYPE_LISTENER, + FW_MAC_TYPE_PIBSS, + FW_MAC_TYPE_IBSS, + FW_MAC_TYPE_BSS_STA, + FW_MAC_TYPE_P2P_DEVICE, + FW_MAC_TYPE_P2P_STA, + FW_MAC_TYPE_GO, + FW_MAC_TYPE_TEST, + FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST +}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */ + +/** + * enum iwl_tsf_id - TSF hw timer ID + * @TSF_ID_A: use TSF A + * @TSF_ID_B: use TSF B + * @TSF_ID_C: use TSF C + * @TSF_ID_D: use TSF D + * @NUM_TSF_IDS: number of TSF timers available + */ +enum iwl_tsf_id { + TSF_ID_A = 0, + TSF_ID_B = 1, + TSF_ID_C = 2, + TSF_ID_D = 3, + NUM_TSF_IDS = 4, +}; /* TSF_ID_API_E_VER_1 */ + +/** + * struct iwl_mac_data_ap - configuration data for AP MAC context + * @beacon_time: beacon transmit time in system time + * @beacon_tsf: beacon transmit time in TSF + * @bi: beacon interval in TU + * @bi_reciprocal: 2^32 / bi + * @dtim_interval: dtim transmit time in TU + * @dtim_reciprocal: 2^32 / dtim_interval + * @mcast_qid: queue ID for multicast traffic + * @beacon_template: beacon template ID + */ +struct iwl_mac_data_ap { + __le32 beacon_time; + __le64 beacon_tsf; + __le32 bi; + __le32 bi_reciprocal; + __le32 dtim_interval; + __le32 dtim_reciprocal; + __le32 mcast_qid; + __le32 beacon_template; +} __packed; /* AP_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_ibss - configuration data for IBSS MAC context + * @beacon_time: beacon transmit time in system time + * @beacon_tsf: beacon transmit time in TSF + * @bi: beacon interval in TU + * @bi_reciprocal: 2^32 / bi + * @beacon_template: beacon template ID + */ +struct iwl_mac_data_ibss { + __le32 beacon_time; + __le64 beacon_tsf; + __le32 bi; + __le32 bi_reciprocal; + __le32 beacon_template; +} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_sta - configuration data for station MAC context + * @is_assoc: 1 for associated state, 0 otherwise + * @dtim_time: DTIM arrival time in system time + * @dtim_tsf: DTIM arrival time in TSF + * @bi: beacon interval in TU, applicable only when associated + * @bi_reciprocal: 2^32 / bi , applicable only when associated + * @dtim_interval: DTIM interval in TU, applicable only when associated + * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated + * @listen_interval: in beacon intervals, applicable only when associated + * @assoc_id: unique ID assigned by the AP during association + */ +struct iwl_mac_data_sta { + __le32 is_assoc; + __le32 dtim_time; + __le64 dtim_tsf; + __le32 bi; + __le32 bi_reciprocal; + __le32 dtim_interval; + __le32 dtim_reciprocal; + __le32 listen_interval; + __le32 assoc_id; + __le32 assoc_beacon_arrive_time; +} __packed; /* STA_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_go - configuration data for P2P GO MAC context + * @ap: iwl_mac_data_ap struct with most config data + * @ctwin: client traffic window in TU (period after TBTT when GO is present). + * 0 indicates that there is no CT window. + * @opp_ps_enabled: indicate that opportunistic PS allowed + */ +struct iwl_mac_data_go { + struct iwl_mac_data_ap ap; + __le32 ctwin; + __le32 opp_ps_enabled; +} __packed; /* GO_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context + * @sta: iwl_mac_data_sta struct with most config data + * @ctwin: client traffic window in TU (period after TBTT when GO is present). + * 0 indicates that there is no CT window. 
+ */ +struct iwl_mac_data_p2p_sta { + struct iwl_mac_data_sta sta; + __le32 ctwin; +} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_pibss - Pseudo IBSS config data + * @stats_interval: interval in TU between statistics notifications to host. + */ +struct iwl_mac_data_pibss { + __le32 stats_interval; +} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */ + +/* + * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC + * context. + * @is_disc_extended: if set to true, P2P Device discoverability is enabled on + * other channels as well. This should be to true only in case that the + * device is discoverable and there is an active GO. Note that setting this + * field when not needed, will increase the number of interrupts and have + * effect on the platform power, as this setting opens the Rx filters on + * all macs. + */ +struct iwl_mac_data_p2p_dev { + __le32 is_disc_extended; +} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */ + +/** + * enum iwl_mac_filter_flags - MAC context filter flags + * @MAC_FILTER_IN_PROMISC: accept all data frames + * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and + * control frames to the host + * @MAC_FILTER_ACCEPT_GRP: accept multicast frames + * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames + * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames + * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host + * (in station mode when associated) + * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames + * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames + * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host + */ +enum iwl_mac_filter_flags { + MAC_FILTER_IN_PROMISC = BIT(0), + MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1), + MAC_FILTER_ACCEPT_GRP = BIT(2), + MAC_FILTER_DIS_DECRYPT = BIT(3), + MAC_FILTER_DIS_GRP_DECRYPT = BIT(4), + MAC_FILTER_IN_BEACON = BIT(6), + MAC_FILTER_OUT_BCAST = BIT(8), + MAC_FILTER_IN_CRC32 = BIT(11), + MAC_FILTER_IN_PROBE_REQUEST = BIT(12), +}; + +/** + * enum iwl_mac_qos_flags - QoS flags + * @MAC_QOS_FLG_UPDATE_EDCA: ? + * @MAC_QOS_FLG_TGN: HT is enabled + * @MAC_QOS_FLG_TXOP_TYPE: ? + * + */ +enum iwl_mac_qos_flags { + MAC_QOS_FLG_UPDATE_EDCA = BIT(0), + MAC_QOS_FLG_TGN = BIT(1), + MAC_QOS_FLG_TXOP_TYPE = BIT(4), +}; + +/** + * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD + * @cw_min: Contention window, start value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x0f. + * @cw_max: Contention window, max value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x3f. + * @aifsn: Number of slots in Arbitration Interframe Space (before + * performing random backoff timing prior to Tx). Device default 1. + * @fifos_mask: FIFOs used by this MAC for this AC + * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. + * + * One instance of this config struct for each of 4 EDCA access categories + * in struct iwl_qosparam_cmd. + * + * Device will automatically increase contention window by (2*CW) + 1 for each + * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW + * value, to cap the CW value. 
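+ *
+ * Worked example (editor's addition, not part of the original patch): with
+ * the default cw_min = 0x0f and cw_max = 0x3f, the window grows per retry as
+ * 15 -> 2*15+1 = 31 -> 2*31+1 = 63 -> 2*63+1 = 127, and 127 & 0x3f = 63, so
+ * the effective contention window never exceeds cw_max.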
+ */ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 fifos_mask; + __le16 edca_txop; +} __packed; /* AC_QOS_API_S_VER_2 */ + +/** + * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts + * ( MAC_CONTEXT_CMD = 0x28 ) + * @id_and_color: ID and color of the MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @mac_type: one of FW_MAC_TYPE_* + * @tsd_id: TSF HW timer, one of TSF_ID_* + * @node_addr: MAC address + * @bssid_addr: BSSID + * @cck_rates: basic rates available for CCK + * @ofdm_rates: basic rates available for OFDM + * @protection_flags: combination of MAC_PROT_FLG_FLAG_* + * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise + * @short_slot: 0x10 for enabling short slots, 0 otherwise + * @filter_flags: combination of MAC_FILTER_* + * @qos_flags: from MAC_QOS_FLG_* + * @ac: one iwl_mac_qos configuration for each AC + * @mac_specific: one of struct iwl_mac_data_*, according to mac_type + */ +struct iwl_mac_ctx_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */ + __le32 mac_type; + __le32 tsf_id; + u8 node_addr[6]; + __le16 reserved_for_node_addr; + u8 bssid_addr[6]; + __le16 reserved_for_bssid_addr; + __le32 cck_rates; + __le32 ofdm_rates; + __le32 protection_flags; + __le32 cck_short_preamble; + __le32 short_slot; + __le32 filter_flags; + /* MAC_QOS_PARAM_API_S_VER_1 */ + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM+1]; + /* MAC_CONTEXT_COMMON_DATA_API_S */ + union { + struct iwl_mac_data_ap ap; + struct iwl_mac_data_go go; + struct iwl_mac_data_sta sta; + struct iwl_mac_data_p2p_sta p2p_sta; + struct iwl_mac_data_p2p_dev p2p_dev; + struct iwl_mac_data_pibss pibss; + struct iwl_mac_data_ibss ibss; + }; +} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */ + +static inline u32 iwl_mvm_reciprocal(u32 v) +{ + if (!v) + return 0; + return 0xFFFFFFFF / v; +} + +#define IWL_NONQOS_SEQ_GET 0x1 +#define IWL_NONQOS_SEQ_SET 0x2 +struct iwl_nonqos_seq_query_cmd { + __le32 get_set_flag; + __le32 mac_id_n_color; + __le16 value; + __le16 reserved; +} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ + +#endif /* __fw_api_mac_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h new file mode 100644 index 000000000000..c8f3e2536cbb --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h @@ -0,0 +1,467 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __fw_api_power_h__ +#define __fw_api_power_h__ + +/* Power Management Commands, Responses, Notifications */ + +/** + * enum iwl_ltr_config_flags - masks for LTR config command flags + * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status + * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow + * memory access + * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR + * reg change + * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from + * D0 to D3 + * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register + * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register + * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD + */ +enum iwl_ltr_config_flags { + LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), + LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), + LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), + LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), + LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), + LTR_CFG_FLAG_SW_SET_LONG = BIT(5), + LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), +}; + +/** + * struct iwl_ltr_config_cmd_v1 - configures the LTR + * @flags: See %enum iwl_ltr_config_flags + */ +struct iwl_ltr_config_cmd_v1 { + __le32 flags; + __le32 static_long; + __le32 static_short; +} __packed; /* LTR_CAPABLE_API_S_VER_1 */ + +#define LTR_VALID_STATES_NUM 4 + +/** + * struct iwl_ltr_config_cmd - configures the LTR + * @flags: See %enum iwl_ltr_config_flags + * @static_long: + * @static_short: + * @ltr_cfg_values: + * @ltr_short_idle_timeout: + */ +struct iwl_ltr_config_cmd { + __le32 flags; + __le32 static_long; + __le32 static_short; + __le32 ltr_cfg_values[LTR_VALID_STATES_NUM]; + __le32 ltr_short_idle_timeout; +} __packed; /* LTR_CAPABLE_API_S_VER_2 */ + +/* Radio LP RX Energy Threshold measured in dBm */ +#define POWER_LPRX_RSSI_THRESHOLD 75 +#define POWER_LPRX_RSSI_THRESHOLD_MAX 94 +#define POWER_LPRX_RSSI_THRESHOLD_MIN 30 + +/** + * enum iwl_power_flags - masks for power table command flags + * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off + * receiver and transmitter. '0' - does not allow. + * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, + * '1' Driver enables PM (use rest of parameters) + * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, + * '1' PM could sleep over DTIM till listen Interval. + * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all + * access categories are both delivery and trigger enabled. + * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and + * PBW Snoozing enabled + * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask + * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. + * @POWER_FLAGS_AP_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving + * detection enablement +*/ +enum iwl_power_flags { + POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), + POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), + POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), + POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5), + POWER_FLAGS_BT_SCO_ENA = BIT(8), + POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), + POWER_FLAGS_LPRX_ENA_MSK = BIT(11), + POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12), +}; + +#define IWL_POWER_VEC_SIZE 5 + +/** + * struct iwl_powertable_cmd - legacy power command. Beside old API support this + * is used also with a new power API for device wide power settings. 
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed: 3 * DTIM. Keep alive period must be
+ * set regardless of power scheme or current power state.
+ * FW uses this value also when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ * one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80dBm
+ */
+struct iwl_powertable_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ __le16 flags;
+ u8 keep_alive_seconds;
+ u8 debug_flags;
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 skip_dtim_periods;
+ __le32 lprx_rssi_threshold;
+} __packed;
+
+/**
+ * enum iwl_device_power_flags - masks for device power command flags
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+ * receiver and transmitter. '0' - does not allow.
+*/
+enum iwl_device_power_flags {
+ DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+};
+
+/**
+ * struct iwl_device_power_cmd - device wide power command.
+ * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from DEVICE_POWER_FLAGS_*
+ */
+struct iwl_device_power_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ __le16 flags;
+ __le16 reserved;
+} __packed;
+
+/**
+ * struct iwl_mac_power_cmd - New power command containing uAPSD support
+ * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color: MAC context identifier
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed: 3 * DTIM. Keep alive period must be
+ * set regardless of power scheme or current power state.
+ * FW uses this value also when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ * one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80dBm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: Maximum time between attempts to retrieve buffered data
+ * from the AP [msec]
+ * @snooze_window: A window of time in which PBW snoozing ensures that all
+ * packets are received. It is also the minimum time from last
+ * received unicast RX packet, before client stops snoozing
+ * for data.
[msec] + * @snooze_step: TBD + * @qndp_tid: TID client shall use for uAPSD QNDP triggers + * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for + * each corresponding AC. + * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. + * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct + * values. + * @heavy_tx_thld_packets: TX threshold measured in number of packets + * @heavy_rx_thld_packets: RX threshold measured in number of packets + * @heavy_tx_thld_percentage: TX threshold measured in load's percentage + * @heavy_rx_thld_percentage: RX threshold measured in load's percentage + * @limited_ps_threshold: +*/ +struct iwl_mac_power_cmd { + /* CONTEXT_DESC_API_T_VER_1 */ + __le32 id_and_color; + + /* CLIENT_PM_POWER_TABLE_S_VER_1 */ + __le16 flags; + __le16 keep_alive_seconds; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 rx_data_timeout_uapsd; + __le32 tx_data_timeout_uapsd; + u8 lprx_rssi_threshold; + u8 skip_dtim_periods; + __le16 snooze_interval; + __le16 snooze_window; + u8 snooze_step; + u8 qndp_tid; + u8 uapsd_ac_flags; + u8 uapsd_max_sp; + u8 heavy_tx_thld_packets; + u8 heavy_rx_thld_packets; + u8 heavy_tx_thld_percentage; + u8 heavy_rx_thld_percentage; + u8 limited_ps_threshold; + u8 reserved; +} __packed; + +/* + * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when + * associated AP is identified as improperly implementing uAPSD protocol. + * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78 + * @sta_id: index of station in uCode's station table - associated AP ID in + * this context. + */ +struct iwl_uapsd_misbehaving_ap_notif { + __le32 sta_id; + u8 mac_id; + u8 reserved[3]; +} __packed; + +/** + * struct iwl_reduce_tx_power_cmd - TX power reduction command + * REDUCE_TX_POWER_CMD = 0x9f + * @flags: (reserved for future implementation) + * @mac_context_id: id of the mac ctx for which we are reducing TX power. + * @pwr_restriction: TX power restriction in dBms. + */ +struct iwl_reduce_tx_power_cmd { + u8 flags; + u8 mac_context_id; + __le16 pwr_restriction; +} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ + +enum iwl_dev_tx_power_cmd_mode { + IWL_TX_POWER_MODE_SET_MAC = 0, + IWL_TX_POWER_MODE_SET_DEVICE = 1, + IWL_TX_POWER_MODE_SET_CHAINS = 2, +}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */; + +/** + * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command + * @set_mode: see &enum iwl_dev_tx_power_cmd_mode + * @mac_context_id: id of the mac ctx for which we are reducing TX power. + * @pwr_restriction: TX power restriction in 1/8 dBms. 
+ * @dev_24: device TX power restriction in 1/8 dBms
+ * @dev_52_low: device TX power restriction upper band - low
+ * @dev_52_high: device TX power restriction upper band - high
+ */
+struct iwl_dev_tx_power_cmd_v2 {
+ __le32 set_mode;
+ __le32 mac_context_id;
+ __le16 pwr_restriction;
+ __le16 dev_24;
+ __le16 dev_52_low;
+ __le16 dev_52_high;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
+
+#define IWL_NUM_CHAIN_LIMITS 2
+#define IWL_NUM_SUB_BANDS 5
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v2: version 2 of the command, embedded here for easier software handling
+ * @per_chain_restriction: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd {
+ /* v3 is just an extension of v2 - keep this here */
+ struct iwl_dev_tx_power_cmd_v2 v2;
+ __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
+
+#define IWL_DEV_MAX_TX_POWER 0x7FFF
+
+/**
+ * struct iwl_beacon_filter_cmd
+ * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @id_and_color: MAC context identifier
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ * to driver if delta in Energy values calculated for this and last
+ * passed beacon is greater than this threshold. Zero value means that
+ * the Energy change is ignored for beacon filtering, and beacon will
+ * not be forced to be sent to driver regardless of this delta. Typical
+ * energy delta 5dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ * Send beacon to driver if delta in Energy values calculated for this
+ * and last passed beacon is greater than this threshold. Zero value
+ * means that the Energy change is ignored for beacon filtering while in
+ * Roaming state, typical energy delta 1dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ * calculated for current beacon are less than the threshold, use
+ * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ * Threshold. Typical energy threshold is -72dBm.
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ * filtering (Slow or Fast) that is selected (Units are in Celsius):
+ * If the current temperature is above this threshold - Fast filter
+ * will be used. If the current temperature is below this threshold -
+ * Slow filter will be used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_escape_timer: Send beacons to driver if no beacons were passed
+ * for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ * for a longer period of time than this escape-timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
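+ *
+ * Editor's illustration (not part of the original patch): the
+ * IWL_BF_CMD_CONFIG_DEFAULTS macro defined below expands to designated
+ * initializers for these fields, so a default command can be built as:
+ *
+ *	struct iwl_beacon_filter_cmd cmd = {
+ *		IWL_BF_CMD_CONFIG_DEFAULTS,
+ *		.bf_enable_beacon_filter = cpu_to_le32(1),
+ *	};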
+ */ +struct iwl_beacon_filter_cmd { + __le32 bf_energy_delta; + __le32 bf_roaming_energy_delta; + __le32 bf_roaming_state; + __le32 bf_temp_threshold; + __le32 bf_temp_fast_filter; + __le32 bf_temp_slow_filter; + __le32 bf_enable_beacon_filter; + __le32 bf_debug_flag; + __le32 bf_escape_timer; + __le32 ba_escape_timer; + __le32 ba_enable_beacon_abort; +} __packed; + +/* Beacon filtering and beacon abort */ +#define IWL_BF_ENERGY_DELTA_DEFAULT 5 +#define IWL_BF_ENERGY_DELTA_D0I3 20 +#define IWL_BF_ENERGY_DELTA_MAX 255 +#define IWL_BF_ENERGY_DELTA_MIN 0 + +#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 +#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20 +#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 +#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 + +#define IWL_BF_ROAMING_STATE_DEFAULT 72 +#define IWL_BF_ROAMING_STATE_D0I3 72 +#define IWL_BF_ROAMING_STATE_MAX 255 +#define IWL_BF_ROAMING_STATE_MIN 0 + +#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 +#define IWL_BF_TEMP_THRESHOLD_D0I3 112 +#define IWL_BF_TEMP_THRESHOLD_MAX 255 +#define IWL_BF_TEMP_THRESHOLD_MIN 0 + +#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 +#define IWL_BF_TEMP_FAST_FILTER_D0I3 1 +#define IWL_BF_TEMP_FAST_FILTER_MAX 255 +#define IWL_BF_TEMP_FAST_FILTER_MIN 0 + +#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 +#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 +#define IWL_BF_TEMP_SLOW_FILTER_MAX 255 +#define IWL_BF_TEMP_SLOW_FILTER_MIN 0 + +#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 + +#define IWL_BF_DEBUG_FLAG_DEFAULT 0 +#define IWL_BF_DEBUG_FLAG_D0I3 0 + +#define IWL_BF_ESCAPE_TIMER_DEFAULT 0 +#define IWL_BF_ESCAPE_TIMER_D0I3 0 +#define IWL_BF_ESCAPE_TIMER_MAX 1024 +#define IWL_BF_ESCAPE_TIMER_MIN 0 + +#define IWL_BA_ESCAPE_TIMER_DEFAULT 6 +#define IWL_BA_ESCAPE_TIMER_D0I3 6 +#define IWL_BA_ESCAPE_TIMER_D3 9 +#define IWL_BA_ESCAPE_TIMER_MAX 1024 +#define IWL_BA_ESCAPE_TIMER_MIN 0 + +#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 + +#define IWL_BF_CMD_CONFIG(mode) \ + .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \ + .bf_roaming_energy_delta = \ + cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \ + .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \ + .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \ + .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \ + .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \ + .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \ + .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \ + .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode) + +#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) +#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3) +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h new file mode 100644 index 000000000000..0f1ea80a55ef --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h @@ -0,0 +1,389 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __fw_api_rs_h__ +#define __fw_api_rs_h__ + +#include "fw-api-mac.h" + +/* + * These serve as indexes into + * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; + * TODO: avoid overlap between legacy and HT rates + */ +enum { + IWL_RATE_1M_INDEX = 0, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX, + IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX, + IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX, + IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX, + IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX, + IWL_RATE_MCS_8_INDEX, + IWL_RATE_MCS_9_INDEX, + IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, + IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, + IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1, +}; + +#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) + +/* fw API values for legacy bit rates, both OFDM and CCK */ +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, + IWL_RATE_INVM_PLCP = -1, +}; + +/* + * rate_n_flags bit fields + * + * The 32-bit value has different layouts in the low 8 bites depending on the + * format. There are three formats, HT, VHT and legacy (11abg, with subformats + * for CCK and OFDM). + * + * High-throughput (HT) rate format + * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM) + * Very High-throughput (VHT) rate format + * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM) + * Legacy OFDM rate format for bits 7:0 + * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM) + * Legacy CCK rate format for bits 7:0: + * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK) + */ + +/* Bit 8: (1) HT format, (0) legacy or VHT format */ +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS) + +/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS) + +/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ +#define RATE_MCS_VHT_POS 26 +#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS) + + +/* + * High-throughput (HT) rate format for bits 7:0 + * + * 2-0: MCS rate base + * 0) 6 Mbps + * 1) 12 Mbps + * 2) 18 Mbps + * 3) 24 Mbps + * 4) 36 Mbps + * 5) 48 Mbps + * 6) 54 Mbps + * 7) 60 Mbps + * 4-3: 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data + * (bits 7-6 are zero) + * + * Together the low 5 bits work out to the MCS index because we don't + * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two + * streams and 16-23 have three streams. 
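To make the format layouts described above concrete, here is a small illustrative classifier (the helper name is invented, this is not driver code) that looks only at format bits 8, 26 and 9 of a host-order rate_n_flags word:

	enum example_rate_fmt {
		EXAMPLE_FMT_HT,
		EXAMPLE_FMT_VHT,
		EXAMPLE_FMT_CCK,
		EXAMPLE_FMT_OFDM,
	};

	static enum example_rate_fmt example_classify_rate(u32 rate_n_flags)
	{
		if (rate_n_flags & RATE_MCS_HT_MSK)
			return EXAMPLE_FMT_HT;		/* bit 8 set */
		if (rate_n_flags & RATE_MCS_VHT_MSK)
			return EXAMPLE_FMT_VHT;		/* bit 26 set */
		if (rate_n_flags & RATE_MCS_CCK_MSK)
			return EXAMPLE_FMT_CCK;		/* bit 9 set */
		return EXAMPLE_FMT_OFDM;		/* all three clear: legacy OFDM */
	}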
We could also support MCS 32 + * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) + */ +#define RATE_HT_MCS_RATE_CODE_MSK 0x7 +#define RATE_HT_MCS_NSS_POS 3 +#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS) + +/* Bit 10: (1) Use Green Field preamble */ +#define RATE_HT_MCS_GF_POS 10 +#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) + +#define RATE_HT_MCS_INDEX_MSK 0x3f + +/* + * Very High-throughput (VHT) rate format for bits 7:0 + * + * 3-0: VHT MCS (0-9) + * 5-4: number of streams - 1: + * 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + */ + +/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ +#define RATE_VHT_MCS_RATE_CODE_MSK 0xf +#define RATE_VHT_MCS_NSS_POS 4 +#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) + +/* + * Legacy OFDM rate format for bits 7:0 + * + * 3-0: 0xD) 6 Mbps + * 0xF) 9 Mbps + * 0x5) 12 Mbps + * 0x7) 18 Mbps + * 0x9) 24 Mbps + * 0xB) 36 Mbps + * 0x1) 48 Mbps + * 0x3) 54 Mbps + * (bits 7-4 are 0) + * + * Legacy CCK rate format for bits 7:0: + * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK): + * + * 6-0: 10) 1 Mbps + * 20) 2 Mbps + * 55) 5.5 Mbps + * 110) 11 Mbps + * (bit 7 is 0) + */ +#define RATE_LEGACY_RATE_MSK 0xff + + +/* + * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz + * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT + */ +#define RATE_MCS_CHAN_WIDTH_POS 11 +#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) + +/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) + +/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ +#define RATE_MCS_ANT_POS 14 +#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ + RATE_MCS_ANT_B_MSK) +#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \ + RATE_MCS_ANT_C_MSK) +#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK +#define RATE_MCS_ANT_NUM 3 + +/* Bit 17-18: (0) SS, (1) SS*2 */ +#define RATE_MCS_STBC_POS 17 +#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS) +#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS) + +/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ +#define RATE_MCS_BF_POS 19 +#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) + +/* Bit 20: (0) ZLF is off, (1) ZLF is on */ +#define RATE_MCS_ZLF_POS 20 +#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS) + +/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ +#define RATE_MCS_DUP_POS 24 +#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS) + +/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ +#define RATE_MCS_LDPC_POS 27 +#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) + + +/* Link Quality definitions */ + +/* # entries in rate scale table to support Tx retries */ +#define LQ_MAX_RETRY_NUM 16 + +/* Link quality command flags bit fields */ + +/* Bit 0: (0) Don't use RTS (1) Use RTS */ +#define LQ_FLAG_USE_RTS_POS 0 +#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS) + +/* Bit 1-3: LQ command color. 
Used to match responses to LQ commands */ +#define LQ_FLAG_COLOR_POS 1 +#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) + +/* Bit 4-5: Tx RTS BW Signalling + * (0) No RTS BW signalling + * (1) Static BW signalling + * (2) Dynamic BW signalling + */ +#define LQ_FLAG_RTS_BW_SIG_POS 4 +#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS) +#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS) +#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS) + +/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection + * Dyanmic BW selection allows Tx with narrower BW then requested in rates + */ +#define LQ_FLAG_DYNAMIC_BW_POS 6 +#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS) + +/* Single Stream Tx Parameters (lq_cmd->ss_params) + * Flags to control a smart FW decision about whether BFER/STBC/SISO will be + * used for single stream Tx. + */ + +/* Bit 0-1: Max STBC streams allowed. Can be 0-3. + * (0) - No STBC allowed + * (1) - 2x1 STBC allowed (HT/VHT) + * (2) - 4x2 STBC allowed (HT/VHT) + * (3) - 3x2 STBC allowed (HT only) + * All our chips are at most 2 antennas so only (1) is valid for now. + */ +#define LQ_SS_STBC_ALLOWED_POS 0 +#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_MSK) + +/* 2x1 STBC is allowed */ +#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS) + +/* Bit 2: Beamformer (VHT only) is allowed */ +#define LQ_SS_BFER_ALLOWED_POS 2 +#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS) + +/* Bit 3: Force BFER or STBC for testing + * If this is set: + * If BFER is allowed then force the ucode to choose BFER else + * If STBC is allowed then force the ucode to choose STBC over SISO + */ +#define LQ_SS_FORCE_POS 3 +#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS) + +/* Bit 31: ss_params field is valid. Used for FW backward compatibility + * with other drivers which don't support the ss_params API yet + */ +#define LQ_SS_PARAMS_VALID_POS 31 +#define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS) + +/** + * struct iwl_lq_cmd - link quality command + * @sta_id: station to update + * @control: not used + * @flags: combination of LQ_FLAG_* + * @mimo_delim: the first SISO index in rs_table, which separates MIMO + * and SISO rates + * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD). + * Should be ANT_[ABC] + * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC] + * @initial_rate_index: first index from rs_table per AC category + * @agg_time_limit: aggregation max time threshold in usec/100, meaning + * value of 100 is one usec. Range is 100 to 8000 + * @agg_disable_start_th: try-count threshold for starting aggregation. + * If a frame has higher try-count, it should not be selected for + * starting an aggregation sequence. + * @agg_frame_cnt_limit: max frame count in an aggregation. + * 0: no limit + * 1: no aggregation (one frame per aggregation) + * 2 - 0x3f: maximal number of frames (up to 3f == 63) + * @rs_table: array of rates for each TX try, each is rate_n_flags, + * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP + * @ss_params: single stream features. declare whether STBC or BFER are allowed. 
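Since the ss_params bits above interact (LQ_SS_PARAMS_VALID gates the rest, and FORCE only makes sense when STBC or BFER is allowed), here is a hedged sketch of how such a word might be assembled; the example_ name is invented and the policy is purely illustrative:

	static __le32 example_lq_ss_params(bool allow_stbc, bool allow_bfer)
	{
		u32 ss_params = LQ_SS_PARAMS_VALID;	/* tell the FW the field is meaningful */

		if (allow_stbc)
			ss_params |= LQ_SS_STBC_1SS_ALLOWED;
		if (allow_bfer)
			ss_params |= LQ_SS_BFER_ALLOWED;

		return cpu_to_le32(ss_params);
	}

The returned value would go into the ss_params member of the struct iwl_lq_cmd defined just below.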
+ */ +struct iwl_lq_cmd { + u8 sta_id; + u8 reduced_tpc; + u16 control; + /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ + u8 flags; + u8 mimo_delim; + u8 single_stream_ant_msk; + u8 dual_stream_ant_msk; + u8 initial_rate_index[AC_NUM]; + /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */ + __le16 agg_time_limit; + u8 agg_disable_start_th; + u8 agg_frame_cnt_limit; + __le32 reserved2; + __le32 rs_table[LQ_MAX_RETRY_NUM]; + __le32 ss_params; +}; /* LINK_QUALITY_CMD_API_S_VER_1 */ +#endif /* __fw_api_rs_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h new file mode 100644 index 000000000000..9b7e49d4620f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -0,0 +1,238 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __fw_api_rx_h__ +#define __fw_api_rx_h__ + +#define IWL_RX_INFO_PHY_CNT 8 +#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 +#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff +#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00 +#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000 +#define IWL_RX_INFO_ENERGY_ANT_A_POS 0 +#define IWL_RX_INFO_ENERGY_ANT_B_POS 8 +#define IWL_RX_INFO_ENERGY_ANT_C_POS 16 + +/** + * struct iwl_rx_phy_info - phy info + * (REPLY_RX_PHY_CMD = 0xc0) + * @non_cfg_phy_cnt: non configurable DSP phy data byte count + * @cfg_phy_cnt: configurable DSP phy data byte count + * @stat_id: configurable DSP phy data set ID + * @reserved1: + * @system_timestamp: GP2 at on air rise + * @timestamp: TSF at on air rise + * @beacon_time_stamp: beacon at on-air rise + * @phy_flags: general phy flags: band, modulation, ... + * @channel: channel number + * @non_cfg_phy_buf: for various implementations of non_cfg_phy + * @rate_n_flags: RATE_MCS_* + * @byte_count: frame's byte-count + * @frame_time: frame's time on the air, based on byte count and frame rate + * calculation + * @mac_active_msk: what MACs were active when the frame was received + * + * Before each Rx, the device sends this data. It contains PHY information + * about the reception of the packet. 
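A plausible reading of the energy definitions at the top of this file is that IWL_RX_INFO_ENERGY_ANT_ABC_IDX indexes the non_cfg_phy[] array of the struct defined just below, with one byte per antenna; a minimal sketch under that assumption (the helper name is invented, standard kernel byte-order macros assumed):

	static void example_get_rx_energy(const struct iwl_rx_phy_info *phy_info,
					  u8 *ant_a, u8 *ant_b, u8 *ant_c)
	{
		u32 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);

		*ant_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> IWL_RX_INFO_ENERGY_ANT_A_POS;
		*ant_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS;
		*ant_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> IWL_RX_INFO_ENERGY_ANT_C_POS;
	}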
+ */ +struct iwl_rx_phy_info { + u8 non_cfg_phy_cnt; + u8 cfg_phy_cnt; + u8 stat_id; + u8 reserved1; + __le32 system_timestamp; + __le64 timestamp; + __le32 beacon_time_stamp; + __le16 phy_flags; + __le16 channel; + __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; + __le32 rate_n_flags; + __le32 byte_count; + __le16 mac_active_msk; + __le16 frame_time; +} __packed; + +/* + * TCP offload Rx assist info + * + * bits 0:3 - reserved + * bits 4:7 - MIC CRC length + * bits 8:12 - MAC header length + * bit 13 - Padding indication + * bit 14 - A-AMSDU indication + * bit 15 - Offload enabled + */ +enum iwl_csum_rx_assist_info { + CSUM_RXA_RESERVED_MASK = 0x000f, + CSUM_RXA_MICSIZE_MASK = 0x00f0, + CSUM_RXA_HEADERLEN_MASK = 0x1f00, + CSUM_RXA_PADD = BIT(13), + CSUM_RXA_AMSDU = BIT(14), + CSUM_RXA_ENA = BIT(15) +}; + +/** + * struct iwl_rx_mpdu_res_start - phy info + * @assist: see CSUM_RX_ASSIST_ above + */ +struct iwl_rx_mpdu_res_start { + __le16 byte_count; + __le16 assist; +} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */ + +/** + * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags + * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band + * @RX_RES_PHY_FLAGS_MOD_CCK: + * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short + * @RX_RES_PHY_FLAGS_NARROW_BAND: + * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received + * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU + * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame + * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble + * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame + */ +enum iwl_rx_phy_flags { + RX_RES_PHY_FLAGS_BAND_24 = BIT(0), + RX_RES_PHY_FLAGS_MOD_CCK = BIT(1), + RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2), + RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3), + RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4), + RX_RES_PHY_FLAGS_ANTENNA_POS = 4, + RX_RES_PHY_FLAGS_AGG = BIT(7), + RX_RES_PHY_FLAGS_OFDM_HT = BIT(8), + RX_RES_PHY_FLAGS_OFDM_GF = BIT(9), + RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10), +}; + +/** + * enum iwl_mvm_rx_status - written by fw for each Rx packet + * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine + * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow + * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: + * @RX_MPDU_RES_STATUS_KEY_VALID: + * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: + * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed + * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked + * in the driver. + * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine + * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or + * alg = CCM only. Checks replay attack for 11w frames. Relevant only if + * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set. 
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted + * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP + * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM + * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP + * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC + * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted + * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm + * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted + * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP: + * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: + * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: + * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame + * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw + * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors + * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK: + * @RX_MPDU_RES_STATUS_STA_ID_MSK: + * @RX_MPDU_RES_STATUS_RRF_KILL: + * @RX_MPDU_RES_STATUS_FILTERING_MSK: + * @RX_MPDU_RES_STATUS2_FILTERING_MSK: + */ +enum iwl_mvm_rx_status { + RX_MPDU_RES_STATUS_CRC_OK = BIT(0), + RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1), + RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2), + RX_MPDU_RES_STATUS_KEY_VALID = BIT(3), + RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4), + RX_MPDU_RES_STATUS_ICV_OK = BIT(5), + RX_MPDU_RES_STATUS_MIC_OK = BIT(6), + RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), + RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7), + RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8), + RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8), + RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8), + RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8), + RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8), + RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8), + RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8), + RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8), + RX_MPDU_RES_STATUS_DEC_DONE = BIT(11), + RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12), + RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13), + RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14), + RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15), + RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), + RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), + RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000), + RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000), + RX_MPDU_RES_STATUS_RRF_KILL = BIT(29), + RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000), + RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000), +}; + +#endif /* __fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h new file mode 100644 index 000000000000..3a657e4b60ac --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h @@ -0,0 +1,730 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
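Returning briefly to the Rx status bits defined above, here is a hedged sketch of how they combine in practice; this is an illustrative policy only, not the driver's actual rules, and the helper name is invented (a fuller implementation would also look at the MIC/TTAK bits per cipher):

	static bool example_rx_frame_usable(u32 rx_pkt_status)
	{
		u32 cipher = rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK;

		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
		    !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK))
			return false;		/* bad CRC or RXE overflow */

		if (cipher == RX_MPDU_RES_STATUS_SEC_NO_ENC)
			return true;		/* nothing to decrypt */

		/* encrypted frames must at least have finished HW decryption */
		return !!(rx_pkt_status & RX_MPDU_RES_STATUS_DEC_DONE);
	}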
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __fw_api_scan_h__ +#define __fw_api_scan_h__ + +#include "fw-api.h" + +/* Scan Commands, Responses, Notifications */ + +/* Max number of IEs for direct SSID scans in a command */ +#define PROBE_OPTION_MAX 20 + +/** + * struct iwl_ssid_ie - directed scan network information element + * + * Up to 20 of these may appear in REPLY_SCAN_CMD, + * selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 entries. + * SSID IEs get transmitted in reverse order of entry. 
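Filling one directed-scan entry of this element is straightforward; a hedged sketch, assuming the caller holds a cfg80211_ssid from the scan request (the helper name is invented, and struct iwl_ssid_ie is defined just below):

	#include <net/cfg80211.h>	/* struct cfg80211_ssid, WLAN_EID_SSID */

	static void example_fill_ssid_ie(struct iwl_ssid_ie *ie,
					 const struct cfg80211_ssid *ssid)
	{
		ie->id = WLAN_EID_SSID;
		ie->len = ssid->ssid_len;
		memcpy(ie->ssid, ssid->ssid, ssid->ssid_len);
	}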
+ */ +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; +} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ + +/* scan offload */ +#define IWL_SCAN_MAX_BLACKLIST_LEN 64 +#define IWL_SCAN_SHORT_BLACKLIST_LEN 16 +#define IWL_SCAN_MAX_PROFILES 11 +#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 + +/* Default watchdog (in MS) for scheduled scan iteration */ +#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000) + +#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) +#define CAN_ABORT_STATUS 1 + +#define IWL_FULL_SCAN_MULTIPLIER 5 +#define IWL_FAST_SCHED_SCAN_ITERATIONS 3 +#define IWL_MAX_SCHED_SCAN_PLANS 2 + +enum scan_framework_client { + SCAN_CLIENT_SCHED_SCAN = BIT(0), + SCAN_CLIENT_NETDETECT = BIT(1), + SCAN_CLIENT_ASSET_TRACKING = BIT(2), +}; + +/** + * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S + * @ssid: MAC address to filter out + * @reported_rssi: AP rssi reported to the host + * @client_bitmap: clients ignore this entry - enum scan_framework_client + */ +struct iwl_scan_offload_blacklist { + u8 ssid[ETH_ALEN]; + u8 reported_rssi; + u8 client_bitmap; +} __packed; + +enum iwl_scan_offload_network_type { + IWL_NETWORK_TYPE_BSS = 1, + IWL_NETWORK_TYPE_IBSS = 2, + IWL_NETWORK_TYPE_ANY = 3, +}; + +enum iwl_scan_offload_band_selection { + IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4, + IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8, + IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, +}; + +/** + * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S + * @ssid_index: index to ssid list in fixed part + * @unicast_cipher: encryption algorithm to match - bitmap + * @aut_alg: authentication algorithm to match - bitmap + * @network_type: enum iwl_scan_offload_network_type + * @band_selection: enum iwl_scan_offload_band_selection + * @client_bitmap: clients waiting for match - enum scan_framework_client + */ +struct iwl_scan_offload_profile { + u8 ssid_index; + u8 unicast_cipher; + u8 auth_alg; + u8 network_type; + u8 band_selection; + u8 client_bitmap; + u8 reserved[2]; +} __packed; + +/** + * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1 + * @blaclist: AP list to filter off from scan results + * @profiles: profiles to search for match + * @blacklist_len: length of blacklist + * @num_profiles: num of profiles in the list + * @match_notify: clients waiting for match found notification + * @pass_match: clients waiting for the results + * @active_clients: active clients bitmap - enum scan_framework_client + * @any_beacon_notify: clients waiting for match notification without match + */ +struct iwl_scan_offload_profile_cfg { + struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; + u8 blacklist_len; + u8 num_profiles; + u8 match_notify; + u8 pass_match; + u8 active_clients; + u8 any_beacon_notify; + u8 reserved[2]; +} __packed; + +/** + * iwl_scan_schedule_lmac - schedule of scan offload + * @delay: delay between iterations, in seconds. 
+ * @iterations: num of scan iterations + * @full_scan_mul: number of partial scans before each full scan + */ +struct iwl_scan_schedule_lmac { + __le16 delay; + u8 iterations; + u8 full_scan_mul; +} __packed; /* SCAN_SCHEDULE_API_S */ + +enum iwl_scan_offload_complete_status { + IWL_SCAN_OFFLOAD_COMPLETED = 1, + IWL_SCAN_OFFLOAD_ABORTED = 2, +}; + +enum iwl_scan_ebs_status { + IWL_SCAN_EBS_SUCCESS, + IWL_SCAN_EBS_FAILED, + IWL_SCAN_EBS_CHAN_NOT_FOUND, + IWL_SCAN_EBS_INACTIVE, +}; + +/** + * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S + * @tx_flags: combination of TX_CMD_FLG_* + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. Combination of RATE_MCS_* + * @sta_id: index of destination station in FW station table + * @reserved: for alignment and future use + */ +struct iwl_scan_req_tx_cmd { + __le32 tx_flags; + __le32 rate_n_flags; + u8 sta_id; + u8 reserved[3]; +} __packed; + +enum iwl_scan_channel_flags_lmac { + IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27), + IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28), +}; + +/** + * iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2 + * @flags: bits 1-20: directed scan to i'th ssid + * other bits &enum iwl_scan_channel_flags_lmac + * @channel_number: channel number 1-13 etc + * @iter_count: scan iteration on this channel + * @iter_interval: interval in seconds between iterations on one channel + */ +struct iwl_scan_channel_cfg_lmac { + __le32 flags; + __le16 channel_num; + __le16 iter_count; + __le32 iter_interval; +} __packed; + +/* + * iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1 + * @offset: offset in the data block + * @len: length of the segment + */ +struct iwl_scan_probe_segment { + __le16 offset; + __le16 len; +} __packed; + +/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2 + * @mac_header: first (and common) part of the probe + * @band_data: band specific data + * @common_data: last (and common) part of the probe + * @buf: raw data block + */ +struct iwl_scan_probe_req { + struct iwl_scan_probe_segment mac_header; + struct iwl_scan_probe_segment band_data[2]; + struct iwl_scan_probe_segment common_data; + u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; +} __packed; + +enum iwl_scan_channel_flags { + IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0), + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1), + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2), +}; + +/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S + * @flags: enum iwl_scan_channel_flags + * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is + * involved. + * 1 - EBS is disabled. + * 2 - every second scan will be full scan(and so on). + */ +struct iwl_scan_channel_opt { + __le16 flags; + __le16 non_ebs_ratio; +} __packed; + +/** + * iwl_mvm_lmac_scan_flags + * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses + * without filtering. + * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels + * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan + * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification + * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS multiple SSID matching + * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented + * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report + * and DS parameter set IEs into probe requests. 
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches + */ +enum iwl_mvm_lmac_scan_flags { + IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), + IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1), + IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2), + IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3), + IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), + IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), + IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), + IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), +}; + +enum iwl_scan_priority { + IWL_SCAN_PRIORITY_LOW, + IWL_SCAN_PRIORITY_MEDIUM, + IWL_SCAN_PRIORITY_HIGH, +}; + +enum iwl_scan_priority_ext { + IWL_SCAN_PRIORITY_EXT_0_LOWEST, + IWL_SCAN_PRIORITY_EXT_1, + IWL_SCAN_PRIORITY_EXT_2, + IWL_SCAN_PRIORITY_EXT_3, + IWL_SCAN_PRIORITY_EXT_4, + IWL_SCAN_PRIORITY_EXT_5, + IWL_SCAN_PRIORITY_EXT_6, + IWL_SCAN_PRIORITY_EXT_7_HIGHEST, +}; + +/** + * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1 + * @reserved1: for alignment and future use + * @channel_num: num of channels to scan + * @active-dwell: dwell time for active channels + * @passive-dwell: dwell time for passive channels + * @fragmented-dwell: dwell time for fragmented passive scan + * @reserved2: for alignment and future use + * @rx_chain_selct: PHY_RX_CHAIN_* flags + * @scan_flags: &enum iwl_mvm_lmac_scan_flags + * @max_out_time: max time (in TU) to be out of associated channel + * @suspend_time: pause scan this long (TUs) when returning to service channel + * @flags: RXON flags + * @filter_flags: RXON filter + * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz + * @direct_scan: list of SSIDs for directed active scan + * @scan_prio: enum iwl_scan_priority + * @iter_num: number of scan iterations + * @delay: delay in seconds before first iteration + * @schedule: two scheduling plans. The first one is finite, the second one can + * be infinite. + * @channel_opt: channel optimization options, for full and partial scan + * @data: channel configuration and probe request packet. 
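The variable-length data[] documented above carries the channel configuration entries and the probe request template, so the allocation size for the command defined just below can be computed up front; a sketch (hypothetical helper, sizes taken from the structures defined earlier in this file):

	static size_t example_lmac_scan_cmd_size(unsigned int n_channels)
	{
		return sizeof(struct iwl_scan_req_lmac) +
		       n_channels * sizeof(struct iwl_scan_channel_cfg_lmac) +
		       sizeof(struct iwl_scan_probe_req);
	}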
+ */ +struct iwl_scan_req_lmac { + /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */ + __le32 reserved1; + u8 n_channels; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + __le16 reserved2; + __le16 rx_chain_select; + __le32 scan_flags; + __le32 max_out_time; + __le32 suspend_time; + /* RX_ON_FLAGS_API_S_VER_1 */ + __le32 flags; + __le32 filter_flags; + struct iwl_scan_req_tx_cmd tx_cmd[2]; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + __le32 scan_prio; + /* SCAN_REQ_PERIODIC_PARAMS_API_S */ + __le32 iter_num; + __le32 delay; + struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS]; + struct iwl_scan_channel_opt channel_opt[2]; + u8 data[]; +} __packed; + +/** + * struct iwl_scan_results_notif - scan results for one channel - + * SCAN_RESULT_NTF_API_S_VER_3 + * @channel: which channel the results are from + * @band: 0 for 5.2 GHz, 1 for 2.4 GHz + * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request + * @num_probe_not_sent: # of request that weren't sent due to not enough time + * @duration: duration spent in channel, in usecs + */ +struct iwl_scan_results_notif { + u8 channel; + u8 band; + u8 probe_status; + u8 num_probe_not_sent; + __le32 duration; +} __packed; + +/** + * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels) + * SCAN_COMPLETE_NTF_API_S_VER_3 + * @scanned_channels: number of channels scanned (and number of valid results) + * @status: one of SCAN_COMP_STATUS_* + * @bt_status: BT on/off status + * @last_channel: last channel that was scanned + * @tsf_low: TSF timer (lower half) in usecs + * @tsf_high: TSF timer (higher half) in usecs + * @results: an array of scan results, only "scanned_channels" of them are valid + */ +struct iwl_lmac_scan_complete_notif { + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; + struct iwl_scan_results_notif results[]; +} __packed; + +/** + * iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 + * @last_schedule_line: last schedule line executed (fast or regular) + * @last_schedule_iteration: last scan iteration executed before scan abort + * @status: enum iwl_scan_offload_complete_status + * @ebs_status: EBS success status &enum iwl_scan_ebs_status + * @time_after_last_iter; time in seconds elapsed after last iteration + */ +struct iwl_periodic_scan_complete { + u8 last_schedule_line; + u8 last_schedule_iteration; + u8 status; + u8 ebs_status; + __le32 time_after_last_iter; + __le32 reserved; +} __packed; + +/* UMAC Scan API */ + +/* The maximum of either of these cannot exceed 8, because we use an + * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). 
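Since only the first scanned_channels entries of results[] are valid, a consumer of the completion notification above would typically walk just that many; an illustrative (invented) helper using the standard kernel print and byte-order macros:

	static void example_dump_lmac_results(const struct iwl_lmac_scan_complete_notif *notif)
	{
		int i;

		for (i = 0; i < notif->scanned_channels; i++) {
			const struct iwl_scan_results_notif *res = &notif->results[i];

			pr_debug("chan %u band %u: probe_status %u, dwell %u usec\n",
				 res->channel, res->band, res->probe_status,
				 le32_to_cpu(res->duration));
		}
	}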
+ */ +#define IWL_MVM_MAX_UMAC_SCANS 8 +#define IWL_MVM_MAX_LMAC_SCANS 1 + +enum scan_config_flags { + SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), + SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1), + SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2), + SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3), + SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8), + SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9), + SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10), + SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11), + SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12), + SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13), + SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14), + SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15), + SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16), + SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17), + SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18), + SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19), + SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), + SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), + + /* Bits 26-31 are for num of channels in channel_array */ +#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) +}; + +enum scan_config_rates { + /* OFDM basic rates */ + SCAN_CONFIG_RATE_6M = BIT(0), + SCAN_CONFIG_RATE_9M = BIT(1), + SCAN_CONFIG_RATE_12M = BIT(2), + SCAN_CONFIG_RATE_18M = BIT(3), + SCAN_CONFIG_RATE_24M = BIT(4), + SCAN_CONFIG_RATE_36M = BIT(5), + SCAN_CONFIG_RATE_48M = BIT(6), + SCAN_CONFIG_RATE_54M = BIT(7), + /* CCK basic rates */ + SCAN_CONFIG_RATE_1M = BIT(8), + SCAN_CONFIG_RATE_2M = BIT(9), + SCAN_CONFIG_RATE_5M = BIT(10), + SCAN_CONFIG_RATE_11M = BIT(11), + + /* Bits 16-27 are for supported rates */ +#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16) +}; + +enum iwl_channel_flags { + IWL_CHANNEL_FLAG_EBS = BIT(0), + IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1), + IWL_CHANNEL_FLAG_EBS_ADD = BIT(2), + IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), +}; + +/** + * struct iwl_scan_config + * @flags: enum scan_config_flags + * @tx_chains: valid_tx antenna - ANT_* definitions + * @rx_chains: valid_rx antenna - ANT_* definitions + * @legacy_rates: default legacy rates - enum scan_config_rates + * @out_of_channel_time: default max out of serving channel time + * @suspend_time: default max suspend time + * @dwell_active: default dwell time for active scan + * @dwell_passive: default dwell time for passive scan + * @dwell_fragmented: default dwell time for fragmented scan + * @reserved: for future use and alignment + * @mac_addr: default mac address to be used in probes + * @bcast_sta_id: the index of the station in the fw + * @channel_flags: default channel flags - enum iwl_channel_flags + * scan_config_channel_flag + * @channel_array: default supported channels + */ +struct iwl_scan_config { + __le32 flags; + __le32 tx_chains; + __le32 rx_chains; + __le32 legacy_rates; + __le32 out_of_channel_time; + __le32 suspend_time; + u8 dwell_active; + u8 dwell_passive; + u8 dwell_fragmented; + u8 reserved; + u8 mac_addr[ETH_ALEN]; + u8 bcast_sta_id; + u8 channel_flags; + u8 channel_array[]; +} __packed; /* SCAN_CONFIG_DB_CMD_API_S */ + +/** + * iwl_umac_scan_flags + *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request + * can be preempted by other scan requests with higher priority. + * The low priority scan will be resumed when the higher proirity scan is + * completed. + *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver + * when scan starts. 
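The scan-config flags above are meant to be OR'ed together, with the channel count packed into bits 26-31 via SCAN_CONFIG_N_CHANNELS(); one plausible, purely illustrative combination for an initial configuration (the helper name is invented):

	static __le32 example_scan_config_flags(unsigned int n_channels)
	{
		u32 flags = SCAN_CONFIG_FLAG_ACTIVATE |
			    SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
			    SCAN_CONFIG_FLAG_SET_TX_CHAINS |
			    SCAN_CONFIG_FLAG_SET_RX_CHAINS |
			    SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
			    SCAN_CONFIG_FLAG_SET_MAC_ADDR |
			    SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
			    SCAN_CONFIG_N_CHANNELS(n_channels);

		return cpu_to_le32(flags);
	}

The same count presumably also sizes the command, since struct iwl_scan_config ends in one channel_array byte per channel.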
+ */ +enum iwl_umac_scan_flags { + IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0), + IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1), +}; + +enum iwl_umac_scan_uid_offsets { + IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0, + IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8, +}; + +enum iwl_umac_scan_general_flags { + IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), + IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), + IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), + IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), + IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), + IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), + IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), + IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), + IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9) +}; + +/** + * struct iwl_scan_channel_cfg_umac + * @flags: bitmap - 0-19: directed scan to i'th ssid. + * @channel_num: channel number 1-13 etc. + * @iter_count: repetition count for the channel. + * @iter_interval: interval between two scan iterations on one channel. + */ +struct iwl_scan_channel_cfg_umac { + __le32 flags; + u8 channel_num; + u8 iter_count; + __le16 iter_interval; +} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */ + +/** + * struct iwl_scan_umac_schedule + * @interval: interval in seconds between scan iterations + * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop + * @reserved: for alignment and future use + */ +struct iwl_scan_umac_schedule { + __le16 interval; + u8 iter_count; + u8 reserved; +} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */ + +/** + * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command + * parameters following channels configuration array. + * @schedule: two scheduling plans. + * @delay: delay in TUs before starting the first scan iteration + * @reserved: for future use and alignment + * @preq: probe request with IEs blocks + * @direct_scan: list of SSIDs for directed active scan + */ +struct iwl_scan_req_umac_tail { + /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ + struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; + __le16 delay; + __le16 reserved; + /* SCAN_PROBE_PARAMS_API_S_VER_1 */ + struct iwl_scan_probe_req preq; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; +} __packed; + +/** + * struct iwl_scan_req_umac + * @flags: &enum iwl_umac_scan_flags + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @ooc_priority: out of channel priority - &enum iwl_scan_priority + * @general_flags: &enum iwl_umac_scan_general_flags + * @reserved1: for future use and alignment + * @active_dwell: dwell time for active scan + * @passive_dwell: dwell time for passive scan + * @fragmented_dwell: dwell time for fragmented passive scan + * @max_out_time: max out of serving channel time + * @suspend_time: max suspend time + * @scan_priority: scan internal prioritization &enum iwl_scan_priority + * @channel_flags: &enum iwl_scan_channel_flags + * @n_channels: num of channels in scan request + * @reserved2: for future use and alignment + * @data: &struct iwl_scan_channel_cfg_umac and + * &struct iwl_scan_req_umac_tail + */ +struct iwl_scan_req_umac { + __le32 flags; + __le32 uid; + __le32 ooc_priority; + /* SCAN_GENERAL_PARAMS_API_S_VER_1 */ + __le32 general_flags; + u8 reserved1; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + __le32 max_out_time; + __le32 suspend_time; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved2; + u8 data[]; +} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + +/** + * struct 
iwl_umac_scan_abort + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @flags: reserved + */ +struct iwl_umac_scan_abort { + __le32 uid; + __le32 flags; +} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ + +/** + * struct iwl_umac_scan_complete + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @last_schedule: last scheduling line + * @last_iter: last scan iteration number + * @scan status: &enum iwl_scan_offload_complete_status + * @ebs_status: &enum iwl_scan_ebs_status + * @time_from_last_iter: time elapsed from last iteration + * @reserved: for future use + */ +struct iwl_umac_scan_complete { + __le32 uid; + u8 last_schedule; + u8 last_iter; + u8 status; + u8 ebs_status; + __le32 time_from_last_iter; + __le32 reserved; +} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */ + +#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5 +/** + * struct iwl_scan_offload_profile_match - match information + * @bssid: matched bssid + * @channel: channel where the match occurred + * @energy: + * @matching_feature: + * @matching_channels: bitmap of channels that matched, referencing + * the channels passed in tue scan offload request + */ +struct iwl_scan_offload_profile_match { + u8 bssid[ETH_ALEN]; + __le16 reserved; + u8 channel; + u8 energy; + u8 matching_feature; + u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN]; +} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ + +/** + * struct iwl_scan_offload_profiles_query - match results query response + * @matched_profiles: bitmap of matched profiles, referencing the + * matches passed in the scan offload request + * @last_scan_age: age of the last offloaded scan + * @n_scans_done: number of offloaded scans done + * @gp2_d0u: GP2 when D0U occurred + * @gp2_invoked: GP2 when scan offload was invoked + * @resume_while_scanning: not used + * @self_recovery: obsolete + * @reserved: reserved + * @matches: array of match information, one for each match + */ +struct iwl_scan_offload_profiles_query { + __le32 matched_profiles; + __le32 last_scan_age; + __le32 n_scans_done; + __le32 gp2_d0u; + __le32 gp2_invoked; + u8 resume_while_scanning; + u8 self_recovery; + __le16 reserved; + struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; +} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ + +/** + * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @scanned_channels: number of channels scanned and number of valid elements in + * results array + * @status: one of SCAN_COMP_STATUS_* + * @bt_status: BT on/off status + * @last_channel: last channel that was scanned + * @tsf_low: TSF timer (lower half) in usecs + * @tsf_high: TSF timer (higher half) in usecs + * @results: array of scan results, only "scanned_channels" of them are valid + */ +struct iwl_umac_scan_iter_complete_notif { + __le32 uid; + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; + struct iwl_scan_results_notif results[]; +} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */ + +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h new file mode 100644 index 000000000000..493a8bdfbc9e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -0,0 +1,414 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. 
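Stepping back to the match-query structures above: matched_profiles is a bitmap over the profiles submitted in the scan offload request, so reporting matches is a matter of walking its set bits; a hedged sketch (the helper name is invented, and no assumption is made about how matches[] entries map to profile indexes):

	static void example_report_matches(const struct iwl_scan_offload_profiles_query *query)
	{
		u32 matched = le32_to_cpu(query->matched_profiles);
		int i;

		for (i = 0; i < IWL_SCAN_MAX_PROFILES; i++)
			if (matched & BIT(i))
				pr_debug("sched scan profile %d matched\n", i);
	}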
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __fw_api_sta_h__ +#define __fw_api_sta_h__ + +/** + * enum iwl_sta_flags - flags for the ADD_STA host command + * @STA_FLG_REDUCED_TX_PWR_CTRL: + * @STA_FLG_REDUCED_TX_PWR_DATA: + * @STA_FLG_DISABLE_TX: set if TX should be disabled + * @STA_FLG_PS: set if STA is in Power Save + * @STA_FLG_INVALID: set if STA is invalid + * @STA_FLG_DLP_EN: Direct Link Protocol is enabled + * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs + * @STA_FLG_DRAIN_FLOW: drain flow + * @STA_FLG_PAN: STA is for PAN interface + * @STA_FLG_CLASS_AUTH: + * @STA_FLG_CLASS_ASSOC: + * @STA_FLG_CLASS_MIMO_PROT: + * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU + * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation + * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is + * initialised by driver and can be updated by fw upon reception of + * action frames that can change the channel width. When cleared the fw + * will send all the frames in 20MHz even when FAT channel is requested. + * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the + * driver and can be updated by fw upon reception of action frames. + * @STA_FLG_MFP_EN: Management Frame Protection + */ +enum iwl_sta_flags { + STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3), + STA_FLG_REDUCED_TX_PWR_DATA = BIT(6), + + STA_FLG_DISABLE_TX = BIT(4), + + STA_FLG_PS = BIT(8), + STA_FLG_DRAIN_FLOW = BIT(12), + STA_FLG_PAN = BIT(13), + STA_FLG_CLASS_AUTH = BIT(14), + STA_FLG_CLASS_ASSOC = BIT(15), + STA_FLG_RTS_MIMO_PROT = BIT(17), + + STA_FLG_MAX_AGG_SIZE_SHIFT = 19, + STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), + + STA_FLG_AGG_MPDU_DENS_SHIFT = 23, + STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), + + STA_FLG_FAT_EN_20MHZ = (0 << 26), + STA_FLG_FAT_EN_40MHZ = (1 << 26), + STA_FLG_FAT_EN_80MHZ = (2 << 26), + STA_FLG_FAT_EN_160MHZ = (3 << 26), + STA_FLG_FAT_EN_MSK = (3 << 26), + + STA_FLG_MIMO_EN_SISO = (0 << 28), + STA_FLG_MIMO_EN_MIMO2 = (1 << 28), + STA_FLG_MIMO_EN_MIMO3 = (2 << 28), + STA_FLG_MIMO_EN_MSK = (3 << 28), +}; + +/** + * enum iwl_sta_key_flag - key flags for the ADD_STA host command + * @STA_KEY_FLG_NO_ENC: no encryption + * @STA_KEY_FLG_WEP: WEP encryption algorithm + * @STA_KEY_FLG_CCM: CCMP encryption algorithm + * @STA_KEY_FLG_TKIP: TKIP encryption algorithm + * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support) + * @STA_KEY_FLG_CMAC: CMAC encryption algorithm + * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm + * @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value + * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from + * station info array (1 - 
n 1X mode) + * @STA_KEY_FLG_KEYID_MSK: the index of the key + * @STA_KEY_NOT_VALID: key is invalid + * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key + * @STA_KEY_MULTICAST: set for multical key + * @STA_KEY_MFP: key is used for Management Frame Protection + */ +enum iwl_sta_key_flag { + STA_KEY_FLG_NO_ENC = (0 << 0), + STA_KEY_FLG_WEP = (1 << 0), + STA_KEY_FLG_CCM = (2 << 0), + STA_KEY_FLG_TKIP = (3 << 0), + STA_KEY_FLG_EXT = (4 << 0), + STA_KEY_FLG_CMAC = (6 << 0), + STA_KEY_FLG_ENC_UNKNOWN = (7 << 0), + STA_KEY_FLG_EN_MSK = (7 << 0), + + STA_KEY_FLG_WEP_KEY_MAP = BIT(3), + STA_KEY_FLG_KEYID_POS = 8, + STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS), + STA_KEY_NOT_VALID = BIT(11), + STA_KEY_FLG_WEP_13BYTES = BIT(12), + STA_KEY_MULTICAST = BIT(14), + STA_KEY_MFP = BIT(15), +}; + +/** + * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed + * @STA_MODIFY_KEY: this command modifies %key + * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx + * @STA_MODIFY_TX_RATE: unused + * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid + * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid + * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count + * @STA_MODIFY_PROT_TH: + * @STA_MODIFY_QUEUES: modify the queues used by this station + */ +enum iwl_sta_modify_flag { + STA_MODIFY_KEY = BIT(0), + STA_MODIFY_TID_DISABLE_TX = BIT(1), + STA_MODIFY_TX_RATE = BIT(2), + STA_MODIFY_ADD_BA_TID = BIT(3), + STA_MODIFY_REMOVE_BA_TID = BIT(4), + STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5), + STA_MODIFY_PROT_TH = BIT(6), + STA_MODIFY_QUEUES = BIT(7), +}; + +#define STA_MODE_MODIFY 1 + +/** + * enum iwl_sta_sleep_flag - type of sleep of the station + * @STA_SLEEP_STATE_AWAKE: + * @STA_SLEEP_STATE_PS_POLL: + * @STA_SLEEP_STATE_UAPSD: + * @STA_SLEEP_STATE_MOREDATA: set more-data bit on + * (last) released frame + */ +enum iwl_sta_sleep_flag { + STA_SLEEP_STATE_AWAKE = 0, + STA_SLEEP_STATE_PS_POLL = BIT(0), + STA_SLEEP_STATE_UAPSD = BIT(1), + STA_SLEEP_STATE_MOREDATA = BIT(2), +}; + +/* STA ID and color bits definitions */ +#define STA_ID_SEED (0x0f) +#define STA_ID_POS (0) +#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS) + +#define STA_COLOR_SEED (0x7) +#define STA_COLOR_POS (4) +#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS) + +#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \ + (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS) +#define STA_ID_N_COLOR_GET_ID(id_n_color) \ + (((id_n_color) & STA_ID_MSK) >> STA_ID_POS) + +#define STA_KEY_MAX_NUM (16) +#define STA_KEY_IDX_INVALID (0xff) +#define STA_KEY_MAX_DATA_KEY_NUM (4) +#define IWL_MAX_GLOBAL_KEYS (4) +#define STA_KEY_LEN_WEP40 (5) +#define STA_KEY_LEN_WEP104 (13) + +/** + * struct iwl_mvm_keyinfo - key information + * @key_flags: type %iwl_sta_key_flag + * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection + * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx + * @key_offset: key offset in the fw's key table + * @key: 16-byte unicast decryption key + * @tx_secur_seq_cnt: initial RSC / PN needed for replay check + * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only + * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only + */ +struct iwl_mvm_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; + u8 reserved1; + __le16 tkip_rx_ttak[5]; + u8 key_offset; + u8 reserved2; + u8 key[16]; + __le64 tx_secur_seq_cnt; + __le64 hw_tkip_mic_rx_key; + __le64 hw_tkip_mic_tx_key; +} __packed; + +/** + * struct iwl_mvm_add_sta_cmd - Add/modify a station in the 
fw's sta table. + * ( REPLY_ADD_STA = 0x18 ) + * @add_modify: 1: modify existing, 0: add new station + * @awake_acs: + * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable + * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. + * @mac_id_n_color: the Mac context this station belongs to + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave + * alone. 1 - modify, 0 - don't change. + * @station_flags: look at %iwl_sta_flags + * @station_flags_msk: what of %station_flags have changed + * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) + * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set + * add_immediate_ba_ssn. + * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) + * Set %STA_MODIFY_REMOVE_BA_TID to use this field + * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with + * add_immediate_ba_tid. + * @sleep_tx_count: number of packets to transmit to station even though it is + * asleep. Used to synchronise PS-poll and u-APSD responses while ucode + * keeps track of STA sleep state. + * @sleep_state_flags: Look at %iwl_sta_sleep_flag. + * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP + * mac-addr. + * @beamform_flags: beam forming controls + * @tfd_queue_msk: tfd queues used by this station + * + * The device contains an internal table of per-station information, with info + * on security keys, aggregation parameters, and Tx rates for initial Tx + * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). + * + * ADD_STA sets up the table entry for one station, either creating a new + * entry, or modifying a pre-existing one. + */ +struct iwl_mvm_add_sta_cmd { + u8 add_modify; + u8 awake_acs; + __le16 tid_disable_tx; + __le32 mac_id_n_color; + u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ + __le16 reserved2; + u8 sta_id; + u8 modify_mask; + __le16 reserved3; + __le32 station_flags; + __le32 station_flags_msk; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + __le16 sleep_state_flags; + __le16 assoc_id; + __le16 beamform_flags; + __le32 tfd_queue_msk; +} __packed; /* ADD_STA_CMD_API_S_VER_7 */ + +/** + * struct iwl_mvm_add_sta_key_cmd - add/modify sta key + * ( REPLY_ADD_STA_KEY = 0x17 ) + * @sta_id: index of station in uCode's station table + * @key_offset: key offset in key storage + * @key_flags: type %iwl_sta_key_flag + * @key: key material data + * @key2: key material data + * @rx_secur_seq_cnt: RX security sequence counter for the key + * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection + * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx + */ +struct iwl_mvm_add_sta_key_cmd { + u8 sta_id; + u8 key_offset; + __le16 key_flags; + u8 key[16]; + u8 key2[16]; + u8 rx_secur_seq_cnt[16]; + u8 tkip_rx_tsc_byte2; + u8 reserved; + __le16 tkip_rx_ttak[5]; +} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */ + +/** + * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command + * @ADD_STA_SUCCESS: operation was executed successfully + * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table + * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session + * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that + * doesn't exist. 
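+ *
+ * A caller normally treats anything other than %ADD_STA_SUCCESS in the
+ * ADD_STA response as a failure, e.g. (illustrative sketch only, variable
+ * name is hypothetical):
+ *
+ *	if (add_sta_status != ADD_STA_SUCCESS)
+ *		return -EIO;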
+ */ +enum iwl_mvm_add_sta_rsp_status { + ADD_STA_SUCCESS = 0x1, + ADD_STA_STATIONS_OVERLOAD = 0x2, + ADD_STA_IMMEDIATE_BA_FAILURE = 0x4, + ADD_STA_MODIFY_NON_EXISTING_STA = 0x8, +}; + +/** + * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table + * ( REMOVE_STA = 0x19 ) + * @sta_id: the station id of the station to be removed + */ +struct iwl_mvm_rm_sta_cmd { + u8 sta_id; + u8 reserved[3]; +} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */ + +/** + * struct iwl_mvm_mgmt_mcast_key_cmd + * ( MGMT_MCAST_KEY = 0x1f ) + * @ctrl_flags: %iwl_sta_key_flag + * @IGTK: + * @K1: unused + * @K2: unused + * @sta_id: station ID that support IGTK + * @key_id: + * @receive_seq_cnt: initial RSC/PN needed for replay check + */ +struct iwl_mvm_mgmt_mcast_key_cmd { + __le32 ctrl_flags; + u8 IGTK[16]; + u8 K1[16]; + u8 K2[16]; + __le32 key_id; + __le32 sta_id; + __le64 receive_seq_cnt; +} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */ + +struct iwl_mvm_wep_key { + u8 key_index; + u8 key_offset; + __le16 reserved1; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +} __packed; + +struct iwl_mvm_wep_key_cmd { + __le32 mac_id_n_color; + u8 num_keys; + u8 decryption_type; + u8 flags; + u8 reserved; + struct iwl_mvm_wep_key wep_key[0]; +} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ + +/** + * struct iwl_mvm_eosp_notification - EOSP notification from firmware + * @remain_frame_count: # of frames remaining, non-zero if SP was cut + * short by GO absence + * @sta_id: station ID + */ +struct iwl_mvm_eosp_notification { + __le32 remain_frame_count; + __le32 sta_id; +} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */ + +#endif /* __fw_api_sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h new file mode 100644 index 000000000000..0c321f63ee42 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h @@ -0,0 +1,284 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __fw_api_stats_h__ +#define __fw_api_stats_h__ +#include "fw-api-mac.h" + +struct mvm_statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + __le32 reserved[3]; +} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */ + +struct mvm_statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 rssi_ant; + __le32 reserved2; +} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */ + +struct mvm_statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. 
*/ + __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; + __le32 num_bt_kills; + __le32 mac_id; + __le32 directed_data_mpdu; +} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */ + +struct mvm_statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_lmt_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved; +} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */ + +struct mvm_statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */ + +struct mvm_statistics_tx_non_phy { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; +} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */ + +#define MAX_CHAINS 3 + +struct mvm_statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; + __s8 txpower[MAX_CHAINS]; + __s8 reserved; + __le32 reserved2; +} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */ + +struct mvm_statistics_tx_channel_width { + __le32 ext_cca_narrow_ch20[1]; + __le32 ext_cca_narrow_ch40[2]; + __le32 ext_cca_narrow_ch80[3]; + __le32 ext_cca_narrow_ch160[4]; + __le32 last_tx_ch_width_indx; + __le32 rx_detected_per_ch_width[4]; + __le32 success_per_ch_width[4]; + __le32 fail_per_ch_width[4]; +}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */ + +struct mvm_statistics_tx { + struct mvm_statistics_tx_non_phy general; + struct mvm_statistics_tx_non_phy_agg agg; + struct mvm_statistics_tx_channel_width channel_width; +} __packed; /* STATISTICS_TX_API_S_VER_4 */ + + +struct mvm_statistics_bt_activity { + __le32 hi_priority_tx_req_cnt; + __le32 hi_priority_tx_denied_cnt; + __le32 lo_priority_tx_req_cnt; + __le32 lo_priority_tx_denied_cnt; + __le32 hi_priority_rx_req_cnt; + __le32 hi_priority_rx_denied_cnt; + __le32 lo_priority_rx_req_cnt; + __le32 lo_priority_rx_denied_cnt; +} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ + +struct mvm_statistics_general_v8 { + __le32 radio_temperature; + __le32 radio_voltage; + struct mvm_statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct mvm_statistics_div slow_div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get 
out of bad PHY status + */ + __le32 num_of_sos_states; + __le32 beacon_filtered; + __le32 missed_beacons; + u8 beacon_filter_average_energy; + u8 beacon_filter_reason; + u8 beacon_filter_current_energy; + u8 beacon_filter_reserved; + __le32 beacon_filter_delta_time; + struct mvm_statistics_bt_activity bt_activity; + __le64 rx_time; + __le64 on_time_rf; + __le64 on_time_scan; + __le64 tx_time; + __le32 beacon_counter[NUM_MAC_INDEX]; + u8 beacon_average_energy[NUM_MAC_INDEX]; + u8 reserved[4 - (NUM_MAC_INDEX % 4)]; +} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ + +struct mvm_statistics_rx { + struct mvm_statistics_rx_phy ofdm; + struct mvm_statistics_rx_phy cck; + struct mvm_statistics_rx_non_phy general; + struct mvm_statistics_rx_ht_phy ofdm_ht; +} __packed; /* STATISTICS_RX_API_S_VER_3 */ + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * STATISTICS_CMD (0x9c), below. + */ + +struct iwl_notif_statistics_v10 { + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general_v8 general; +} __packed; /* STATISTICS_NTFY_API_S_VER_10 */ + +#define IWL_STATISTICS_FLG_CLEAR 0x1 +#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2 + +struct iwl_statistics_cmd { + __le32 flags; +} __packed; /* STATISTICS_CMD_API_S_VER_1 */ + +#endif /* __fw_api_stats_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h new file mode 100644 index 000000000000..eed6271d01a3 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h @@ -0,0 +1,386 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __fw_api_tof_h__ +#define __fw_api_tof_h__ + +#include "fw-api.h" + +/* ToF sub-group command IDs */ +enum iwl_mvm_tof_sub_grp_ids { + TOF_RANGE_REQ_CMD = 0x1, + TOF_CONFIG_CMD = 0x2, + TOF_RANGE_ABORT_CMD = 0x3, + TOF_RANGE_REQ_EXT_CMD = 0x4, + TOF_RESPONDER_CONFIG_CMD = 0x5, + TOF_NW_INITIATED_RES_SEND_CMD = 0x6, + TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7, + TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC, + TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD, + TOF_RANGE_RESPONSE_NOTIF = 0xFE, + TOF_MCSI_DEBUG_NOTIF = 0xFB, +}; + +/** + * struct iwl_tof_config_cmd - ToF configuration + * @tof_disabled: 0 enabled, 1 - disabled + * @one_sided_disabled: 0 enabled, 1 - disabled + * @is_debug_mode: 1 debug mode, 0 - otherwise + * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise + */ +struct iwl_tof_config_cmd { + __le32 sub_grp_cmd_id; + u8 tof_disabled; + u8 one_sided_disabled; + u8 is_debug_mode; + u8 is_buf_required; +} __packed; + +/** + * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) + * @burst_period: future use: (currently hard coded in the LMAC) + * The interval between two sequential bursts. + * @min_delta_ftm: future use: (currently hard coded in the LMAC) + * The minimum delay between two sequential FTM Responses + * in the same burst. + * @burst_duration: future use: (currently hard coded in the LMAC) + * The total time for all FTMs handshake in the same burst. + * Affect the time events duration in the LMAC. + * @num_of_burst_exp: future use: (currently hard coded in the LMAC) + * The number of bursts for the current ToF request. Affect + * the number of events allocations in the current iteration. + * @get_ch_est: for xVT only, NA for driver + * @abort_responder: when set to '1' - Responder will terminate its activity + * (all other fields in the command are ignored) + * @recv_sta_req_params: 1 - Responder will ignore the other Responder's + * params and use the recomended Initiator params. + * 0 - otherwise + * @channel_num: current AP Channel + * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rate: current AP rate + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency. 
+ * 40MHz 0 below center, 1 above center + * 80MHz bits [0..1]: 0 the near 20MHz to the center, + * 1 the far 20MHz to the center + * bit[2] as above 40MHz + * @ftm_per_burst: FTMs per Burst + * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response, + * '1' - we measure over the Initial FTM Response + * @asap_mode: ASAP / Non ASAP mode for the current WLS station + * @sta_id: index of the AP STA when in AP mode + * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF + * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @bssid: Current AP BSSID + */ +struct iwl_tof_responder_config_cmd { + __le32 sub_grp_cmd_id; + __le16 burst_period; + u8 min_delta_ftm; + u8 burst_duration; + u8 num_of_burst_exp; + u8 get_ch_est; + u8 abort_responder; + u8 recv_sta_req_params; + u8 channel_num; + u8 bandwidth; + u8 rate; + u8 ctrl_ch_position; + u8 ftm_per_burst; + u8 ftm_resp_ts_avail; + u8 asap_mode; + u8 sta_id; + __le16 tsf_timer_offset_msecs; + __le16 toa_offset; + u8 bssid[ETH_ALEN]; +} __packed; + +/** + * struct iwl_tof_range_request_ext_cmd - extended range req for WLS + * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF + * @min_delta_ftm: Minimal time between two consecutive measurements, + * in units of 100us. 0 means no preference by station + * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended + * value be sent to the AP + * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended + * value to be sent to the AP + * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended + * value to be sent to the AP + */ +struct iwl_tof_range_req_ext_cmd { + __le32 sub_grp_cmd_id; + __le16 tsf_timer_offset_msec; + __le16 reserved; + u8 min_delta_ftm; + u8 ftm_format_and_bw20M; + u8 ftm_format_and_bw40M; + u8 ftm_format_and_bw80M; +} __packed; + +#define IWL_MVM_TOF_MAX_APS 21 + +/** + * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * @channel_num: Current AP Channel + * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @tsf_delta_direction: TSF relatively to the subject AP + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency. + * 40MHz 0 below center, 1 above center + * 80MHz bits [0..1]: 0 the near 20MHz to the center, + * 1 the far 20MHz to the center + * bit[2] as above 40MHz + * @bssid: AP's bss id + * @measure_type: Measurement type: 0 - two sided, 1 - One sided + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the + * number of measurement iterations (min 2^0 = 1, max 2^14) + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts = 0 + * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31) + * 1-sided: how many rts/cts pairs should be used per burst. + * @retries_per_sample: Max number of retries that the LMAC should send + * in case of no replies by the AP. + * @tsf_delta: TSF Delta in units of microseconds. + * The difference between the AP TSF and the device local clock. + * @location_req: Location Request Bit[0] LCI should be sent in the FTMR + * Bit[1] Civic should be sent in the FTMR + * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) + * @enable_dyn_ack: Enable Dynamic ACK BW. 
+ * 0 Initiator interact with regular AP + * 1 Initiator interact with Responder machine: need to send the + * Initiator Acks with HT 40MHz / 80MHz, since the Responder should + * use it for its ch est measurement (this flag will be set when we + * configure the opposite machine to be Responder). + * @rssi: Last received value + * leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value. + */ +struct iwl_tof_range_req_ap_entry { + u8 channel_num; + u8 bandwidth; + u8 tsf_delta_direction; + u8 ctrl_ch_position; + u8 bssid[ETH_ALEN]; + u8 measure_type; + u8 num_of_bursts; + __le16 burst_period; + u8 samples_per_burst; + u8 retries_per_sample; + __le32 tsf_delta; + u8 location_req; + u8 asap_mode; + u8 enable_dyn_ack; + s8 rssi; +} __packed; + +/** + * enum iwl_tof_response_mode + * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as + * possible (not supported for this release) + * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon + * timeout expiration + * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the + * earlier of: measurements completion / timeout + * expiration. + */ +enum iwl_tof_response_mode { + IWL_MVM_TOF_RESPOSE_ASAP = 1, + IWL_MVM_TOF_RESPOSE_TIMEOUT, + IWL_MVM_TOF_RESPOSE_COMPLETE, +}; + +/** + * struct iwl_tof_range_req_cmd - start measurement cmd + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @initiator: 0- NW initiated, 1 - Client Initiated + * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, + * '1' - run ML-Algo for ToF only + * @req_timeout: Requested timeout of the response in units of 100ms. + * This is equivalent to the session time configured to the + * LMAC in Initiator Request + * @report_policy: Supported partially for this release: For current release - + * the range report will be uploaded as a batch when ready or + * when the session is done (successfully / partially). + * one of iwl_tof_response_mode. + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), + * '1' Use MAC Address randomization according to the below + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. 
+ * Bits set to 1 shall be randomized by the UMAC + */ +struct iwl_tof_range_req_cmd { + __le32 sub_grp_cmd_id; + u8 request_id; + u8 initiator; + u8 one_sided_los_disable; + u8 req_timeout; + u8 report_policy; + u8 los_det_disable; + u8 num_of_ap; + u8 macaddr_random; + u8 macaddr_template[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; +} __packed; + +/** + * struct iwl_tof_gen_resp_cmd - generic ToF response + */ +struct iwl_tof_gen_resp_cmd { + __le32 sub_grp_cmd_id; + u8 data[]; +} __packed; + +/** + * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) + * @measure_status: current APs measurement status + * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rtt: The Round Trip Time that took for the last measurement for + * current AP [nSec] + * @rtt_variance: The Variance of the RTT values measured for current AP + * @rtt_spread: The Difference between the maximum and the minimum RTT + * values measured for current AP in the current session [nsec] + * @rssi: RSSI as uploaded in the Channel Estimation notification + * @rssi_spread: The Difference between the maximum and the minimum RSSI values + * measured for current AP in the current session + * @range: Measured range [cm] + * @range_variance: Measured range variance [cm] + * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was + * uploaded by the LMAC + */ +struct iwl_tof_range_rsp_ap_entry_ntfy { + u8 bssid[ETH_ALEN]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + __le16 reserved; + __le32 range; + __le32 range_variance; + __le32 timestamp; +} __packed; + +/** + * struct iwl_tof_range_rsp_ntfy - + * @request_id: A Token ID of the corresponding Range request + * @request_status: status of current measurement session + * @last_in_batch: reprot policy (when not all responses are uploaded at once) + * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + */ +struct iwl_tof_range_rsp_ntfy { + u8 request_id; + u8 request_status; + u8 last_in_batch; + u8 num_of_aps; + struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; +} __packed; + +#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) +/** + * struct iwl_tof_mcsi_notif - used for debug + * @token: token ID for the current session + * @role: '0' - initiator, '1' - responder + * @initiator_bssid: initiator machine + * @responder_bssid: responder machine + * @mcsi_buffer: debug data + */ +struct iwl_tof_mcsi_notif { + u8 token; + u8 role; + __le16 reserved; + u8 initiator_bssid[ETH_ALEN]; + u8 responder_bssid[ETH_ALEN]; + u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; +} __packed; + +/** + * struct iwl_tof_neighbor_report_notif + * @bssid: BSSID of the AP which sent the report + * @request_token: same token as the corresponding request + * @status: + * @report_ie_len: the length of the response frame starting from the Element ID + * @data: the IEs + */ +struct iwl_tof_neighbor_report { + u8 bssid[ETH_ALEN]; + u8 request_token; + u8 status; + __le16 report_ie_len; + u8 data[]; +} __packed; + +/** + * struct iwl_tof_range_abort_cmd + * @request_id: corresponds to a range request + */ +struct iwl_tof_range_abort_cmd { + __le32 sub_grp_cmd_id; + u8 request_id; + u8 reserved[3]; +} __packed; + +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h new file mode 100644 index 000000000000..853698ab8b05 --- /dev/null +++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -0,0 +1,646 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_api_tx_h__ +#define __fw_api_tx_h__ + +/** + * enum iwl_tx_flags - bitmasks for tx_flags in TX command + * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame + * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame + * @TX_CMD_FLG_ACK: expect ACK from receiving station + * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. 
+ * Otherwise, use rate_n_flags from the TX command + * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected + * Must set TX_CMD_FLG_ACK with this flag. + * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence + * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence + * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC) + * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored + * on old firmwares). + * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame + * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control. + * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command + * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU + * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame + * Should be set for beacons and probe responses + * @TX_CMD_FLG_CALIB: activate PA TX power calibrations + * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count + * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header. + * Should be set for 26/30 length MAC headers + * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW + * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration + * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation + * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id + * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped + * @TX_CMD_FLG_EXEC_PAPD: execute PAPD + * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power + * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk + */ +enum iwl_tx_flags { + TX_CMD_FLG_PROT_REQUIRE = BIT(0), + TX_CMD_FLG_WRITE_TX_POWER = BIT(1), + TX_CMD_FLG_ACK = BIT(3), + TX_CMD_FLG_STA_RATE = BIT(4), + TX_CMD_FLG_BAR = BIT(6), + TX_CMD_FLG_TXOP_PROT = BIT(7), + TX_CMD_FLG_VHT_NDPA = BIT(8), + TX_CMD_FLG_HT_NDPA = BIT(9), + TX_CMD_FLG_CSI_FDBK2HOST = BIT(10), + TX_CMD_FLG_BT_PRIO_POS = 11, + TX_CMD_FLG_BT_DIS = BIT(12), + TX_CMD_FLG_SEQ_CTL = BIT(13), + TX_CMD_FLG_MORE_FRAG = BIT(14), + TX_CMD_FLG_TSF = BIT(16), + TX_CMD_FLG_CALIB = BIT(17), + TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18), + TX_CMD_FLG_MH_PAD = BIT(20), + TX_CMD_FLG_RESP_TO_DRV = BIT(21), + TX_CMD_FLG_CCMP_AGG = BIT(22), + TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), + TX_CMD_FLG_DUR = BIT(25), + TX_CMD_FLG_FW_DROP = BIT(26), + TX_CMD_FLG_EXEC_PAPD = BIT(27), + TX_CMD_FLG_PAPD_TYPE = BIT(28), + TX_CMD_FLG_HCCA_CHUNK = BIT(31) +}; /* TX_FLAGS_BITS_API_S_VER_1 */ + +/** + * enum iwl_tx_pm_timeouts - pm timeout values in TX command + * @PM_FRAME_NONE: no need to suspend sleep mode + * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU + * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec + */ +enum iwl_tx_pm_timeouts { + PM_FRAME_NONE = 0, + PM_FRAME_MGMT = 2, + PM_FRAME_ASSOC = 3, +}; + +/* + * TX command security control + */ +#define TX_CMD_SEC_WEP 0x01 +#define TX_CMD_SEC_CCM 0x02 +#define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_EXT 0x04 +#define TX_CMD_SEC_MSK 0x07 +#define TX_CMD_SEC_WEP_KEY_IDX_POS 6 +#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 +#define TX_CMD_SEC_KEY128 0x08 + +/* TODO: how does these values are OK with only 16 bit variable??? 
*/ +/* + * TX command next frame info + * + * bits 0:2 - security control (TX_CMD_SEC_*) + * bit 3 - immediate ACK required + * bit 4 - rate is taken from STA table + * bit 5 - frame belongs to BA stream + * bit 6 - immediate BA response expected + * bit 7 - unused + * bits 8:15 - Station ID + * bits 16:31 - rate + */ +#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8) +#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10) +#define TX_CMD_NEXT_FRAME_BA_MSK (0x20) +#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40) +#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8) +#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00) +#define TX_CMD_NEXT_FRAME_STA_ID_POS (8) +#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000) +#define TX_CMD_NEXT_FRAME_RATE_POS (16) + +/* + * TX command Frame life time in us - to be written in pm_frame_timeout + */ +#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF +#define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/ +#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */ +#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0 + +/* + * TID for non QoS frames - to be written in tid_tspec + */ +#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT + +/* + * Limits on the retransmissions - to be written in {data,rts}_retry_limit + */ +#define IWL_DEFAULT_TX_RETRY 15 +#define IWL_MGMT_DFAULT_RETRY_LIMIT 3 +#define IWL_RTS_DFAULT_RETRY_LIMIT 60 +#define IWL_BAR_DFAULT_RETRY_LIMIT 60 +#define IWL_LOW_RETRY_LIMIT 7 + +/* TODO: complete documentation for try_cnt and btkill_cnt */ +/** + * struct iwl_tx_cmd - TX command struct to FW + * ( TX_CMD = 0x1c ) + * @len: in bytes of the payload, see below for details + * @tx_flags: combination of TX_CMD_FLG_* + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. Combination of RATE_MCS_* + * @sta_id: index of destination station in FW station table + * @sec_ctl: security control, TX_CMD_SEC_* + * @initial_rate_index: index into the the rate table for initial TX attempt. + * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames. + * @key: security key + * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_* + * @life_time: frame life time (usecs??) + * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt + + * btkill_cnd + reserved), first 32 bits. "0" disables usage. + * @dram_msb_ptr: upper bits of the scratch physical address + * @rts_retry_limit: max attempts for RTS + * @data_retry_limit: max attempts to send the data packet + * @tid_spec: TID/tspec + * @pm_frame_timeout: PM TX frame timeout + * + * The byte count (both len and next_frame_len) includes MAC header + * (24/26/30/32 bytes) + * + 2 bytes pad if 26/30 header size + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * It does not include post-MAC padding, i.e., + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes. + * Range of len: 14-2342 bytes. + * + * After the struct fields the MAC header is placed, plus any padding, + * and then the actial payload. 
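+ *
+ * As a rough illustration (not a complete setup), a non-QoS management
+ * frame could be described with:
+ *
+ *	tx_cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_SEQ_CTL);
+ *	tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ *	tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
+ *	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+ *	tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);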
+ */ +struct iwl_tx_cmd { + __le16 len; + __le16 next_frame_len; + __le32 tx_flags; + struct { + u8 try_cnt; + u8 btkill_cnt; + __le16 reserved; + } scratch; /* DRAM_SCRATCH_API_U_VER_1 */ + __le32 rate_n_flags; + u8 sta_id; + u8 sec_ctl; + u8 initial_rate_index; + u8 reserved2; + u8 key[16]; + __le32 reserved3; + __le32 life_time; + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 tid_tspec; + __le16 pm_frame_timeout; + __le16 reserved4; + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __packed; /* TX_CMD_API_S_VER_3 */ + +/* + * TX response related data + */ + +/* + * enum iwl_tx_status - status that is returned by the fw after attempts to Tx + * @TX_STATUS_SUCCESS: + * @TX_STATUS_DIRECT_DONE: + * @TX_STATUS_POSTPONE_DELAY: + * @TX_STATUS_POSTPONE_FEW_BYTES: + * @TX_STATUS_POSTPONE_BT_PRIO: + * @TX_STATUS_POSTPONE_QUIET_PERIOD: + * @TX_STATUS_POSTPONE_CALC_TTAK: + * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: + * @TX_STATUS_FAIL_SHORT_LIMIT: + * @TX_STATUS_FAIL_LONG_LIMIT: + * @TX_STATUS_FAIL_UNDERRUN: + * @TX_STATUS_FAIL_DRAIN_FLOW: + * @TX_STATUS_FAIL_RFKILL_FLUSH: + * @TX_STATUS_FAIL_LIFE_EXPIRE: + * @TX_STATUS_FAIL_DEST_PS: + * @TX_STATUS_FAIL_HOST_ABORTED: + * @TX_STATUS_FAIL_BT_RETRY: + * @TX_STATUS_FAIL_STA_INVALID: + * @TX_TATUS_FAIL_FRAG_DROPPED: + * @TX_STATUS_FAIL_TID_DISABLE: + * @TX_STATUS_FAIL_FIFO_FLUSHED: + * @TX_STATUS_FAIL_SMALL_CF_POLL: + * @TX_STATUS_FAIL_FW_DROP: + * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and + * STA table + * @TX_FRAME_STATUS_INTERNAL_ABORT: + * @TX_MODE_MSK: + * @TX_MODE_NO_BURST: + * @TX_MODE_IN_BURST_SEQ: + * @TX_MODE_FIRST_IN_BURST: + * @TX_QUEUE_NUM_MSK: + * + * Valid only if frame_count =1 + * TODO: complete documentation + */ +enum iwl_tx_status { + TX_STATUS_MSK = 0x000000ff, + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + /* postpone TX */ + TX_STATUS_POSTPONE_DELAY = 0x40, + TX_STATUS_POSTPONE_FEW_BYTES = 0x41, + TX_STATUS_POSTPONE_BT_PRIO = 0x42, + TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, + TX_STATUS_POSTPONE_CALC_TTAK = 0x44, + /* abort TX */ + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_UNDERRUN = 0x84, + TX_STATUS_FAIL_DRAIN_FLOW = 0x85, + TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_HOST_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, + TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f, + TX_STATUS_FAIL_FW_DROP = 0x90, + TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91, + TX_STATUS_INTERNAL_ABORT = 0x92, + TX_MODE_MSK = 0x00000f00, + TX_MODE_NO_BURST = 0x00000000, + TX_MODE_IN_BURST_SEQ = 0x00000100, + TX_MODE_FIRST_IN_BURST = 0x00000200, + TX_QUEUE_NUM_MSK = 0x0001f000, + TX_NARROW_BW_MSK = 0x00060000, + TX_NARROW_BW_1DIV2 = 0x00020000, + TX_NARROW_BW_1DIV4 = 0x00040000, + TX_NARROW_BW_1DIV8 = 0x00060000, +}; + +/* + * enum iwl_tx_agg_status - TX aggregation status + * @AGG_TX_STATE_STATUS_MSK: + * @AGG_TX_STATE_TRANSMITTED: + * @AGG_TX_STATE_UNDERRUN: + * @AGG_TX_STATE_BT_PRIO: + * @AGG_TX_STATE_FEW_BYTES: + * @AGG_TX_STATE_ABORT: + * @AGG_TX_STATE_LAST_SENT_TTL: + * @AGG_TX_STATE_LAST_SENT_TRY_CNT: + * @AGG_TX_STATE_LAST_SENT_BT_KILL: + * @AGG_TX_STATE_SCD_QUERY: + * @AGG_TX_STATE_TEST_BAD_CRC32: + * @AGG_TX_STATE_RESPONSE: + * @AGG_TX_STATE_DUMP_TX: + * 
@AGG_TX_STATE_DELAY_TX: + * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries + * occur if tx failed for this frame when it was a member of a previous + * aggregation block). If rate scaling is used, retry count indicates the + * rate table entry used for all frames in the new agg. + *@ AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for + * this frame + * + * TODO: complete documentation + */ +enum iwl_tx_agg_status { + AGG_TX_STATE_STATUS_MSK = 0x00fff, + AGG_TX_STATE_TRANSMITTED = 0x000, + AGG_TX_STATE_UNDERRUN = 0x001, + AGG_TX_STATE_BT_PRIO = 0x002, + AGG_TX_STATE_FEW_BYTES = 0x004, + AGG_TX_STATE_ABORT = 0x008, + AGG_TX_STATE_LAST_SENT_TTL = 0x010, + AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, + AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, + AGG_TX_STATE_SCD_QUERY = 0x080, + AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100, + AGG_TX_STATE_RESPONSE = 0x1ff, + AGG_TX_STATE_DUMP_TX = 0x200, + AGG_TX_STATE_DELAY_TX = 0x400, + AGG_TX_STATE_TRY_CNT_POS = 12, + AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, +}; + +#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \ + AGG_TX_STATE_LAST_SENT_TRY_CNT| \ + AGG_TX_STATE_LAST_SENT_BT_KILL) + +/* + * The mask below describes a status where we are absolutely sure that the MPDU + * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've + * written the bytes to the TXE, but we know nothing about what the DSP did. + */ +#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \ + AGG_TX_STATE_ABORT | \ + AGG_TX_STATE_SCD_QUERY) + +/* + * REPLY_TX = 0x1c (response) + * + * This response may be in one of two slightly different formats, indicated + * by the frame_count field: + * + * 1) No aggregation (frame_count == 1). This reports Tx results for a single + * frame. Multiple attempts, at various bit rates, may have been made for + * this frame. + * + * 2) Aggregation (frame_count > 1). This reports Tx results for two or more + * frames that used block-acknowledge. All frames were transmitted at + * same rate. Rate scaling may have been used if first frame in this new + * agg block failed in previous agg block(s). + * + * Note that, for aggregation, ACK (block-ack) status is not delivered + * here; block-ack has not been received by the time the device records + * this status. + * This status relates to reasons the tx might have been blocked or aborted + * within the device, rather than whether it was received successfully by + * the destination station. + */ + +/** + * struct agg_tx_status - per packet TX aggregation status + * @status: enum iwl_tx_agg_status + * @sequence: Sequence # for this frame's Tx cmd (not SSN!) 
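+ *
+ * The per-frame aggregation state can be extracted with, for example
+ * (frame_status being a pointer to one of these entries):
+ *
+ *	u16 state = le16_to_cpu(frame_status->status) & AGG_TX_STATE_STATUS_MSK;
+ *
+ * where %AGG_TX_STATE_TRANSMITTED indicates the MPDU was sent and the
+ * other values indicate why it was held back or aborted.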
+ */ +struct agg_tx_status { + __le16 status; + __le16 sequence; +} __packed; + +/* + * definitions for initial rate index field + * bits [3:0] initial rate index + * bits [6:4] rate table color, used for the initial rate + * bit-7 invalid rate indication + */ +#define TX_RES_INIT_RATE_INDEX_MSK 0x0f +#define TX_RES_RATE_TABLE_COLOR_MSK 0x70 +#define TX_RES_INV_RATE_INDEX_MSK 0x80 + +#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) +#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) + +/** + * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet + * ( REPLY_TX = 0x1c ) + * @frame_count: 1 no aggregation, >1 aggregation + * @bt_kill_count: num of times blocked by bluetooth (unused for agg) + * @failure_rts: num of failures due to unsuccessful RTS + * @failure_frame: num failures due to no ACK (unused for agg) + * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the + * Tx of all the batch. RATE_MCS_* + * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. + * for agg: RTS + CTS + aggregation tx time + block-ack time. + * in usec. + * @pa_status: tx power info + * @pa_integ_res_a: tx power info + * @pa_integ_res_b: tx power info + * @pa_integ_res_c: tx power info + * @measurement_req_id: tx power info + * @tfd_info: TFD information set by the FH + * @seq_ctl: sequence control from the Tx cmd + * @byte_cnt: byte count from the Tx cmd + * @tlc_info: TLC rate info + * @ra_tid: bits [3:0] = ra, bits [7:4] = tid + * @frame_ctrl: frame control + * @status: for non-agg: frame status TX_STATUS_* + * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields + * follow this one, up to frame_count. + * + * After the array of statuses comes the SSN of the SCD. Look at + * %iwl_mvm_get_scd_ssn for more details. + */ +struct iwl_mvm_tx_resp { + u8 frame_count; + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 initial_rate; + __le16 wireless_media_time; + + u8 pa_status; + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_c[3]; + __le16 measurement_req_id; + u8 reduced_tpc; + u8 reserved; + + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; + __le16 frame_ctrl; + + struct agg_tx_status status; +} __packed; /* TX_RSP_API_S_VER_3 */ + +/** + * struct iwl_mvm_ba_notif - notifies about reception of BA + * ( BA_NOTIF = 0xc5 ) + * @sta_addr_lo32: lower 32 bits of the MAC address + * @sta_addr_hi16: upper 16 bits of the MAC address + * @sta_id: Index of recipient (BA-sending) station in fw's station table + * @tid: tid of the session + * @seq_ctl: + * @bitmap: the bitmap of the BA notification as seen in the air + * @scd_flow: the tx queue this BA relates to + * @scd_ssn: the index of the last contiguously sent packet + * @txed: number of Txed frames in this batch + * @txed_2_done: number of Acked frames in this batch + */ +struct iwl_mvm_ba_notif { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; + u8 txed; + u8 txed_2_done; + __le16 reserved1; +} __packed; + +/* + * struct iwl_mac_beacon_cmd - beacon template command + * @tx: the tx commands associated with the beacon frame + * @template_id: currently equal to the mac context id of the coresponding + * mac. 
+ * @tim_idx: the offset of the tim IE in the beacon + * @tim_size: the length of the tim IE + * @frame: the template of the beacon frame + */ +struct iwl_mac_beacon_cmd { + struct iwl_tx_cmd tx; + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + struct ieee80211_hdr frame[0]; +} __packed; + +struct iwl_beacon_notif { + struct iwl_mvm_tx_resp beacon_notify_hdr; + __le64 tsf; + __le32 ibss_mgr_status; +} __packed; + +/** + * struct iwl_extended_beacon_notif - notifies about beacon transmission + * @beacon_notify_hdr: tx response command associated with the beacon + * @tsf: last beacon tsf + * @ibss_mgr_status: whether IBSS is manager + * @gp2: last beacon time in gp2 + */ +struct iwl_extended_beacon_notif { + struct iwl_mvm_tx_resp beacon_notify_hdr; + __le64 tsf; + __le32 ibss_mgr_status; + __le32 gp2; +} __packed; /* BEACON_NTFY_API_S_VER_5 */ + +/** + * enum iwl_dump_control - dump (flush) control flags + * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty + * and the TFD queues are empty. + */ +enum iwl_dump_control { + DUMP_TX_FIFO_FLUSH = BIT(1), +}; + +/** + * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command + * @queues_ctl: bitmap of queues to flush + * @flush_ctl: control flags + * @reserved: reserved + */ +struct iwl_tx_path_flush_cmd { + __le32 queues_ctl; + __le16 flush_ctl; + __le16 reserved; +} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */ + +/** + * iwl_mvm_get_scd_ssn - returns the SSN of the SCD + * @tx_resp: the Tx response from the fw (agg or non-agg) + * + * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since + * it can't know that everything will go well until the end of the AMPDU, it + * can't know in advance the number of MPDUs that will be sent in the current + * batch. This is why it writes the agg Tx response while it fetches the MPDUs. + * Hence, it can't know in advance what the SSN of the SCD will be at the end + * of the batch. This is why the SSN of the SCD is written at the end of the + * whole struct at a variable offset. This function knows how to cope with the + * variable offset and returns the SSN of the SCD. 
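+ *
+ * For example, with frame_count == 3 the response carries three
+ * struct agg_tx_status entries starting at the status field, and the SCD
+ * SSN is the 32-bit word immediately after them; that is exactly what the
+ * pointer arithmetic below reads (masked down to its 12 valid bits).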
+ */ +static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp) +{ + return le32_to_cpup((__le32 *)&tx_resp->status + + tx_resp->frame_count) & 0xfff; +} + +/** + * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command + * @token: + * @sta_id: station id + * @tid: + * @scd_queue: scheduler queue to confiug + * @enable: 1 queue enable, 0 queue disable + * @aggregate: 1 aggregated queue, 0 otherwise + * @tx_fifo: %enum iwl_mvm_tx_fifo + * @window: BA window size + * @ssn: SSN for the BA agreement + */ +struct iwl_scd_txq_cfg_cmd { + u8 token; + u8 sta_id; + u8 tid; + u8 scd_queue; + u8 enable; + u8 aggregate; + u8 tx_fifo; + u8 window; + __le16 ssn; + __le16 reserved; +} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */ + +/** + * struct iwl_scd_txq_cfg_rsp + * @token: taken from the command + * @sta_id: station id from the command + * @tid: tid from the command + * @scd_queue: scd_queue from the command + */ +struct iwl_scd_txq_cfg_rsp { + u8 token; + u8 sta_id; + u8 tid; + u8 scd_queue; +} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */ + +#endif /* __fw_api_tx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h new file mode 100644 index 000000000000..181590fbd3b3 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -0,0 +1,1773 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __fw_api_h__ +#define __fw_api_h__ + +#include "fw-api-rs.h" +#include "fw-api-rx.h" +#include "fw-api-tx.h" +#include "fw-api-sta.h" +#include "fw-api-mac.h" +#include "fw-api-power.h" +#include "fw-api-d3.h" +#include "fw-api-coex.h" +#include "fw-api-scan.h" +#include "fw-api-stats.h" +#include "fw-api-tof.h" + +/* Tx queue numbers */ +enum { + IWL_MVM_OFFCHANNEL_QUEUE = 8, + IWL_MVM_CMD_QUEUE = 9, +}; + +enum iwl_mvm_tx_fifo { + IWL_MVM_TX_FIFO_BK = 0, + IWL_MVM_TX_FIFO_BE, + IWL_MVM_TX_FIFO_VI, + IWL_MVM_TX_FIFO_VO, + IWL_MVM_TX_FIFO_MCAST = 5, + IWL_MVM_TX_FIFO_CMD = 7, +}; + +#define IWL_MVM_STATION_COUNT 16 + +#define IWL_MVM_TDLS_STA_COUNT 4 + +/* commands */ +enum { + MVM_ALIVE = 0x1, + REPLY_ERROR = 0x2, + ECHO_CMD = 0x3, + + INIT_COMPLETE_NOTIF = 0x4, + + /* PHY context commands */ + PHY_CONTEXT_CMD = 0x8, + DBG_CFG = 0x9, + ANTENNA_COUPLING_NOTIFICATION = 0xa, + + /* UMAC scan commands */ + SCAN_ITERATION_COMPLETE_UMAC = 0xb5, + SCAN_CFG_CMD = 0xc, + SCAN_REQ_UMAC = 0xd, + SCAN_ABORT_UMAC = 0xe, + SCAN_COMPLETE_UMAC = 0xf, + + /* station table */ + ADD_STA_KEY = 0x17, + ADD_STA = 0x18, + REMOVE_STA = 0x19, + + /* paging get item */ + FW_GET_ITEM_CMD = 0x1a, + + /* TX */ + TX_CMD = 0x1c, + TXPATH_FLUSH = 0x1e, + MGMT_MCAST_KEY = 0x1f, + + /* scheduler config */ + SCD_QUEUE_CFG = 0x1d, + + /* global key */ + WEP_KEY = 0x20, + + /* Memory */ + SHARED_MEM_CFG = 0x25, + + /* TDLS */ + TDLS_CHANNEL_SWITCH_CMD = 0x27, + TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, + TDLS_CONFIG_CMD = 0xa7, + + /* MAC and Binding commands */ + MAC_CONTEXT_CMD = 0x28, + TIME_EVENT_CMD = 0x29, /* both CMD and response */ + TIME_EVENT_NOTIFICATION = 0x2a, + BINDING_CONTEXT_CMD = 0x2b, + TIME_QUOTA_CMD = 0x2c, + NON_QOS_TX_COUNTER_CMD = 0x2d, + + LQ_CMD = 0x4e, + + /* paging block to FW cpu2 */ + FW_PAGING_BLOCK_CMD = 0x4f, + + /* Scan offload */ + SCAN_OFFLOAD_REQUEST_CMD = 0x51, + SCAN_OFFLOAD_ABORT_CMD = 0x52, + HOT_SPOT_CMD = 0x53, + SCAN_OFFLOAD_COMPLETE = 0x6D, + SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, + SCAN_OFFLOAD_CONFIG_CMD = 0x6f, + MATCH_FOUND_NOTIFICATION = 0xd9, + SCAN_ITERATION_COMPLETE = 0xe7, + + /* Phy */ + PHY_CONFIGURATION_CMD = 0x6a, + CALIB_RES_NOTIF_PHY_DB = 0x6b, + /* PHY_DB_CMD = 0x6c, */ + + /* ToF - 802.11mc FTM */ + TOF_CMD = 0x10, + TOF_NOTIFICATION = 0x11, + + /* Power - legacy power table command */ + POWER_TABLE_CMD = 0x77, + PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, + LTR_CONFIG = 0xee, + + /* Thermal Throttling*/ + REPLY_THERMAL_MNG_BACKOFF = 0x7e, + + /* Set/Get DC2DC frequency tune */ + DC2DC_CONFIG_CMD = 0x83, + + /* NVM */ + NVM_ACCESS_CMD = 0x88, + + SET_CALIB_DEFAULT_CMD = 0x8e, + + 
BEACON_NOTIFICATION = 0x90, + BEACON_TEMPLATE_CMD = 0x91, + TX_ANT_CONFIGURATION_CMD = 0x98, + STATISTICS_CMD = 0x9c, + STATISTICS_NOTIFICATION = 0x9d, + EOSP_NOTIFICATION = 0x9e, + REDUCE_TX_POWER_CMD = 0x9f, + + /* RF-KILL commands and notifications */ + CARD_STATE_CMD = 0xa0, + CARD_STATE_NOTIFICATION = 0xa1, + + MISSED_BEACONS_NOTIFICATION = 0xa2, + + /* Power - new power table command */ + MAC_PM_POWER_TABLE = 0xa9, + + MFUART_LOAD_NOTIFICATION = 0xb1, + + REPLY_RX_PHY_CMD = 0xc0, + REPLY_RX_MPDU_CMD = 0xc1, + BA_NOTIF = 0xc5, + + /* Location Aware Regulatory */ + MCC_UPDATE_CMD = 0xc8, + MCC_CHUB_UPDATE_CMD = 0xc9, + + MARKER_CMD = 0xcb, + + /* BT Coex */ + BT_COEX_PRIO_TABLE = 0xcc, + BT_COEX_PROT_ENV = 0xcd, + BT_PROFILE_NOTIFICATION = 0xce, + BT_CONFIG = 0x9b, + BT_COEX_UPDATE_SW_BOOST = 0x5a, + BT_COEX_UPDATE_CORUN_LUT = 0x5b, + BT_COEX_UPDATE_REDUCED_TXP = 0x5c, + BT_COEX_CI = 0x5d, + + REPLY_SF_CFG_CMD = 0xd1, + REPLY_BEACON_FILTERING_CMD = 0xd2, + + /* DTS measurements */ + CMD_DTS_MEASUREMENT_TRIGGER = 0xdc, + DTS_MEASUREMENT_NOTIFICATION = 0xdd, + + REPLY_DEBUG_CMD = 0xf0, + DEBUG_LOG_MSG = 0xf7, + + BCAST_FILTER_CMD = 0xcf, + MCAST_FILTER_CMD = 0xd0, + + /* D3 commands/notifications */ + D3_CONFIG_CMD = 0xd3, + PROT_OFFLOAD_CONFIG_CMD = 0xd4, + OFFLOADS_QUERY_CMD = 0xd5, + REMOTE_WAKE_CONFIG_CMD = 0xd6, + D0I3_END_CMD = 0xed, + + /* for WoWLAN in particular */ + WOWLAN_PATTERNS = 0xe0, + WOWLAN_CONFIGURATION = 0xe1, + WOWLAN_TSC_RSC_PARAM = 0xe2, + WOWLAN_TKIP_PARAM = 0xe3, + WOWLAN_KEK_KCK_MATERIAL = 0xe4, + WOWLAN_GET_STATUSES = 0xe5, + WOWLAN_TX_POWER_PER_DB = 0xe6, + + /* and for NetDetect */ + SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, + SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58, + SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59, + + REPLY_MAX = 0xff, +}; + +enum iwl_phy_ops_subcmd_ids { + CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, + DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, +}; + +/* command groups */ +enum { + PHY_OPS_GROUP = 0x4, +}; + +/** + * struct iwl_cmd_response - generic response struct for most commands + * @status: status of the command asked, changes for each one + */ +struct iwl_cmd_response { + __le32 status; +}; + +/* + * struct iwl_tx_ant_cfg_cmd + * @valid: valid antenna configuration + */ +struct iwl_tx_ant_cfg_cmd { + __le32 valid; +} __packed; + +/* + * Calibration control struct. + * Sent as part of the phy configuration command. + * @flow_trigger: bitmap for which calibrations to perform according to + * flow triggers. + * @event_trigger: bitmap for which calibrations to perform according to + * event triggers. + */ +struct iwl_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +} __packed; + +/* This enum defines the bitmap of various calibrations to enable in both + * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
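/*
 * Illustration only: most commands in the table above are plain one-byte
 * opcodes, while the PHY_OPS_GROUP subcommands are addressed by a
 * (group, opcode) pair. A minimal sketch of building such a wide id with the
 * iwl_cmd_id() helper that the fw.c code later in this patch already uses
 * (version 0 assumed):
 */
static inline u32 example_wide_dts_notif_id(void)
{
	/* group PHY_OPS_GROUP (0x4), opcode DTS_MEASUREMENT_NOTIF_WIDE (0xFF) */
	return iwl_cmd_id(DTS_MEASUREMENT_NOTIF_WIDE, PHY_OPS_GROUP, 0);
}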
+ */ +enum iwl_calib_cfg { + IWL_CALIB_CFG_XTAL_IDX = BIT(0), + IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), + IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), + IWL_CALIB_CFG_PAPD_IDX = BIT(3), + IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), + IWL_CALIB_CFG_DC_IDX = BIT(5), + IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), + IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), + IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), + IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), + IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), + IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), + IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), + IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), + IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), + IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), + IWL_CALIB_CFG_DAC_IDX = BIT(16), + IWL_CALIB_CFG_ABS_IDX = BIT(17), + IWL_CALIB_CFG_AGC_IDX = BIT(18), +}; + +/* + * Phy configuration command. + */ +struct iwl_phy_cfg_cmd { + __le32 phy_cfg; + struct iwl_calib_ctrl calib_control; +} __packed; + +#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) +#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) +#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) +#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) +#define PHY_CFG_TX_CHAIN_A BIT(8) +#define PHY_CFG_TX_CHAIN_B BIT(9) +#define PHY_CFG_TX_CHAIN_C BIT(10) +#define PHY_CFG_RX_CHAIN_A BIT(12) +#define PHY_CFG_RX_CHAIN_B BIT(13) +#define PHY_CFG_RX_CHAIN_C BIT(14) + + +/* Target of the NVM_ACCESS_CMD */ +enum { + NVM_ACCESS_TARGET_CACHE = 0, + NVM_ACCESS_TARGET_OTP = 1, + NVM_ACCESS_TARGET_EEPROM = 2, +}; + +/* Section types for NVM_ACCESS_CMD */ +enum { + NVM_SECTION_TYPE_SW = 1, + NVM_SECTION_TYPE_REGULATORY = 3, + NVM_SECTION_TYPE_CALIBRATION = 4, + NVM_SECTION_TYPE_PRODUCTION = 5, + NVM_SECTION_TYPE_MAC_OVERRIDE = 11, + NVM_SECTION_TYPE_PHY_SKU = 12, + NVM_MAX_NUM_SECTIONS = 13, +}; + +/** + * struct iwl_nvm_access_cmd_ver2 - Request the device to send an NVM section + * @op_code: 0 - read, 1 - write + * @target: NVM_ACCESS_TARGET_* + * @type: NVM_SECTION_TYPE_* + * @offset: offset in bytes into the section + * @length: in bytes, to read/write + * @data: if write operation, the data to write. On read its empty + */ +struct iwl_nvm_access_cmd { + u8 op_code; + u8 target; + __le16 type; + __le16 offset; + __le16 length; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ + +#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ + +/* + * struct iwl_fw_paging_cmd - paging layout + * + * (FW_PAGING_BLOCK_CMD = 0x4f) + * + * Send to FW the paging layout in the driver. + * + * @flags: various flags for the command + * @block_size: the block size in powers of 2 + * @block_num: number of blocks specified in the command. 
+ * @device_phy_addr: virtual addresses from device side +*/ +struct iwl_fw_paging_cmd { + __le32 flags; + __le32 block_size; + __le32 block_num; + __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; +} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ + +/* + * Fw items ID's + * + * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload + * download + */ +enum iwl_fw_item_id { + IWL_FW_ITEM_ID_PAGING = 3, +}; + +/* + * struct iwl_fw_get_item_cmd - get an item from the fw + */ +struct iwl_fw_get_item_cmd { + __le32 item_id; +} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ + +struct iwl_fw_get_item_resp { + __le32 item_id; + __le32 item_byte_cnt; + __le32 item_val; +} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ + +/** + * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD + * @offset: offset in bytes into the section + * @length: in bytes, either how much was written or read + * @type: NVM_SECTION_TYPE_* + * @status: 0 for success, fail otherwise + * @data: if read operation, the data returned. Empty on write. + */ +struct iwl_nvm_access_resp { + __le16 offset; + __le16 length; + __le16 type; + __le16 status; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ + +/* MVM_ALIVE 0x1 */ + +/* alive response is_valid values */ +#define ALIVE_RESP_UCODE_OK BIT(0) +#define ALIVE_RESP_RFKILL BIT(1) + +/* alive response ver_type values */ +enum { + FW_TYPE_HW = 0, + FW_TYPE_PROT = 1, + FW_TYPE_AP = 2, + FW_TYPE_WOWLAN = 3, + FW_TYPE_TIMING = 4, + FW_TYPE_WIPAN = 5 +}; + +/* alive response ver_subtype values */ +enum { + FW_SUBTYPE_FULL_FEATURE = 0, + FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ + FW_SUBTYPE_REDUCED = 2, + FW_SUBTYPE_ALIVE_ONLY = 3, + FW_SUBTYPE_WOWLAN = 4, + FW_SUBTYPE_AP_SUBTYPE = 5, + FW_SUBTYPE_WIPAN = 6, + FW_SUBTYPE_INITIALIZE = 9 +}; + +#define IWL_ALIVE_STATUS_ERR 0xDEAD +#define IWL_ALIVE_STATUS_OK 0xCAFE + +#define IWL_ALIVE_FLG_RFKILL BIT(0) + +struct mvm_alive_resp_ver1 { + __le16 status; + __le16 flags; + u8 ucode_minor; + u8 ucode_major; + __le16 id; + u8 api_minor; + u8 api_major; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le16 reserved2; + __le32 timestamp; + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 log_event_table_ptr; /* SRAM address for event log */ + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; /* SRAM address for SCD */ +} __packed; /* ALIVE_RES_API_S_VER_1 */ + +struct mvm_alive_resp_ver2 { + __le16 status; + __le16 flags; + u8 ucode_minor; + u8 ucode_major; + __le16 id; + u8 api_minor; + u8 api_major; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le16 reserved2; + __le32 timestamp; + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; /* SRAM address for SCD */ + __le32 st_fwrd_addr; /* pointer to Store and forward */ + __le32 st_fwrd_size; + u8 umac_minor; /* UMAC version: minor */ + u8 umac_major; /* UMAC version: major */ + __le16 umac_id; /* UMAC version: id */ + __le32 error_info_addr; /* SRAM address for UMAC error log */ + __le32 dbg_print_buff_addr; +} __packed; /* ALIVE_RES_API_S_VER_2 */ + +struct mvm_alive_resp { + __le16 status; + __le16 flags; + __le32 ucode_minor; + __le32 ucode_major; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le32 timestamp; + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 
log_event_table_ptr; /* SRAM address for LMAC event log */ + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; /* SRAM address for SCD */ + __le32 st_fwrd_addr; /* pointer to Store and forward */ + __le32 st_fwrd_size; + __le32 umac_minor; /* UMAC version: minor */ + __le32 umac_major; /* UMAC version: major */ + __le32 error_info_addr; /* SRAM address for UMAC error log */ + __le32 dbg_print_buff_addr; +} __packed; /* ALIVE_RES_API_S_VER_3 */ + +/* Error response/notification */ +enum { + FW_ERR_UNKNOWN_CMD = 0x0, + FW_ERR_INVALID_CMD_PARAM = 0x1, + FW_ERR_SERVICE = 0x2, + FW_ERR_ARC_MEMORY = 0x3, + FW_ERR_ARC_CODE = 0x4, + FW_ERR_WATCH_DOG = 0x5, + FW_ERR_WEP_GRP_KEY_INDX = 0x10, + FW_ERR_WEP_KEY_SIZE = 0x11, + FW_ERR_OBSOLETE_FUNC = 0x12, + FW_ERR_UNEXPECTED = 0xFE, + FW_ERR_FATAL = 0xFF +}; + +/** + * struct iwl_error_resp - FW error indication + * ( REPLY_ERROR = 0x2 ) + * @error_type: one of FW_ERR_* + * @cmd_id: the command ID for which the error occured + * @bad_cmd_seq_num: sequence number of the erroneous command + * @error_service: which service created the error, applicable only if + * error_type = 2, otherwise 0 + * @timestamp: TSF in usecs. + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_service; + __le64 timestamp; +} __packed; + + +/* Common PHY, MAC and Bindings definitions */ + +#define MAX_MACS_IN_BINDING (3) +#define MAX_BINDINGS (4) +#define AUX_BINDING_INDEX (3) +#define MAX_PHYS (4) + +/* Used to extract ID and color from the context dword */ +#define FW_CTXT_ID_POS (0) +#define FW_CTXT_ID_MSK (0xff << FW_CTXT_ID_POS) +#define FW_CTXT_COLOR_POS (8) +#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS) +#define FW_CTXT_INVALID (0xffffffff) + +#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\ + (_color << FW_CTXT_COLOR_POS)) + +/* Possible actions on PHYs, MACs and Bindings */ +enum { + FW_CTXT_ACTION_STUB = 0, + FW_CTXT_ACTION_ADD, + FW_CTXT_ACTION_MODIFY, + FW_CTXT_ACTION_REMOVE, + FW_CTXT_ACTION_NUM +}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ + +/* Time Events */ + +/* Time Event types, according to MAC type */ +enum iwl_time_event_type { + /* BSS Station Events */ + TE_BSS_STA_AGGRESSIVE_ASSOC, + TE_BSS_STA_ASSOC, + TE_BSS_EAP_DHCP_PROT, + TE_BSS_QUIET_PERIOD, + + /* P2P Device Events */ + TE_P2P_DEVICE_DISCOVERABLE, + TE_P2P_DEVICE_LISTEN, + TE_P2P_DEVICE_ACTION_SCAN, + TE_P2P_DEVICE_FULL_SCAN, + + /* P2P Client Events */ + TE_P2P_CLIENT_AGGRESSIVE_ASSOC, + TE_P2P_CLIENT_ASSOC, + TE_P2P_CLIENT_QUIET_PERIOD, + + /* P2P GO Events */ + TE_P2P_GO_ASSOC_PROT, + TE_P2P_GO_REPETITIVE_NOA, + TE_P2P_GO_CT_WINDOW, + + /* WiDi Sync Events */ + TE_WIDI_TX_SYNC, + + /* Channel Switch NoA */ + TE_CHANNEL_SWITCH_PERIOD, + + TE_MAX +}; /* MAC_EVENT_TYPE_API_E_VER_1 */ + + + +/* Time event - defines for command API v1 */ + +/* + * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. + * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. 
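/*
 * Illustration only: every PHY/MAC/binding/time-event command below carries
 * an id_and_color dword built from the macros above. A minimal sketch, where
 * mac_id and color are whatever the driver assigned when the context was
 * added:
 */
static inline __le32 example_id_and_color(u32 mac_id, u32 color)
{
	/* id lives in bits 0..7, color in bits 8..15 */
	return cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color));
}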
+ */ +enum { + TE_V1_FRAG_NONE = 0, + TE_V1_FRAG_SINGLE = 1, + TE_V1_FRAG_DUAL = 2, + TE_V1_FRAG_ENDLESS = 0xffffffff +}; + +/* If a Time Event can be fragmented, this is the max number of fragments */ +#define TE_V1_FRAG_MAX_MSK 0x0fffffff +/* Repeat the time event endlessly (until removed) */ +#define TE_V1_REPEAT_ENDLESS 0xffffffff +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff + +/* Time Event dependencies: none, on another TE, or in a specific time */ +enum { + TE_V1_INDEPENDENT = 0, + TE_V1_DEP_OTHER = BIT(0), + TE_V1_DEP_TSF = BIT(1), + TE_V1_EVENT_SOCIOPATHIC = BIT(2), +}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ + +/* + * @TE_V1_NOTIF_NONE: no notifications + * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. + * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start + * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. + * + * Supported Time event notifications configuration. + * A notification (both event and fragment) includes a status indicating weather + * the FW was able to schedule the event or not. For fragment start/end + * notification the status is always success. There is no start/end fragment + * notification for monolithic events. + */ +enum { + TE_V1_NOTIF_NONE = 0, + TE_V1_NOTIF_HOST_EVENT_START = BIT(0), + TE_V1_NOTIF_HOST_EVENT_END = BIT(1), + TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), + TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), + TE_V1_NOTIF_HOST_FRAG_START = BIT(4), + TE_V1_NOTIF_HOST_FRAG_END = BIT(5), + TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), +}; /* MAC_EVENT_ACTION_API_E_VER_2 */ + +/* Time event - defines for command API */ + +/* + * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. + * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. + */ +enum { + TE_V2_FRAG_NONE = 0, + TE_V2_FRAG_SINGLE = 1, + TE_V2_FRAG_DUAL = 2, + TE_V2_FRAG_MAX = 0xfe, + TE_V2_FRAG_ENDLESS = 0xff +}; + +/* Repeat the time event endlessly (until removed) */ +#define TE_V2_REPEAT_ENDLESS 0xff +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_V2_REPEAT_MAX 0xfe + +#define TE_V2_PLACEMENT_POS 12 +#define TE_V2_ABSENCE_POS 15 + +/* Time event policy values + * A notification (both event and fragment) includes a status indicating weather + * the FW was able to schedule the event or not. For fragment start/end + * notification the status is always success. There is no start/end fragment + * notification for monolithic events. 
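/*
 * Illustration only: a typical v2 policy word combines the notification and
 * placement flags listed just below, e.g. "tell the host when the event
 * starts and ends, and start it as soon as possible":
 */
static inline __le16 example_te_v2_policy(void)
{
	return cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
			   TE_V2_NOTIF_HOST_EVENT_END |
			   T2_V2_START_IMMEDIATELY);
}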
+ * + * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable + * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. + * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start + * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. + * @TE_V2_DEP_OTHER: depends on another time event + * @TE_V2_DEP_TSF: depends on a specific time + * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC + * @TE_V2_ABSENCE: are we present or absent during the Time Event. + */ +enum { + TE_V2_DEFAULT_POLICY = 0x0, + + /* notifications (event start/stop, fragment start/stop) */ + TE_V2_NOTIF_HOST_EVENT_START = BIT(0), + TE_V2_NOTIF_HOST_EVENT_END = BIT(1), + TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), + TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), + + TE_V2_NOTIF_HOST_FRAG_START = BIT(4), + TE_V2_NOTIF_HOST_FRAG_END = BIT(5), + TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), + T2_V2_START_IMMEDIATELY = BIT(11), + + TE_V2_NOTIF_MSK = 0xff, + + /* placement characteristics */ + TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), + TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), + TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), + + /* are we present or absent during the Time Event. */ + TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), +}; + +/** + * struct iwl_time_event_cmd_api - configuring Time Events + * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also + * with version 1. determined by IWL_UCODE_TLV_FLAGS) + * ( TIME_EVENT_CMD = 0x29 ) + * @id_and_color: ID and color of the relevant MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @id: this field has two meanings, depending on the action: + * If the action is ADD, then it means the type of event to add. + * For all other actions it is the unique event ID assigned when the + * event was added by the FW. 
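/*
 * Illustration only, timing fields omitted and values hypothetical: as the
 * @id description above says, an ADD carries the event *type*, while the
 * unique id returned in iwl_time_event_resp is what later MODIFY/REMOVE
 * actions put in the same field (layout of iwl_time_event_cmd is defined
 * just below):
 */
static inline void example_te_add(struct iwl_time_event_cmd *cmd,
				  u32 mac_id, u32 color)
{
	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color));
	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	cmd->id = cpu_to_le32(TE_BSS_STA_ASSOC);	/* event type, not unique id */
}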
+ * @apply_time: When to start the Time Event (in GP2) + * @max_delay: maximum delay to event's start (apply time), in TU + * @depends_on: the unique ID of the event we depend on (if any) + * @interval: interval between repetitions, in TU + * @duration: duration of event in TU + * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS + * @max_frags: maximal number of fragments the Time Event can be divided to + * @policy: defines whether uCode shall notify the host or other uCode modules + * on event and/or fragment start and/or end + * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF + * TE_EVENT_SOCIOPATHIC + * using TE_ABSENCE and using TE_NOTIF_* + */ +struct iwl_time_event_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + __le32 id; + /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ + __le32 apply_time; + __le32 max_delay; + __le32 depends_on; + __le32 interval; + __le32 duration; + u8 repeat; + u8 max_frags; + __le16 policy; +} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ + +/** + * struct iwl_time_event_resp - response structure to iwl_time_event_cmd + * @status: bit 0 indicates success, all others specify errors + * @id: the Time Event type + * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE + * @id_and_color: ID and color of the relevant MAC + */ +struct iwl_time_event_resp { + __le32 status; + __le32 id; + __le32 unique_id; + __le32 id_and_color; +} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ + +/** + * struct iwl_time_event_notif - notifications of time event start/stop + * ( TIME_EVENT_NOTIFICATION = 0x2a ) + * @timestamp: action timestamp in GP2 + * @session_id: session's unique id + * @unique_id: unique id of the Time Event itself + * @id_and_color: ID and color of the relevant MAC + * @action: one of TE_NOTIF_START or TE_NOTIF_END + * @status: true if scheduled, false otherwise (not executed) + */ +struct iwl_time_event_notif { + __le32 timestamp; + __le32 session_id; + __le32 unique_id; + __le32 id_and_color; + __le32 action; + __le32 status; +} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ + + +/* Bindings and Time Quota */ + +/** + * struct iwl_binding_cmd - configuring bindings + * ( BINDING_CONTEXT_CMD = 0x2b ) + * @id_and_color: ID and color of the relevant Binding + * @action: action to perform, one of FW_CTXT_ACTION_* + * @macs: array of MAC id and colors which belong to the binding + * @phy: PHY id and color which belongs to the binding + */ +struct iwl_binding_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* BINDING_DATA_API_S_VER_1 */ + __le32 macs[MAX_MACS_IN_BINDING]; + __le32 phy; +} __packed; /* BINDING_CMD_API_S_VER_1 */ + +/* The maximal number of fragments in the FW's schedule session */ +#define IWL_MVM_MAX_QUOTA 128 + +/** + * struct iwl_time_quota_data - configuration of time quota per binding + * @id_and_color: ID and color of the relevant Binding + * @quota: absolute time quota in TU. The scheduler will try to divide the + * remainig quota (after Time Events) according to this quota. 
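/*
 * Illustration only: one entry of the TIME_QUOTA_CMD payload defined just
 * below, granting a binding quota_tu TUs of air time (quota_tu is a
 * hypothetical value chosen by the caller):
 */
static inline void example_set_quota(struct iwl_time_quota_data *data,
				     u32 binding_id, u32 color, u32 quota_tu)
{
	data->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(binding_id, color));
	data->quota = cpu_to_le32(quota_tu);
	data->max_duration = cpu_to_le32(quota_tu);
}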
+ * @max_duration: max uninterrupted context duration in TU + */ +struct iwl_time_quota_data { + __le32 id_and_color; + __le32 quota; + __le32 max_duration; +} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ + +/** + * struct iwl_time_quota_cmd - configuration of time quota between bindings + * ( TIME_QUOTA_CMD = 0x2c ) + * @quotas: allocations per binding + */ +struct iwl_time_quota_cmd { + struct iwl_time_quota_data quotas[MAX_BINDINGS]; +} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ + + +/* PHY context */ + +/* Supported bands */ +#define PHY_BAND_5 (0) +#define PHY_BAND_24 (1) + +/* Supported channel width, vary if there is VHT support */ +#define PHY_VHT_CHANNEL_MODE20 (0x0) +#define PHY_VHT_CHANNEL_MODE40 (0x1) +#define PHY_VHT_CHANNEL_MODE80 (0x2) +#define PHY_VHT_CHANNEL_MODE160 (0x3) + +/* + * Control channel position: + * For legacy set bit means upper channel, otherwise lower. + * For VHT - bit-2 marks if the control is lower/upper relative to center-freq + * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. + * center_freq + * | + * 40Mhz |_______|_______| + * 80Mhz |_______|_______|_______|_______| + * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| + * code 011 010 001 000 | 100 101 110 111 + */ +#define PHY_VHT_CTRL_POS_1_BELOW (0x0) +#define PHY_VHT_CTRL_POS_2_BELOW (0x1) +#define PHY_VHT_CTRL_POS_3_BELOW (0x2) +#define PHY_VHT_CTRL_POS_4_BELOW (0x3) +#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) +#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) +#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) +#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) + +/* + * @band: PHY_BAND_* + * @channel: channel number + * @width: PHY_[VHT|LEGACY]_CHANNEL_* + * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* + */ +struct iwl_fw_channel_info { + u8 band; + u8 channel; + u8 width; + u8 ctrl_pos; +} __packed; + +#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) +#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) +#define PHY_RX_CHAIN_VALID_POS (1) +#define PHY_RX_CHAIN_VALID_MSK \ + (0x7 << PHY_RX_CHAIN_VALID_POS) +#define PHY_RX_CHAIN_FORCE_SEL_POS (4) +#define PHY_RX_CHAIN_FORCE_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) +#define PHY_RX_CHAIN_CNT_POS (10) +#define PHY_RX_CHAIN_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_CNT_POS) +#define PHY_RX_CHAIN_MIMO_CNT_POS (12) +#define PHY_RX_CHAIN_MIMO_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) +#define PHY_RX_CHAIN_MIMO_FORCE_POS (14) +#define PHY_RX_CHAIN_MIMO_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) + +/* TODO: fix the value, make it depend on firmware at runtime? */ +#define NUM_PHY_CTX 3 + +/* TODO: complete missing documentation */ +/** + * struct iwl_phy_context_cmd - config of the PHY context + * ( PHY_CONTEXT_CMD = 0x8 ) + * @id_and_color: ID and color of the relevant Binding + * @action: action to perform, one of FW_CTXT_ACTION_* + * @apply_time: 0 means immediate apply and context switch. + * other value means apply new params after X usecs + * @tx_param_color: ??? + * @channel_info: + * @txchain_info: ??? + * @rxchain_info: ??? + * @acquisition_data: ??? 
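/*
 * Illustration only, channel values hypothetical: a 40 MHz channel in the
 * 5 GHz band whose control (primary) channel sits just below the center
 * frequency, following the position encoding pictured above:
 */
static inline void example_fill_ci(struct iwl_fw_channel_info *ci)
{
	ci->band = PHY_BAND_5;
	ci->channel = 36;
	ci->width = PHY_VHT_CHANNEL_MODE40;
	ci->ctrl_pos = PHY_VHT_CTRL_POS_1_BELOW;
}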
+ * @dsp_cfg_flags: set to 0 + */ +struct iwl_phy_context_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* PHY_CONTEXT_DATA_API_S_VER_1 */ + __le32 apply_time; + __le32 tx_param_color; + struct iwl_fw_channel_info ci; + __le32 txchain_info; + __le32 rxchain_info; + __le32 acquisition_data; + __le32 dsp_cfg_flags; +} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ + +/* + * Aux ROC command + * + * Command requests the firmware to create a time event for a certain duration + * and remain on the given channel. This is done by using the Aux framework in + * the FW. + * The command was first used for Hot Spot issues - but can be used regardless + * to Hot Spot. + * + * ( HOT_SPOT_CMD 0x53 ) + * + * @id_and_color: ID and color of the MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the + * event_unique_id should be the id of the time event assigned by ucode. + * Otherwise ignore the event_unique_id. + * @sta_id_and_color: station id and color, resumed during "Remain On Channel" + * activity. + * @channel_info: channel info + * @node_addr: Our MAC Address + * @reserved: reserved for alignment + * @apply_time: GP2 value to start (should always be the current GP2 value) + * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max + * time by which start of the event is allowed to be postponed. + * @duration: event duration in TU To calculate event duration: + * timeEventDuration = min(duration, remainingQuota) + */ +struct iwl_hs20_roc_req { + /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ + __le32 id_and_color; + __le32 action; + __le32 event_unique_id; + __le32 sta_id_and_color; + struct iwl_fw_channel_info channel_info; + u8 node_addr[ETH_ALEN]; + __le16 reserved; + __le32 apply_time; + __le32 apply_time_max_delay; + __le32 duration; +} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ + +/* + * values for AUX ROC result values + */ +enum iwl_mvm_hot_spot { + HOT_SPOT_RSP_STATUS_OK, + HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, + HOT_SPOT_MAX_NUM_OF_SESSIONS, +}; + +/* + * Aux ROC command response + * + * In response to iwl_hs20_roc_req the FW sends this command to notify the + * driver the uid of the timevent. 
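/*
 * Illustration only (id_and_color, sta_id_and_color and channel_info left to
 * the caller, delay value hypothetical): the time-critical part of an aux ROC
 * request is anchoring apply_time to the current GP2 value as described
 * above:
 */
static inline void example_fill_aux_roc(struct iwl_hs20_roc_req *cmd,
					u32 gp2_now, u32 duration_tu,
					const u8 *our_addr)
{
	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	cmd->apply_time = cpu_to_le32(gp2_now);
	cmd->apply_time_max_delay = cpu_to_le32(duration_tu);
	cmd->duration = cpu_to_le32(duration_tu);
	memcpy(cmd->node_addr, our_addr, ETH_ALEN);
}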
+ * + * ( HOT_SPOT_CMD 0x53 ) + * + * @event_unique_id: Unique ID of time event assigned by ucode + * @status: Return status 0 is success, all the rest used for specific errors + */ +struct iwl_hs20_roc_res { + __le32 event_unique_id; + __le32 status; +} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ + +/** + * struct iwl_radio_version_notif - information on the radio version + * ( RADIO_VERSION_NOTIFICATION = 0x68 ) + * @radio_flavor: + * @radio_step: + * @radio_dash: + */ +struct iwl_radio_version_notif { + __le32 radio_flavor; + __le32 radio_step; + __le32 radio_dash; +} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */ + +enum iwl_card_state_flags { + CARD_ENABLED = 0x00, + HW_CARD_DISABLED = 0x01, + SW_CARD_DISABLED = 0x02, + CT_KILL_CARD_DISABLED = 0x04, + HALT_CARD_DISABLED = 0x08, + CARD_DISABLED_MSK = 0x0f, + CARD_IS_RX_ON = 0x10, +}; + +/** + * struct iwl_radio_version_notif - information on the radio version + * ( CARD_STATE_NOTIFICATION = 0xa1 ) + * @flags: %iwl_card_state_flags + */ +struct iwl_card_state_notif { + __le32 flags; +} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ + +/** + * struct iwl_missed_beacons_notif - information on missed beacons + * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) + * @mac_id: interface ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. + * @consec_missed_beacons: number of consecutive missed beacons + * @num_expected_beacons: + * @num_recvd_beacons: + */ +struct iwl_missed_beacons_notif { + __le32 mac_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ + +/** + * struct iwl_mfuart_load_notif - mfuart image version & status + * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) + * @installed_ver: installed image version + * @external_ver: external image version + * @status: MFUART loading status + * @duration: MFUART loading time +*/ +struct iwl_mfuart_load_notif { + __le32 installed_ver; + __le32 external_ver; + __le32 status; + __le32 duration; +} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/ + +/** + * struct iwl_set_calib_default_cmd - set default value for calibration. + * ( SET_CALIB_DEFAULT_CMD = 0x8e ) + * @calib_index: the calibration to set value for + * @length: of data + * @data: the value to set for the calibration result + */ +struct iwl_set_calib_default_cmd { + __le16 calib_index; + __le16 length; + u8 data[0]; +} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ + +#define MAX_PORT_ID_NUM 2 +#define MAX_MCAST_FILTERING_ADDRESSES 256 + +/** + * struct iwl_mcast_filter_cmd - configure multicast filter. + * @filter_own: Set 1 to filter out multicast packets sent by station itself + * @port_id: Multicast MAC addresses array specifier. This is a strange way + * to identify network interface adopted in host-device IF. + * It is used by FW as index in array of addresses. This array has + * MAX_PORT_ID_NUM members. + * @count: Number of MAC addresses in the array + * @pass_all: Set 1 to pass all multicast packets. + * @bssid: current association BSSID. + * @addr_list: Place holder for array of MAC addresses. + * IMPORTANT: add padding if necessary to ensure DWORD alignment. 
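/*
 * Illustration only, assuming the usual roundup()/kzalloc() kernel helpers:
 * the variable-length addr_list must keep the command (defined just below)
 * a whole number of DWORDs, so an allocation for n_addrs addresses could
 * round up like this:
 */
static struct iwl_mcast_filter_cmd *example_alloc_mcast_cmd(int n_addrs)
{
	size_t len = roundup(sizeof(struct iwl_mcast_filter_cmd) +
			     n_addrs * ETH_ALEN, 4);
	struct iwl_mcast_filter_cmd *cmd = kzalloc(len, GFP_KERNEL);

	if (cmd)
		cmd->count = n_addrs;
	return cmd;
}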
+ */ +struct iwl_mcast_filter_cmd { + u8 filter_own; + u8 port_id; + u8 count; + u8 pass_all; + u8 bssid[6]; + u8 reserved[2]; + u8 addr_list[0]; +} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ + +#define MAX_BCAST_FILTERS 8 +#define MAX_BCAST_FILTER_ATTRS 2 + +/** + * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet + * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. + * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. + * start of ip payload). + */ +enum iwl_mvm_bcast_filter_attr_offset { + BCAST_FILTER_OFFSET_PAYLOAD_START = 0, + BCAST_FILTER_OFFSET_IP_END = 1, +}; + +/** + * struct iwl_fw_bcast_filter_attr - broadcast filter attribute + * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. + * @offset: starting offset of this pattern. + * @val: value to match - big endian (MSB is the first + * byte to match from offset pos). + * @mask: mask to match (big endian). + */ +struct iwl_fw_bcast_filter_attr { + u8 offset_type; + u8 offset; + __le16 reserved1; + __be32 val; + __be32 mask; +} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ + +/** + * enum iwl_mvm_bcast_filter_frame_type - filter frame type + * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. + * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames + */ +enum iwl_mvm_bcast_filter_frame_type { + BCAST_FILTER_FRAME_TYPE_ALL = 0, + BCAST_FILTER_FRAME_TYPE_IPV4 = 1, +}; + +/** + * struct iwl_fw_bcast_filter - broadcast filter + * @discard: discard frame (1) or let it pass (0). + * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. + * @num_attrs: number of valid attributes in this filter. + * @attrs: attributes of this filter. a filter is considered matched + * only when all its attributes are matched (i.e. AND relationship) + */ +struct iwl_fw_bcast_filter { + u8 discard; + u8 frame_type; + u8 num_attrs; + u8 reserved1; + struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; +} __packed; /* BCAST_FILTER_S_VER_1 */ + +/** + * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. + * @default_discard: default action for this mac (discard (1) / pass (0)). + * @attached_filters: bitmap of relevant filters for this mac. + */ +struct iwl_fw_bcast_mac { + u8 default_discard; + u8 reserved1; + __le16 attached_filters; +} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ + +/** + * struct iwl_bcast_filter_cmd - broadcast filtering configuration + * @disable: enable (0) / disable (1) + * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) + * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) + * @filters: broadcast filters + * @macs: broadcast filtering configuration per-mac + */ +struct iwl_bcast_filter_cmd { + u8 disable; + u8 max_bcast_filters; + u8 max_macs; + u8 reserved1; + struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; + struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; +} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ + +/* + * enum iwl_mvm_marker_id - maker ids + * + * The ids for different type of markers to insert into the usniffer logs + */ +enum iwl_mvm_marker_id { + MARKER_ID_TX_FRAME_LATENCY = 1, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_mvm_marker - mark info into the usniffer logs + * + * (MARKER_CMD = 0xcb) + * + * Mark the UTC time stamp into the usniffer logs together with additional + * metadata, so the usniffer output can be parsed. + * In the command response the ucode will return the GP2 time. + * + * @dw_len: The amount of dwords following this byte including this byte. 
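/*
 * A worked example of the dword count above (illustration only): the fixed
 * part of the marker structure below is 12 bytes, i.e. 3 dwords, so a marker
 * carrying n_meta metadata dwords uses dw_len = 3 + n_meta:
 */
static inline u8 example_marker_dw_len(u8 n_meta)
{
	return sizeof(struct iwl_mvm_marker) / sizeof(__le32) + n_meta;
}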
+ * @marker_id: A unique marker id (iwl_mvm_marker_id). + * @reserved: reserved. + * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC + * @metadata: additional meta data that will be written to the unsiffer log + */ +struct iwl_mvm_marker { + u8 dwLen; + u8 markerId; + __le16 reserved; + __le64 timestamp; + __le32 metadata[0]; +} __packed; /* MARKER_API_S_VER_1 */ + +/* + * enum iwl_dc2dc_config_id - flag ids + * + * Ids of dc2dc configuration flags + */ +enum iwl_dc2dc_config_id { + DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ + DCDC_FREQ_TUNE_SET = 0x2, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_dc2dc_config_cmd - configure dc2dc values + * + * (DC2DC_CONFIG_CMD = 0x83) + * + * Set/Get & configure dc2dc values. + * The command always returns the current dc2dc values. + * + * @flags: set/get dc2dc + * @enable_low_power_mode: not used. + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_cmd { + __le32 flags; + __le32 enable_low_power_mode; /* not used */ + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd + * + * Current dc2dc values returned by the FW. + * + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_resp { + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ + +/*********************************** + * Smart Fifo API + ***********************************/ +/* Smart Fifo state */ +enum iwl_sf_state { + SF_LONG_DELAY_ON = 0, /* should never be called by driver */ + SF_FULL_ON, + SF_UNINIT, + SF_INIT_OFF, + SF_HW_NUM_STATES +}; + +/* Smart Fifo possible scenario */ +enum iwl_sf_scenario { + SF_SCENARIO_SINGLE_UNICAST, + SF_SCENARIO_AGG_UNICAST, + SF_SCENARIO_MULTICAST, + SF_SCENARIO_BA_RESP, + SF_SCENARIO_TX_RESP, + SF_NUM_SCENARIO +}; + +#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ +#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ + +/* smart FIFO default values */ +#define SF_W_MARK_SISO 6144 +#define SF_W_MARK_MIMO2 8192 +#define SF_W_MARK_MIMO3 6144 +#define SF_W_MARK_LEGACY 4096 +#define SF_W_MARK_SCAN 4096 + +/* SF Scenarios timers for default configuration (aligned to 32 uSec) */ +#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ +#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ + +/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ +#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ +#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ +#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ +#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ +#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ +#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ +#define SF_BA_IDLE_TIMER 320 /* 300 uSec */ +#define SF_BA_AGING_TIMER 2016 /* 2 mSec */ +#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ +#define 
SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ + +#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ + +#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) + +/** + * Smart Fifo configuration command. + * @state: smart fifo state, types listed in enum %iwl_sf_sate. + * @watermark: Minimum allowed availabe free space in RXF for transient state. + * @long_delay_timeouts: aging and idle timer values for each scenario + * in long delay state. + * @full_on_timeouts: timer values for each scenario in full on state. + */ +struct iwl_sf_cfg_cmd { + __le32 state; + __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; + __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; + __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; +} __packed; /* SF_CFG_API_S_VER_2 */ + +/*********************************** + * Location Aware Regulatory (LAR) API - MCC updates + ***********************************/ + +/** + * struct iwl_mcc_update_cmd - Request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. + * @mcc: given mobile country code + * @source_id: the source from where we got the MCC, see iwl_mcc_source + * @reserved: reserved for alignment + */ +struct iwl_mcc_update_cmd { + __le16 mcc; + u8 source_id; + u8 reserved; +} __packed; /* LAR_UPDATE_MCC_CMD_API_S */ + +/** + * iwl_mcc_update_resp - response to MCC_UPDATE_CMD. + * Contains the new channel control profile map, if changed, and the new MCC + * (mobile country code). + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. + * @status: see &enum iwl_mcc_update_status + * @mcc: the new applied MCC + * @cap: capabilities for all channels which matches the MCC + * @source_id: the MCC source, see iwl_mcc_source + * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 + * channels, depending on platform) + * @channels: channel control data map, DWORD for each channel. Only the first + * 16bits are used. + */ +struct iwl_mcc_update_resp { + __le32 status; + __le16 mcc; + u8 cap; + u8 source_id; + __le32 n_channels; + __le32 channels[0]; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */ + +/** + * struct iwl_mcc_chub_notif - chub notifies of mcc change + * (MCC_CHUB_UPDATE_CMD = 0xc9) + * The Chub (Communication Hub, CommsHUB) is a HW component that connects to + * the cellular and connectivity cores that gets updates of the mcc, and + * notifies the ucode directly of any mcc change. + * The ucode requests the driver to request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. 
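/*
 * Illustration only: a sketch of filling the MCC update command defined
 * above, assuming the two ASCII letters are packed with the first letter in
 * the upper byte of the 16-bit field ("ZZ" asks for the NVM default profile
 * as noted above; the source value is hypothetical):
 */
static inline void example_fill_mcc_update(struct iwl_mcc_update_cmd *cmd,
					   const char *alpha2)
{
	cmd->mcc = cpu_to_le16((alpha2[0] << 8) | alpha2[1]);
	cmd->source_id = MCC_SOURCE_WIFI;	/* hypothetical source */
}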
+ * @mcc: given mobile country code + * @source_id: identity of the change originator, see iwl_mcc_source + * @reserved1: reserved for alignment + */ +struct iwl_mcc_chub_notif { + u16 mcc; + u8 source_id; + u8 reserved1; +} __packed; /* LAR_MCC_NOTIFY_S */ + +enum iwl_mcc_update_status { + MCC_RESP_NEW_CHAN_PROFILE, + MCC_RESP_SAME_CHAN_PROFILE, + MCC_RESP_INVALID, + MCC_RESP_NVM_DISABLED, + MCC_RESP_ILLEGAL, + MCC_RESP_LOW_PRIORITY, +}; + +enum iwl_mcc_source { + MCC_SOURCE_OLD_FW = 0, + MCC_SOURCE_ME = 1, + MCC_SOURCE_BIOS = 2, + MCC_SOURCE_3G_LTE_HOST = 3, + MCC_SOURCE_3G_LTE_DEVICE = 4, + MCC_SOURCE_WIFI = 5, + MCC_SOURCE_RESERVED = 6, + MCC_SOURCE_DEFAULT = 7, + MCC_SOURCE_UNINITIALIZED = 8, + MCC_SOURCE_GET_CURRENT = 0x10 +}; + +/* DTS measurements */ + +enum iwl_dts_measurement_flags { + DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), + DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), +}; + +/** + * iwl_dts_measurement_cmd - request DTS temperature and/or voltage measurements + * + * @flags: indicates which measurements we want as specified in &enum + * iwl_dts_measurement_flags + */ +struct iwl_dts_measurement_cmd { + __le32 flags; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ + +/** +* enum iwl_dts_control_measurement_mode - DTS measurement type +* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read +* back (latest value. Not waiting for new value). Use automatic +* SW DTS configuration. +* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, +* trigger DTS reading and provide read back temperature read +* when available. +* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read +* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, +* without measurement trigger. +*/ +enum iwl_dts_control_measurement_mode { + DTS_AUTOMATIC = 0, + DTS_REQUEST_READ = 1, + DTS_OVER_WRITE = 2, + DTS_DIRECT_WITHOUT_MEASURE = 3, +}; + +/** +* enum iwl_dts_used - DTS to use or used for measurement in the DTS request +* @DTS_USE_TOP: Top +* @DTS_USE_CHAIN_A: chain A +* @DTS_USE_CHAIN_B: chain B +* @DTS_USE_CHAIN_C: chain C +* @XTAL_TEMPERATURE - read temperature from xtal +*/ +enum iwl_dts_used { + DTS_USE_TOP = 0, + DTS_USE_CHAIN_A = 1, + DTS_USE_CHAIN_B = 2, + DTS_USE_CHAIN_C = 3, + XTAL_TEMPERATURE = 4, +}; + +/** +* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode +* @DTS_BIT6_MODE: bit 6 mode +* @DTS_BIT8_MODE: bit 8 mode +*/ +enum iwl_dts_bit_mode { + DTS_BIT6_MODE = 0, + DTS_BIT8_MODE = 1, +}; + +/** + * iwl_ext_dts_measurement_cmd - request extended DTS temperature measurements + * @control_mode: see &enum iwl_dts_control_measurement_mode + * @temperature: used when over write DTS mode is selected + * @sensor: set temperature sensor to use. See &enum iwl_dts_used + * @avg_factor: average factor to DTS in request DTS read mode + * @bit_mode: value defines the DTS bit mode to use. 
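/*
 * Illustration only, reusing the iwl_mvm_send_cmd_pdu() helper seen in fw.c
 * later in this patch: requesting a one-off temperature measurement with the
 * legacy (non-wide) trigger command:
 */
static int example_request_temperature(struct iwl_mvm *mvm)
{
	struct iwl_dts_measurement_cmd cmd = {
		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
	};

	return iwl_mvm_send_cmd_pdu(mvm, CMD_DTS_MEASUREMENT_TRIGGER, 0,
				    sizeof(cmd), &cmd);
}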
See &enum iwl_dts_bit_mode + * @step_duration: step duration for the DTS + */ +struct iwl_ext_dts_measurement_cmd { + __le32 control_mode; + __le32 temperature; + __le32 sensor; + __le32 avg_factor; + __le32 bit_mode; + __le32 step_duration; +} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ + +/** + * iwl_dts_measurement_notif - notification received with the measurements + * + * @temp: the measured temperature + * @voltage: the measured voltage + */ +struct iwl_dts_measurement_notif { + __le32 temp; + __le32 voltage; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ + +/*********************************** + * TDLS API + ***********************************/ + +/* Type of TDLS request */ +enum iwl_tdls_channel_switch_type { + TDLS_SEND_CHAN_SW_REQ = 0, + TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, + TDLS_MOVE_CH, +}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ + +/** + * Switch timing sub-element in a TDLS channel-switch command + * @frame_timestamp: GP2 timestamp of channel-switch request/response packet + * received from peer + * @max_offchan_duration: What amount of microseconds out of a DTIM is given + * to the TDLS off-channel communication. For instance if the DTIM is + * 200TU and the TDLS peer is to be given 25% of the time, the value + * given will be 50TU, or 50 * 1024 if translated into microseconds. + * @switch_time: switch time the peer sent in its channel switch timing IE + * @switch_timout: switch timeout the peer sent in its channel switch timing IE + */ +struct iwl_tdls_channel_switch_timing { + __le32 frame_timestamp; /* GP2 time of peer packet Rx */ + __le32 max_offchan_duration; /* given in micro-seconds */ + __le32 switch_time; /* given in micro-seconds */ + __le32 switch_timeout; /* given in micro-seconds */ +} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */ + +#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200 + +/** + * TDLS channel switch frame template + * + * A template representing a TDLS channel-switch request or response frame + * + * @switch_time_offset: offset to the channel switch timing IE in the template + * @tx_cmd: Tx parameters for the frame + * @data: frame data + */ +struct iwl_tdls_channel_switch_frame { + __le32 switch_time_offset; + struct iwl_tx_cmd tx_cmd; + u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ + +/** + * TDLS channel switch command + * + * The command is sent to initiate a channel switch and also in response to + * incoming TDLS channel-switch request/response packets from remote peers. 
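/*
 * Illustration only: converting the off-channel budget from TUs to the
 * microseconds the firmware expects, as in the 50 TU -> 50 * 1024 usec
 * example above (gp2_rx and offchan_tu are supplied by the caller):
 */
static inline void example_fill_tdls_timing(struct iwl_tdls_channel_switch_timing *timing,
					    u32 gp2_rx, u32 offchan_tu)
{
	timing->frame_timestamp = cpu_to_le32(gp2_rx);
	timing->max_offchan_duration = cpu_to_le32(offchan_tu * 1024);
}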
+ * + * @switch_type: see &enum iwl_tdls_channel_switch_type + * @peer_sta_id: station id of TDLS peer + * @ci: channel we switch to + * @timing: timing related data for command + * @frame: channel-switch request/response template, depending to switch_type + */ +struct iwl_tdls_channel_switch_cmd { + u8 switch_type; + __le32 peer_sta_id; + struct iwl_fw_channel_info ci; + struct iwl_tdls_channel_switch_timing timing; + struct iwl_tdls_channel_switch_frame frame; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ + +/** + * TDLS channel switch start notification + * + * @status: non-zero on success + * @offchannel_duration: duration given in microseconds + * @sta_id: peer currently performing the channel-switch with + */ +struct iwl_tdls_channel_switch_notif { + __le32 status; + __le32 offchannel_duration; + __le32 sta_id; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */ + +/** + * TDLS station info + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx + * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer + * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise + */ +struct iwl_tdls_sta_info { + u8 sta_id; + u8 tx_to_peer_tid; + __le16 tx_to_peer_ssn; + __le32 is_initiator; +} __packed; /* TDLS_STA_INFO_VER_1 */ + +/** + * TDLS basic config command + * + * @id_and_color: MAC id and color being configured + * @tdls_peer_count: amount of currently connected TDLS peers + * @tx_to_ap_tid: TID reverved vs. the AP for FW based Tx + * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP + * @sta_info: per-station info. Only the first tdls_peer_count entries are set + * @pti_req_data_offset: offset of network-level data for the PTI template + * @pti_req_tx_cmd: Tx parameters for PTI request template + * @pti_req_template: PTI request template data + */ +struct iwl_tdls_config_cmd { + __le32 id_and_color; /* mac id and color */ + u8 tdls_peer_count; + u8 tx_to_ap_tid; + __le16 tx_to_ap_ssn; + struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; + + __le32 pti_req_data_offset; + struct iwl_tx_cmd pti_req_tx_cmd; + u8 pti_req_template[0]; +} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ + +/** + * TDLS per-station config information from FW + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to + * the peer + */ +struct iwl_tdls_config_sta_info_res { + __le16 sta_id; + __le16 tx_to_peer_last_seq; +} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ + +/** + * TDLS config information from FW + * + * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP + * @sta_info: per-station TDLS config information + */ +struct iwl_tdls_config_res { + __le32 tx_to_ap_last_seq; + struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; +} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ + +#define TX_FIFO_MAX_NUM 8 +#define RX_FIFO_MAX_NUM 2 + +/** + * Shared memory configuration information from the FW + * + * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not + * accessible) + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to + * 0x0 as accessible only via DBGM RDAT) + * @sample_buff_size: internal sample buff size + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre + * 8000 HW set to 0x0 as not accessible) + * @txfifo_size: size of TXF0 ... 
TXF7 + * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + */ +struct iwl_shared_mem_cfg { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM]; + __le32 rxfifo_size[RX_FIFO_MAX_NUM]; + __le32 page_buff_addr; + __le32 page_buff_size; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ + +#endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c new file mode 100644 index 000000000000..d906fa13ba97 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -0,0 +1,1166 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include + +#include "iwl-trans.h" +#include "iwl-op-mode.h" +#include "iwl-fw.h" +#include "iwl-debug.h" +#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */ +#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */ +#include "iwl-prph.h" +#include "iwl-eeprom-parse.h" + +#include "mvm.h" +#include "iwl-phy-db.h" + +#define MVM_UCODE_ALIVE_TIMEOUT HZ +#define MVM_UCODE_CALIB_TIMEOUT (2*HZ) + +#define UCODE_VALID_OK cpu_to_le32(0x1) + +struct iwl_mvm_alive_data { + bool valid; + u32 scd_base_addr; +}; + +static inline const struct fw_img * +iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) +{ + if (ucode_type >= IWL_UCODE_TYPE_MAX) + return NULL; + + return &mvm->fw->img[ucode_type]; +} + +static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) +{ + struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { + .valid = cpu_to_le32(valid_tx_ant), + }; + + IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); + return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0, + sizeof(tx_ant_cmd), &tx_ant_cmd); +} + +static void iwl_free_fw_paging(struct iwl_mvm *mvm) +{ + int i; + + if (!mvm->fw_paging_db[0].fw_paging_block) + return; + + for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { + if (!mvm->fw_paging_db[i].fw_paging_block) { + IWL_DEBUG_FW(mvm, + "Paging: block %d already freed, continue to next page\n", + i); + + continue; + } + + __free_pages(mvm->fw_paging_db[i].fw_paging_block, + get_order(mvm->fw_paging_db[i].fw_paging_size)); + } + kfree(mvm->trans->paging_download_buf); + memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); +} + +static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) +{ + int sec_idx, idx; + u32 offset = 0; + + /* + * find where is the paging image start point: + * if CPU2 exist and it's in paging format, then the image looks like: + * CPU1 sections (2 or more) + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2 + * CPU2 sections (not paged) + * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2 + * non paged to CPU2 paging sec + * CPU2 paging CSS + * CPU2 paging image (including instruction and data) + */ + for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) { + if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { + sec_idx++; + break; + } + } + + if (sec_idx >= IWL_UCODE_SECTION_MAX) { + IWL_ERR(mvm, "driver didn't find paging image\n"); + iwl_free_fw_paging(mvm); + return -EINVAL; + } + + /* copy the CSS block to the dram */ + IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n", + sec_idx); + + memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), + image->sec[sec_idx].data, + mvm->fw_paging_db[0].fw_paging_size); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d CSS bytes to first block\n", + mvm->fw_paging_db[0].fw_paging_size); + + sec_idx++; + + /* + * copy the paging blocks to the dram + * loop index start from 1 since that 
CSS block already copied to dram + * and CSS index is 0. + * loop stop at num_of_paging_blk since that last block is not full. + */ + for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { + memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + image->sec[sec_idx].data + offset, + mvm->fw_paging_db[idx].fw_paging_size); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d paging bytes to block %d\n", + mvm->fw_paging_db[idx].fw_paging_size, + idx); + + offset += mvm->fw_paging_db[idx].fw_paging_size; + } + + /* copy the last paging block */ + if (mvm->num_of_pages_in_last_blk > 0) { + memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + image->sec[sec_idx].data + offset, + FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d pages in the last block %d\n", + mvm->num_of_pages_in_last_blk, idx); + } + + return 0; +} + +static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, + const struct fw_img *image) +{ + struct page *block; + dma_addr_t phys = 0; + int blk_idx = 0; + int order, num_of_pages; + int dma_enabled; + + if (mvm->fw_paging_db[0].fw_paging_block) + return 0; + + dma_enabled = is_device_dma_capable(mvm->trans->dev); + + /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ + BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); + + num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; + mvm->num_of_paging_blk = ((num_of_pages - 1) / + NUM_OF_PAGE_PER_GROUP) + 1; + + mvm->num_of_pages_in_last_blk = + num_of_pages - + NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); + + IWL_DEBUG_FW(mvm, + "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", + mvm->num_of_paging_blk, + mvm->num_of_pages_in_last_blk); + + /* allocate block of 4Kbytes for paging CSS */ + order = get_order(FW_PAGING_SIZE); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + + mvm->fw_paging_db[blk_idx].fw_paging_block = block; + mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE; + + if (dma_enabled) { + phys = dma_map_page(mvm->trans->dev, block, 0, + PAGE_SIZE << order, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mvm->trans->dev, phys)) { + /* + * free the previous pages and the current one since + * we failed to map_page. + */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + IWL_DEBUG_FW(mvm, + "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", + order); + + /* + * allocate blocks in dram. + * since that CSS allocated in fw_paging_db[0] loop start from index 1 + */ + for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { + /* allocate block of PAGING_BLOCK_SIZE (32K) */ + order = get_order(PAGING_BLOCK_SIZE); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + + mvm->fw_paging_db[blk_idx].fw_paging_block = block; + mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE; + + if (dma_enabled) { + phys = dma_map_page(mvm->trans->dev, block, 0, + PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(mvm->trans->dev, phys)) { + /* + * free the previous pages and the current one + * since we failed to map_page. 
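+ * Note that when the device is not DMA capable this mapping is not + * attempted at all: the else branch below stores a tagged block index + * (PAGING_ADDR_SIG | blk_idx << BLOCK_2_EXP_SIZE) instead of a bus + * address, and the pages are then expected to be moved through the + * separately allocated paging_download_buf rather than via DMA.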
+ */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + mvm->fw_paging_db[blk_idx].fw_paging_phys = + PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + IWL_DEBUG_FW(mvm, + "Paging: allocated 32K bytes (order %d) for firmware paging.\n", + order); + } + + return 0; +} + +static int iwl_save_fw_paging(struct iwl_mvm *mvm, + const struct fw_img *fw) +{ + int ret; + + ret = iwl_alloc_fw_paging_mem(mvm, fw); + if (ret) + return ret; + + return iwl_fill_paging_mem(mvm, fw); +} + +/* send paging cmd to FW in case CPU2 has paging image */ +static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) +{ + int blk_idx; + __le32 dev_phy_addr; + struct iwl_fw_paging_cmd fw_paging_cmd = { + .flags = + cpu_to_le32(PAGING_CMD_IS_SECURED | + PAGING_CMD_IS_ENABLED | + (mvm->num_of_pages_in_last_blk << + PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), + .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), + .block_num = cpu_to_le32(mvm->num_of_paging_blk), + }; + + /* loop for for all paging blocks + CSS block */ + for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { + dev_phy_addr = + cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >> + PAGE_2_EXP_SIZE); + fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr; + } + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(fw_paging_cmd), &fw_paging_cmd); +} + +/* + * Send paging item cmd to FW in case CPU2 has paging image + */ +static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) +{ + int ret; + struct iwl_fw_get_item_cmd fw_get_item_cmd = { + .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), + }; + + struct iwl_fw_get_item_resp *item_resp; + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &fw_get_item_cmd, }, + }; + + cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, + "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", + ret); + return ret; + } + + item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; + if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { + IWL_ERR(mvm, + "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", + le32_to_cpu(item_resp->item_id)); + ret = -EIO; + goto exit; + } + + mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE, + GFP_KERNEL); + if (!mvm->trans->paging_download_buf) { + ret = -ENOMEM; + goto exit; + } + mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); + mvm->trans->paging_db = mvm->fw_paging_db; + IWL_DEBUG_FW(mvm, + "Paging: got paging request address (paging_req_addr 0x%08x)\n", + mvm->trans->paging_req_addr); + +exit: + iwl_free_resp(&cmd); + + return ret; +} + +static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_mvm_alive_data *alive_data = data; + struct mvm_alive_resp_ver1 *palive1; + struct mvm_alive_resp_ver2 *palive2; + struct mvm_alive_resp *palive; + + if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) { + palive1 = (void *)pkt->data; + + mvm->support_umac_log = false; + mvm->error_event_table = + le32_to_cpu(palive1->error_event_table_ptr); + mvm->log_event_table = + le32_to_cpu(palive1->log_event_table_ptr); + alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr); + + 
alive_data->valid = le16_to_cpu(palive1->status) == + IWL_ALIVE_STATUS_OK; + IWL_DEBUG_FW(mvm, + "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", + le16_to_cpu(palive1->status), palive1->ver_type, + palive1->ver_subtype, palive1->flags); + } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) { + palive2 = (void *)pkt->data; + + mvm->error_event_table = + le32_to_cpu(palive2->error_event_table_ptr); + mvm->log_event_table = + le32_to_cpu(palive2->log_event_table_ptr); + alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr); + mvm->umac_error_event_table = + le32_to_cpu(palive2->error_info_addr); + mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr); + mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size); + + alive_data->valid = le16_to_cpu(palive2->status) == + IWL_ALIVE_STATUS_OK; + if (mvm->umac_error_event_table) + mvm->support_umac_log = true; + + IWL_DEBUG_FW(mvm, + "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", + le16_to_cpu(palive2->status), palive2->ver_type, + palive2->ver_subtype, palive2->flags); + + IWL_DEBUG_FW(mvm, + "UMAC version: Major - 0x%x, Minor - 0x%x\n", + palive2->umac_major, palive2->umac_minor); + } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { + palive = (void *)pkt->data; + + mvm->error_event_table = + le32_to_cpu(palive->error_event_table_ptr); + mvm->log_event_table = + le32_to_cpu(palive->log_event_table_ptr); + alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); + mvm->umac_error_event_table = + le32_to_cpu(palive->error_info_addr); + mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr); + mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size); + + alive_data->valid = le16_to_cpu(palive->status) == + IWL_ALIVE_STATUS_OK; + if (mvm->umac_error_event_table) + mvm->support_umac_log = true; + + IWL_DEBUG_FW(mvm, + "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", + le16_to_cpu(palive->status), palive->ver_type, + palive->ver_subtype, palive->flags); + + IWL_DEBUG_FW(mvm, + "UMAC version: Major - 0x%x, Minor - 0x%x\n", + le32_to_cpu(palive->umac_major), + le32_to_cpu(palive->umac_minor)); + } + + return true; +} + +static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_phy_db *phy_db = data; + + if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { + WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); + return true; + } + + WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC)); + + return false; +} + +static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, + enum iwl_ucode_type ucode_type) +{ + struct iwl_notification_wait alive_wait; + struct iwl_mvm_alive_data alive_data; + const struct fw_img *fw; + int ret, i; + enum iwl_ucode_type old_type = mvm->cur_ucode; + static const u16 alive_cmd[] = { MVM_ALIVE }; + struct iwl_sf_region st_fwrd_space; + + if (ucode_type == IWL_UCODE_REGULAR && + iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE)) + fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER); + else + fw = iwl_get_ucode_image(mvm, ucode_type); + if (WARN_ON(!fw)) + return -EINVAL; + mvm->cur_ucode = ucode_type; + mvm->ucode_loaded = false; + + iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, + alive_cmd, ARRAY_SIZE(alive_cmd), + iwl_alive_fn, &alive_data); + + ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT); + if (ret) { + mvm->cur_ucode = old_type; + iwl_remove_notification(&mvm->notif_wait, &alive_wait); + return ret; + } 
+ + /* + * Some things may run in the background now, but we + * just wait for the ALIVE notification here. + */ + ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, + MVM_UCODE_ALIVE_TIMEOUT); + if (ret) { + if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + IWL_ERR(mvm, + "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", + iwl_read_prph(mvm->trans, SB_CPU_1_STATUS), + iwl_read_prph(mvm->trans, SB_CPU_2_STATUS)); + mvm->cur_ucode = old_type; + return ret; + } + + if (!alive_data.valid) { + IWL_ERR(mvm, "Loaded ucode is not valid!\n"); + mvm->cur_ucode = old_type; + return -EIO; + } + + /* + * update the sdio allocation according to the pointer we get in the + * alive notification. + */ + st_fwrd_space.addr = mvm->sf_space.addr; + st_fwrd_space.size = mvm->sf_space.size; + ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space); + if (ret) { + IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret); + return ret; + } + + iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); + + /* + * configure and operate fw paging mechanism. + * driver configures the paging flow only once, CPU2 paging image + * included in the IWL_UCODE_INIT image. + */ + if (fw->paging_mem_size) { + /* + * When dma is not enabled, the driver needs to copy / write + * the downloaded / uploaded page to / from the smem. + * This gets the location of the place were the pages are + * stored. + */ + if (!is_device_dma_capable(mvm->trans->dev)) { + ret = iwl_trans_get_paging_item(mvm); + if (ret) { + IWL_ERR(mvm, "failed to get FW paging item\n"); + return ret; + } + } + + ret = iwl_save_fw_paging(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to save the FW paging image\n"); + return ret; + } + + ret = iwl_send_paging_cmd(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to send the paging cmd\n"); + iwl_free_fw_paging(mvm); + return ret; + } + } + + /* + * Note: all the queues are enabled as part of the interface + * initialization, but in firmware restart scenarios they + * could be stopped, so wake them up. In firmware restart, + * mac80211 will have the queues stopped as well until the + * reconfiguration completes. During normal startup, they + * will be empty. 
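+ * The reset below therefore only re-establishes the driver's own + * bookkeeping: the command queue is the only one marked as in use + * (hw_queue_refcount of 1) and every mac80211 stop counter starts + * again from zero.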
+ */ + + memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); + mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; + + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) + atomic_set(&mvm->mac80211_queue_stop_count[i], 0); + + mvm->ucode_loaded = true; + + return 0; +} + +static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) +{ + struct iwl_phy_cfg_cmd phy_cfg_cmd; + enum iwl_ucode_type ucode_type = mvm->cur_ucode; + + /* Set parameters */ + phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); + phy_cfg_cmd.calib_control.event_trigger = + mvm->fw->default_calib[ucode_type].event_trigger; + phy_cfg_cmd.calib_control.flow_trigger = + mvm->fw->default_calib[ucode_type].flow_trigger; + + IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", + phy_cfg_cmd.phy_cfg); + + return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0, + sizeof(phy_cfg_cmd), &phy_cfg_cmd); +} + +int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) +{ + struct iwl_notification_wait calib_wait; + static const u16 init_complete[] = { + INIT_COMPLETE_NOTIF, + CALIB_RES_NOTIF_PHY_DB + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(mvm->calibrating)) + return 0; + + iwl_init_notification_wait(&mvm->notif_wait, + &calib_wait, + init_complete, + ARRAY_SIZE(init_complete), + iwl_wait_phy_db_entry, + mvm->phy_db); + + /* Will also start the device */ + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); + if (ret) { + IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); + goto error; + } + + ret = iwl_send_bt_init_conf(mvm); + if (ret) + goto error; + + /* Read the NVM only at driver load time, no need to do this twice */ + if (read_nvm) { + /* Read nvm */ + ret = iwl_nvm_init(mvm, true); + if (ret) { + IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); + goto error; + } + } + + /* In case we read the NVM from external file, load it to the NIC */ + if (mvm->nvm_file_name) + iwl_mvm_load_nvm_to_nic(mvm); + + ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); + WARN_ON(ret); + + /* + * abort after reading the nvm in case RF Kill is on, we will complete + * the init seq later when RF kill will switch to off + */ + if (iwl_mvm_is_radio_hw_killed(mvm)) { + IWL_DEBUG_RF_KILL(mvm, + "jump over all phy activities due to RF kill\n"); + iwl_remove_notification(&mvm->notif_wait, &calib_wait); + ret = 1; + goto out; + } + + mvm->calibrating = true; + + /* Send TX valid antennas before triggering calibrations */ + ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); + if (ret) + goto error; + + /* + * Send phy configurations command to init uCode + * to start the 16.0 uCode init image internal calibrations. + */ + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", + ret); + goto error; + } + + /* + * Some things may run in the background now, but we + * just wait for the calibration complete notification. 
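+ * The wait below is bounded by MVM_UCODE_CALIB_TIMEOUT, i.e. 2*HZ or + * roughly two seconds with the definition at the top of this file.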
+ */ + ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, + MVM_UCODE_CALIB_TIMEOUT); + + if (ret && iwl_mvm_is_radio_hw_killed(mvm)) { + IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); + ret = 1; + } + goto out; + +error: + iwl_remove_notification(&mvm->notif_wait, &calib_wait); +out: + mvm->calibrating = false; + if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { + /* we want to debug INIT and we have no NVM - fake */ + mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + + sizeof(struct ieee80211_channel) + + sizeof(struct ieee80211_rate), + GFP_KERNEL); + if (!mvm->nvm_data) + return -ENOMEM; + mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; + mvm->nvm_data->bands[0].n_channels = 1; + mvm->nvm_data->bands[0].n_bitrates = 1; + mvm->nvm_data->bands[0].bitrates = + (void *)mvm->nvm_data->channels + 1; + mvm->nvm_data->bands[0].bitrates->hw_value = 10; + } + + return ret; +} + +static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) +{ + struct iwl_host_cmd cmd = { + .id = SHARED_MEM_CFG, + .flags = CMD_WANT_SKB, + .data = { NULL, }, + .len = { 0, }, + }; + struct iwl_rx_packet *pkt; + struct iwl_shared_mem_cfg *mem_cfg; + u32 i; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) + return; + + pkt = cmd.resp_pkt; + mem_cfg = (void *)pkt->data; + + mvm->shared_mem_cfg.shared_mem_addr = + le32_to_cpu(mem_cfg->shared_mem_addr); + mvm->shared_mem_cfg.shared_mem_size = + le32_to_cpu(mem_cfg->shared_mem_size); + mvm->shared_mem_cfg.sample_buff_addr = + le32_to_cpu(mem_cfg->sample_buff_addr); + mvm->shared_mem_cfg.sample_buff_size = + le32_to_cpu(mem_cfg->sample_buff_size); + mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr); + for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) + mvm->shared_mem_cfg.txfifo_size[i] = + le32_to_cpu(mem_cfg->txfifo_size[i]); + for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) + mvm->shared_mem_cfg.rxfifo_size[i] = + le32_to_cpu(mem_cfg->rxfifo_size[i]); + mvm->shared_mem_cfg.page_buff_addr = + le32_to_cpu(mem_cfg->page_buff_addr); + mvm->shared_mem_cfg.page_buff_size = + le32_to_cpu(mem_cfg->page_buff_size); + IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); + + iwl_free_resp(&cmd); +} + +int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, + struct iwl_mvm_dump_desc *desc, + struct iwl_fw_dbg_trigger_tlv *trigger) +{ + unsigned int delay = 0; + + if (trigger) + delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); + + if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) + return -EBUSY; + + if (WARN_ON(mvm->fw_dump_desc)) + iwl_mvm_free_fw_dump_desc(mvm); + + IWL_WARN(mvm, "Collecting data: trigger %d fired.\n", + le32_to_cpu(desc->trig_desc.type)); + + mvm->fw_dump_desc = desc; + mvm->fw_dump_trig = trigger; + + queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); + + return 0; +} + +int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger) +{ + struct iwl_mvm_dump_desc *desc; + + desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); + if (!desc) + return -ENOMEM; + + desc->len = len; + desc->trig_desc.type = cpu_to_le32(trig); + memcpy(desc->trig_desc.data, str, len); + + return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); +} + +int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) 
+{ + u16 occurrences = le16_to_cpu(trigger->occurrences); + int ret, len = 0; + char buf[64]; + + if (!occurrences) + return 0; + + if (fmt) { + va_list ap; + + buf[sizeof(buf) - 1] = '\0'; + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + /* check for truncation */ + if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) + buf[sizeof(buf) - 1] = '\0'; + + len = strlen(buf) + 1; + } + + ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len, + trigger); + + if (ret) + return ret; + + trigger->occurrences = cpu_to_le16(occurrences - 1); + return 0; +} + +static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) +{ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) + iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); + else + iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); +} + +int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) +{ + u8 *ptr; + int ret; + int i; + + if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv), + "Invalid configuration %d\n", conf_id)) + return -EINVAL; + + /* EARLY START - firmware's configuration is hard coded */ + if ((!mvm->fw->dbg_conf_tlv[conf_id] || + !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && + conf_id == FW_DBG_START_FROM_ALIVE) { + iwl_mvm_restart_early_start(mvm); + return 0; + } + + if (!mvm->fw->dbg_conf_tlv[conf_id]) + return -EINVAL; + + if (mvm->fw_dbg_conf != FW_DBG_INVALID) + IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n", + mvm->fw_dbg_conf); + + /* Send all HCMDs for configuring the FW debug */ + ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd; + for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { + struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0, + le16_to_cpu(cmd->len), cmd->data); + if (ret) + return ret; + + ptr += sizeof(*cmd); + ptr += le16_to_cpu(cmd->len); + } + + mvm->fw_dbg_conf = conf_id; + return ret; +} + +static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) +{ + struct iwl_ltr_config_cmd cmd = { + .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), + }; + + if (!mvm->trans->ltr_enabled) + return 0; + + return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, + sizeof(cmd), &cmd); +} + +int iwl_mvm_up(struct iwl_mvm *mvm) +{ + int ret, i; + struct ieee80211_channel *chan; + struct cfg80211_chan_def chandef; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_trans_start_hw(mvm->trans); + if (ret) + return ret; + + /* + * If we haven't completed the run of the init ucode during + * module loading, load init ucode now + * (for example, if we were in RFKILL) + */ + ret = iwl_run_init_mvm_ucode(mvm, false); + if (ret && !iwlmvm_mod_params.init_dbg) { + IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); + /* this can't happen */ + if (WARN_ON(ret > 0)) + ret = -ERFKILL; + goto error; + } + if (!iwlmvm_mod_params.init_dbg) { + /* + * Stop and start the transport without entering low power + * mode. This will save the state of other components on the + * device that are triggered by the INIT firwmare (MFUART). 
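+ * (Both calls below pass false as their second argument; assuming that + * parameter is the usual low_power flag, the device is kept out of low + * power across the stop/start cycle for exactly this reason.)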
+ */ + _iwl_trans_stop_device(mvm->trans, false); + ret = _iwl_trans_start_hw(mvm->trans, false); + if (ret) + goto error; + } + + if (iwlmvm_mod_params.init_dbg) + return 0; + + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + iwl_mvm_get_shared_mem_conf(mvm); + + ret = iwl_mvm_sf_update(mvm, NULL, false); + if (ret) + IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); + + mvm->fw_dbg_conf = FW_DBG_INVALID; + /* if we have a destination, assume EARLY START */ + if (mvm->fw->dbg_dest_tlv) + mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE; + iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE); + + ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); + if (ret) + goto error; + + ret = iwl_send_bt_init_conf(mvm); + if (ret) + goto error; + + /* Send phy db control command and then phy db calibration*/ + ret = iwl_send_phy_db_data(mvm->phy_db); + if (ret) + goto error; + + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; + + /* init the fw <-> mac80211 STA mapping */ + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + + mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + + /* reset quota debouncing buffer - 0xff will yield invalid data */ + memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); + + /* Add auxiliary station for scanning */ + ret = iwl_mvm_add_aux_sta(mvm); + if (ret) + goto error; + + /* Add all the PHY contexts */ + chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + for (i = 0; i < NUM_PHY_CTX; i++) { + /* + * The channel used here isn't relevant as it's + * going to be overwritten in the other flows. + * For now use the first channel we have. + */ + ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], + &chandef, 1, 1); + if (ret) + goto error; + } + + /* Initialize tx backoffs to the minimal possible */ + iwl_mvm_tt_tx_backoff(mvm, 0); + + WARN_ON(iwl_mvm_config_ltr(mvm)); + + ret = iwl_mvm_power_update_device(mvm); + if (ret) + goto error; + + /* + * RTNL is not taken during Ct-kill, but we don't need to scan/Tx + * anyway, so don't init MCC. 
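+ * In practice the check below simply skips iwl_mvm_init_mcc() for as + * long as IWL_MVM_STATUS_HW_CTKILL is set.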
+ */ + if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) { + ret = iwl_mvm_init_mcc(mvm); + if (ret) + goto error; + } + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + ret = iwl_mvm_config_scan(mvm); + if (ret) + goto error; + } + + if (iwl_mvm_is_csum_supported(mvm) && + mvm->cfg->features & NETIF_F_RXCSUM) + iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3); + + /* allow FW/transport low power modes if not during restart */ + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); + + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); + return 0; + error: + iwl_trans_stop_device(mvm->trans); + return ret; +} + +int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) +{ + int ret, i; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_trans_start_hw(mvm->trans); + if (ret) + return ret; + + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); + if (ret) { + IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret); + goto error; + } + + ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); + if (ret) + goto error; + + /* Send phy db control command and then phy db calibration*/ + ret = iwl_send_phy_db_data(mvm->phy_db); + if (ret) + goto error; + + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; + + /* init the fw <-> mac80211 STA mapping */ + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + + /* Add auxiliary station for scanning */ + ret = iwl_mvm_add_aux_sta(mvm); + if (ret) + goto error; + + return 0; + error: + iwl_trans_stop_device(mvm->trans); + return ret; +} + +void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; + u32 flags = le32_to_cpu(card_state_notif->flags); + + IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_KILL_CARD_DISABLED) ? + "Reached" : "Not reached"); +} + +void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; + + IWL_DEBUG_INFO(mvm, + "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", + le32_to_cpu(mfuart_notif->installed_ver), + le32_to_cpu(mfuart_notif->external_ver), + le32_to_cpu(mfuart_notif->status), + le32_to_cpu(mfuart_notif->duration)); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c new file mode 100644 index 000000000000..e3b3cf4dbd77 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -0,0 +1,136 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include +#include "iwl-io.h" +#include "iwl-csr.h" +#include "mvm.h" + +/* Set led register on */ +static void iwl_mvm_led_enable(struct iwl_mvm *mvm) +{ + iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); +} + +/* Set led register off */ +static void iwl_mvm_led_disable(struct iwl_mvm *mvm) +{ + iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF); +} + +static void iwl_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led); + if (brightness > 0) + iwl_mvm_led_enable(mvm); + else + iwl_mvm_led_disable(mvm); +} + +int iwl_mvm_leds_init(struct iwl_mvm *mvm) +{ + int mode = iwlwifi_mod_params.led_mode; + int ret; + + switch (mode) { + case IWL_LED_BLINK: + IWL_ERR(mvm, "Blink led mode not supported, used default\n"); + case IWL_LED_DEFAULT: + case IWL_LED_RF_STATE: + mode = IWL_LED_RF_STATE; + break; + case IWL_LED_DISABLE: + IWL_INFO(mvm, "Led disabled\n"); + return 0; + default: + return -EINVAL; + } + + mvm->led.name = kasprintf(GFP_KERNEL, "%s-led", + wiphy_name(mvm->hw->wiphy)); + mvm->led.brightness_set = iwl_led_brightness_set; + mvm->led.max_brightness = 1; + + if (mode == IWL_LED_RF_STATE) + mvm->led.default_trigger = + ieee80211_get_radio_led_name(mvm->hw); + + ret = led_classdev_register(mvm->trans->dev, &mvm->led); + if (ret) { + kfree(mvm->led.name); + IWL_INFO(mvm, "Failed to enable led\n"); + return ret; + } + + return 0; +} + +void iwl_mvm_leds_exit(struct iwl_mvm *mvm) +{ + if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE) + return; + + led_classdev_unregister(&mvm->led); + kfree(mvm->led.name); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c new file mode 100644 index 000000000000..ad7ad720d2e7 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -0,0 +1,1452 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include "iwl-io.h" +#include "iwl-prph.h" +#include "fw-api.h" +#include "mvm.h" +#include "time-event.h" + +const u8 iwl_mvm_ac_to_tx_fifo[] = { + IWL_MVM_TX_FIFO_VO, + IWL_MVM_TX_FIFO_VI, + IWL_MVM_TX_FIFO_BE, + IWL_MVM_TX_FIFO_BK, +}; + +struct iwl_mvm_mac_iface_iterator_data { + struct iwl_mvm *mvm; + struct ieee80211_vif *vif; + unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; + unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; + enum iwl_tsf_id preferred_tsf; + bool found_vif; +}; + +struct iwl_mvm_hw_queues_iface_iterator_data { + struct ieee80211_vif *exclude_vif; + unsigned long used_hw_queues; +}; + +static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_mac_iface_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 min_bi; + + /* Skip the interface for which we are trying to assign a tsf_id */ + if (vif == data->vif) + return; + + /* + * The TSF is a hardware/firmware resource, there are 4 and + * the driver should assign and free them as needed. However, + * there are cases where 2 MACs should share the same TSF ID + * for the purpose of clock sync, an optimization to avoid + * clock drift causing overlapping TBTTs/DTIMs for a GO and + * client in the system. + * + * The firmware will decide according to the MAC type which + * will be the master and slave. Clients that need to sync + * with a remote station will be the master, and an AP or GO + * will be the slave. + * + * Depending on the new interface type it can be slaved to + * or become the master of an existing interface. 
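+ * + * Illustrative example (numbers not taken from this code): a new client + * whose BSS uses a 100 TU beacon interval, added while an existing AP + * interface beacons every 200 TU, passes the modulo check below + * ((100 - 200) % 100 == 0 with min_bi == 100), so the client adopts the + * AP's tsf_id instead of consuming a new one.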
+ */ + switch (data->vif->type) { + case NL80211_IFTYPE_STATION: + /* + * The new interface is a client, so if the one we're iterating + * is an AP, and the beacon interval of the AP is a multiple or + * divisor of the beacon interval of the client, the same TSF + * should be used to avoid drift between the new client and + * existing AP. The existing AP will get drift updates from the + * new client context in this case. + */ + if (vif->type != NL80211_IFTYPE_AP || + data->preferred_tsf != NUM_TSF_IDS || + !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) + break; + + min_bi = min(data->vif->bss_conf.beacon_int, + vif->bss_conf.beacon_int); + + if (!min_bi) + break; + + if ((data->vif->bss_conf.beacon_int - + vif->bss_conf.beacon_int) % min_bi == 0) { + data->preferred_tsf = mvmvif->tsf_id; + return; + } + break; + + case NL80211_IFTYPE_AP: + /* + * The new interface is AP/GO, so if its beacon interval is a + * multiple or a divisor of the beacon interval of an existing + * interface, it should get drift updates from an existing + * client or use the same TSF as an existing GO. There's no + * drift between TSFs internally but if they used different + * TSFs then a new client MAC could update one of them and + * cause drift that way. + */ + if ((vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_STATION) || + data->preferred_tsf != NUM_TSF_IDS || + !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) + break; + + min_bi = min(data->vif->bss_conf.beacon_int, + vif->bss_conf.beacon_int); + + if (!min_bi) + break; + + if ((data->vif->bss_conf.beacon_int - + vif->bss_conf.beacon_int) % min_bi == 0) { + data->preferred_tsf = mvmvif->tsf_id; + return; + } + break; + default: + /* + * For all other interface types there's no need to + * take drift into account. Either they're exclusive + * like IBSS and monitor, or we don't care much about + * their TSF (like P2P Device), but we won't be able + * to share the TSF resource. + */ + break; + } + + /* + * Unless we exited above, we can't share the TSF resource + * that the virtual interface we're iterating over is using + * with the new one, so clear the available bit and if this + * was the preferred one, reset that as well. 
+ */ + __clear_bit(mvmvif->tsf_id, data->available_tsf_ids); + + if (data->preferred_tsf == mvmvif->tsf_id) + data->preferred_tsf = NUM_TSF_IDS; +} + +/* + * Get the mask of the queues used by the vif + */ +u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) +{ + u32 qmask = 0, ac; + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + return BIT(IWL_MVM_OFFCHANNEL_QUEUE); + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) + qmask |= BIT(vif->hw_queue[ac]); + } + + if (vif->type == NL80211_IFTYPE_AP) + qmask |= BIT(vif->cab_queue); + + return qmask; +} + +static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; + + /* exclude the given vif */ + if (vif == data->exclude_vif) + return; + + data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif); +} + +static void iwl_mvm_mac_sta_hw_queues_iter(void *_data, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + /* Mark the queues used by the sta */ + data->used_hw_queues |= mvmsta->tfd_queue_msk; +} + +unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *exclude_vif) +{ + u8 sta_id; + struct iwl_mvm_hw_queues_iface_iterator_data data = { + .exclude_vif = exclude_vif, + .used_hw_queues = + BIT(IWL_MVM_OFFCHANNEL_QUEUE) | + BIT(mvm->aux_queue) | + BIT(IWL_MVM_CMD_QUEUE), + }; + + lockdep_assert_held(&mvm->mutex); + + /* mark all VIF used hw queues */ + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_iface_hw_queues_iter, &data); + + /* don't assign the same hw queues as TDLS stations */ + ieee80211_iterate_stations_atomic(mvm->hw, + iwl_mvm_mac_sta_hw_queues_iter, + &data); + + /* + * Some TDLS stations may be removed but are in the process of being + * drained. Don't touch their queues. + */ + for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) + data.used_hw_queues |= mvm->tfd_drained[sta_id]; + + return data.used_hw_queues; +} + +static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_mac_iface_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* Iterator may already find the interface being added -- skip it */ + if (vif == data->vif) { + data->found_vif = true; + return; + } + + /* Mark MAC IDs as used by clearing the available bit, and + * (below) mark TSFs as used if their existing use is not + * compatible with the new interface type. + * No locking or atomic bit operations are needed since the + * data is on the stack of the caller function. 
+ */ + __clear_bit(mvmvif->id, data->available_mac_ids); + + /* find a suitable tsf_id */ + iwl_mvm_mac_tsf_id_iter(_data, mac, vif); +} + +void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_mac_iface_iterator_data data = { + .mvm = mvm, + .vif = vif, + .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, + /* no preference yet */ + .preferred_tsf = NUM_TSF_IDS, + }; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_mac_tsf_id_iter, &data); + + if (data.preferred_tsf != NUM_TSF_IDS) + mvmvif->tsf_id = data.preferred_tsf; + else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids)) + mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, + NUM_TSF_IDS); +} + +static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_mac_iface_iterator_data data = { + .mvm = mvm, + .vif = vif, + .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 }, + .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, + /* no preference yet */ + .preferred_tsf = NUM_TSF_IDS, + .found_vif = false, + }; + u32 ac; + int ret, i; + unsigned long used_hw_queues; + + /* + * Allocate a MAC ID and a TSF for this MAC, along with the queues + * and other resources. + */ + + /* + * Before the iterator, we start with all MAC IDs and TSFs available. + * + * During iteration, all MAC IDs are cleared that are in use by other + * virtual interfaces, and all TSF IDs are cleared that can't be used + * by this new virtual interface because they're used by an interface + * that can't share it with the new one. + * At the same time, we check if there's a preferred TSF in the case + * that we should share it with another interface. + */ + + /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */ + switch (vif->type) { + case NL80211_IFTYPE_ADHOC: + break; + case NL80211_IFTYPE_STATION: + if (!vif->p2p) + break; + /* fall through */ + default: + __clear_bit(0, data.available_mac_ids); + } + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_mac_iface_iterator, &data); + + used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif); + + /* + * In the case we're getting here during resume, it's similar to + * firmware restart, and with RESUME_ALL the iterator will find + * the vif being added already. + * We don't want to reassign any IDs in either case since doing + * so would probably assign different IDs (as interfaces aren't + * necessarily added in the same order), but the old IDs were + * preserved anyway, so skip ID assignment for both resume and + * recovery. 
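+ * That is what the early return just below does once the iterator has + * already seen the vif being added.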
+ */ + if (data.found_vif) + return 0; + + /* Therefore, in recovery, we can't get here */ + if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) + return -EBUSY; + + mvmvif->id = find_first_bit(data.available_mac_ids, + NUM_MAC_INDEX_DRIVER); + if (mvmvif->id == NUM_MAC_INDEX_DRIVER) { + IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n"); + ret = -EIO; + goto exit_fail; + } + + if (data.preferred_tsf != NUM_TSF_IDS) + mvmvif->tsf_id = data.preferred_tsf; + else + mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, + NUM_TSF_IDS); + if (mvmvif->tsf_id == NUM_TSF_IDS) { + IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n"); + ret = -EIO; + goto exit_fail; + } + + mvmvif->color = 0; + + INIT_LIST_HEAD(&mvmvif->time_event_data.list); + mvmvif->time_event_data.id = TE_MAX; + + /* No need to allocate data queues to P2P Device MAC.*/ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE; + + return 0; + } + + /* Find available queues, and allocate them to the ACs */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + u8 queue = find_first_zero_bit(&used_hw_queues, + mvm->first_agg_queue); + + if (queue >= mvm->first_agg_queue) { + IWL_ERR(mvm, "Failed to allocate queue\n"); + ret = -EIO; + goto exit_fail; + } + + __set_bit(queue, &used_hw_queues); + vif->hw_queue[ac] = queue; + } + + /* Allocate the CAB queue for softAP and GO interfaces */ + if (vif->type == NL80211_IFTYPE_AP) { + u8 queue = find_first_zero_bit(&used_hw_queues, + mvm->first_agg_queue); + + if (queue >= mvm->first_agg_queue) { + IWL_ERR(mvm, "Failed to allocate cab queue\n"); + ret = -EIO; + goto exit_fail; + } + + vif->cab_queue = queue; + } else { + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + } + + mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT; + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) + mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; + + return 0; + +exit_fail: + memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); + memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue)); + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + return ret; +} + +int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + u32 ac; + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif); + if (ret) + return ret; + + switch (vif->type) { + case NL80211_IFTYPE_P2P_DEVICE: + iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_TX_FIFO_VO, 0, wdg_timeout); + break; + case NL80211_IFTYPE_AP: + iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue, + IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); + /* fall through */ + default: + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], + vif->hw_queue[ac], + iwl_mvm_ac_to_tx_fifo[ac], 0, + wdg_timeout); + break; + } + + return 0; +} + +void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + int ac; + + lockdep_assert_held(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_P2P_DEVICE: + iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT, + 0); + break; + case NL80211_IFTYPE_AP: + iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, + IWL_MAX_TID_COUNT, 0); + /* fall through */ + default: + for (ac = 0; ac < IEEE80211_NUM_ACS; 
ac++) + iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], + vif->hw_queue[ac], + IWL_MAX_TID_COUNT, 0); + } +} + +static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum ieee80211_band band, + u8 *cck_rates, u8 *ofdm_rates) +{ + struct ieee80211_supported_band *sband; + unsigned long basic = vif->bss_conf.basic_rates; + int lowest_present_ofdm = 100; + int lowest_present_cck = 100; + u8 cck = 0; + u8 ofdm = 0; + int i; + + sband = mvm->hw->wiphy->bands[band]; + + for_each_set_bit(i, &basic, BITS_PER_LONG) { + int hw = sband->bitrates[i].hw_value; + if (hw >= IWL_FIRST_OFDM_RATE) { + ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); + if (lowest_present_ofdm > hw) + lowest_present_ofdm = hw; + } else { + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + cck |= BIT(hw); + if (lowest_present_cck > hw) + lowest_present_cck = hw; + } + } + + /* + * Now we've got the basic rates as bitmaps in the ofdm and cck + * variables. This isn't sufficient though, as there might not + * be all the right rates in the bitmap. E.g. if the only basic + * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps + * and 6 Mbps because the 802.11-2007 standard says in 9.6: + * + * [...] a STA responding to a received frame shall transmit + * its Control Response frame [...] at the highest rate in the + * BSSBasicRateSet parameter that is less than or equal to the + * rate of the immediately previous frame in the frame exchange + * sequence ([...]) and that is of the same modulation class + * ([...]) as the received frame. If no rate contained in the + * BSSBasicRateSet parameter meets these conditions, then the + * control frame sent in response to a received frame shall be + * transmitted at the highest mandatory rate of the PHY that is + * less than or equal to the rate of the received frame, and + * that is of the same modulation class as the received frame. + * + * As a consequence, we need to add all mandatory rates that are + * lower than all of the basic rates to these bitmaps. + */ + + if (IWL_RATE_24M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE; + if (IWL_RATE_12M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE; + /* 6M already there or needed so always add */ + ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE; + + /* + * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. + * Note, however: + * - if no CCK rates are basic, it must be ERP since there must + * be some basic rates at all, so they're OFDM => ERP PHY + * (or we're in 5 GHz, and the cck bitmap will never be used) + * - if 11M is a basic rate, it must be ERP as well, so add 5.5M + * - if 5.5M is basic, 1M and 2M are mandatory + * - if 2M is basic, 1M is mandatory + * - if 1M is basic, that's the only valid ACK rate. + * As a consequence, it's not as complicated as it sounds, just add + * any lower rates to the ACK rate bitmap. 
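+ * + * For example (illustrative, not taken from a real configuration): if + * 11 Mbps is the only basic CCK rate, the checks below also set 5.5, 2 + * and 1 Mbps, so the resulting CCK ACK bitmap contains all four rates.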
+ */ + if (IWL_RATE_11M_INDEX < lowest_present_cck) + cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_5M_INDEX < lowest_present_cck) + cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_2M_INDEX < lowest_present_cck) + cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE; + /* 1M already there or needed so always add */ + cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE; + + *cck_rates = cck; + *ofdm_rates = ofdm; +} + +static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_ctx_cmd *cmd) +{ + /* for both sta and ap, ht_operation_mode hold the protection_mode */ + u8 protection_mode = vif->bss_conf.ht_operation_mode & + IEEE80211_HT_OP_MODE_PROTECTION; + /* The fw does not distinguish between ht and fat */ + u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; + + IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode); + /* + * See section 9.23.3.1 of IEEE 80211-2012. + * Nongreenfield HT STAs Present is not supported. + */ + switch (protection_mode) { + case IEEE80211_HT_OP_MODE_PROTECTION_NONE: + break; + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + cmd->protection_flags |= cpu_to_le32(ht_flag); + break; + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + /* Protect when channel wider than 20MHz */ + if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) + cmd->protection_flags |= cpu_to_le32(ht_flag); + break; + default: + IWL_ERR(mvm, "Illegal protection mode %d\n", + protection_mode); + break; + } +} + +static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_ctx_cmd *cmd, + const u8 *bssid_override, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_chanctx_conf *chanctx; + bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & + IEEE80211_HT_OP_MODE_PROTECTION); + u8 cck_ack_rates, ofdm_ack_rates; + const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; + int i; + + cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + cmd->action = cpu_to_le32(action); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (vif->p2p) + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA); + else + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA); + break; + case NL80211_IFTYPE_AP: + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO); + break; + case NL80211_IFTYPE_MONITOR: + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER); + break; + case NL80211_IFTYPE_P2P_DEVICE: + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE); + break; + case NL80211_IFTYPE_ADHOC: + cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS); + break; + default: + WARN_ON_ONCE(1); + } + + cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); + + memcpy(cmd->node_addr, vif->addr, ETH_ALEN); + + if (bssid) + memcpy(cmd->bssid_addr, bssid, ETH_ALEN); + else + eth_broadcast_addr(cmd->bssid_addr); + + rcu_read_lock(); + chanctx = rcu_dereference(vif->chanctx_conf); + iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band + : IEEE80211_BAND_2GHZ, + &cck_ack_rates, &ofdm_ack_rates); + rcu_read_unlock(); + + cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates); + cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); + + cmd->cck_short_preamble = + cpu_to_le32(vif->bss_conf.use_short_preamble ? + MAC_FLG_SHORT_PREAMBLE : 0); + cmd->short_slot = + cpu_to_le32(vif->bss_conf.use_short_slot ? 
+ MAC_FLG_SHORT_SLOT : 0); + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + u8 txf = iwl_mvm_ac_to_tx_fifo[i]; + + cmd->ac[txf].cw_min = + cpu_to_le16(mvmvif->queue_params[i].cw_min); + cmd->ac[txf].cw_max = + cpu_to_le16(mvmvif->queue_params[i].cw_max); + cmd->ac[txf].edca_txop = + cpu_to_le16(mvmvif->queue_params[i].txop * 32); + cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs; + cmd->ac[txf].fifos_mask = BIT(txf); + } + + /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ + if (vif->type == NL80211_IFTYPE_AP) + cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= + BIT(IWL_MVM_TX_FIFO_MCAST); + + if (vif->bss_conf.qos) + cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); + + if (vif->bss_conf.use_cts_prot) + cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); + + IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", + vif->bss_conf.use_cts_prot, + vif->bss_conf.ht_operation_mode); + if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) + cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); + if (ht_enabled) + iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd); + + cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); +} + +static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, + struct iwl_mac_ctx_cmd *cmd) +{ + int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, + sizeof(*cmd), cmd); + if (ret) + IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", + le32_to_cpu(cmd->action), ret); + return ret; +} + +static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action, bool force_assoc_off, + const u8 *bssid_override) +{ + struct iwl_mac_ctx_cmd cmd = {}; + struct iwl_mac_data_sta *ctxt_sta; + + WARN_ON(vif->type != NL80211_IFTYPE_STATION); + + /* Fill the common data for all mac context types */ + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); + + if (vif->p2p) { + struct ieee80211_p2p_noa_attr *noa = + &vif->bss_conf.p2p_noa_attr; + + cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & + IEEE80211_P2P_OPPPS_CTWINDOW_MASK); + ctxt_sta = &cmd.p2p_sta.sta; + } else { + ctxt_sta = &cmd.sta; + } + + /* We need the dtim_period to set the MAC as associated */ + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && + !force_assoc_off) { + u32 dtim_offs; + + /* + * The DTIM count counts down, so when it is N that means N + * more beacon intervals happen until the DTIM TBTT. Therefore + * add this to the current time. If that ends up being in the + * future, the firmware will handle it. + * + * Also note that the system_timestamp (which we get here as + * "sync_device_ts") and TSF timestamp aren't at exactly the + * same offset in the frame -- the TSF is at the first symbol + * of the TSF, the system timestamp is at signal acquisition + * time. This means there's an offset between them of at most + * a few hundred microseconds (24 * 8 bits + PLCP time gives + * 384us in the longest case), this is currently not relevant + * as the firmware wakes up around 2ms before the TBTT. 
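+ * + * Worked example (illustrative numbers): with a DTIM count of 2 and a + * 100 TU beacon interval, dtim_offs below becomes 200 TU, i.e. + * 200 * 1024 = 204800 usec added on top of the last sync timestamps.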
+ */ + dtim_offs = vif->bss_conf.sync_dtim_count * + vif->bss_conf.beacon_int; + /* convert TU to usecs */ + dtim_offs *= 1024; + + ctxt_sta->dtim_tsf = + cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs); + ctxt_sta->dtim_time = + cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs); + + IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", + le64_to_cpu(ctxt_sta->dtim_tsf), + le32_to_cpu(ctxt_sta->dtim_time), + dtim_offs); + + ctxt_sta->is_assoc = cpu_to_le32(1); + } else { + ctxt_sta->is_assoc = cpu_to_le32(0); + + /* Allow beacons to pass through as long as we are not + * associated, or we do not have dtim period information. + */ + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); + } + + ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); + ctxt_sta->bi_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); + ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period); + ctxt_sta->dtim_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period)); + + ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); + ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); + + if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); + + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); + + cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | + MAC_FILTER_IN_CONTROL_AND_MGMT | + MAC_FILTER_IN_BEACON | + MAC_FILTER_IN_PROBE_REQUEST | + MAC_FILTER_IN_CRC32); + ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); + + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); + + cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | + MAC_FILTER_IN_PROBE_REQUEST); + + /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */ + cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); + cmd.ibss.bi_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); + + /* TODO: Assumes that the beacon id == mac context id */ + cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +struct iwl_mvm_go_iterator_data { + bool go_active; +}; + +static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) +{ + struct iwl_mvm_go_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif->type == NL80211_IFTYPE_AP && vif->p2p && + mvmvif->ap_ibss_active) + data->go_active = true; +} + +static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + struct iwl_mvm_go_iterator_data data = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); + + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); + + cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); + + /* Override the filter flags to accept only probe requests */ + cmd.filter_flags = 
cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + + /* + * This flag should be set to true when the P2P Device is + * discoverable and there is at least another active P2P GO. Settings + * this flag will allow the P2P Device to be discoverable on other + * channels in addition to its listen channel. + * Note that this flag should not be set in other cases as it opens the + * Rx filters on all MAC and increases the number of interrupts. + */ + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_go_iterator, &data); + + cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0); + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, + struct iwl_mac_beacon_cmd *beacon_cmd, + u8 *beacon, u32 frame_size) +{ + u32 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* The index is relative to frame start but we start looking at the + * variable-length part of the beacon. */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx+1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + beacon_cmd->tim_idx = cpu_to_le32(tim_idx); + beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]); + } else { + IWL_WARN(mvm, "Unable to find TIM Element in beacon\n"); + } +} + +static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_host_cmd cmd = { + .id = BEACON_TEMPLATE_CMD, + .flags = CMD_ASYNC, + }; + struct iwl_mac_beacon_cmd beacon_cmd = {}; + struct ieee80211_tx_info *info; + u32 beacon_skb_len; + u32 rate, tx_flags; + + if (WARN_ON(!beacon)) + return -EINVAL; + + beacon_skb_len = beacon->len; + + /* TODO: for now the beacon template id is set to be the mac context id. + * Might be better to handle it as another resource ... 
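+ *
+ * Note that the same "beacon template id == mac context id"
+ * assumption is made for the IBSS and AP/GO contexts elsewhere in
+ * this file (cmd.ibss.beacon_template and ctxt_ap->beacon_template
+ * are likewise set to mvmvif->id).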
*/ + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + info = IEEE80211_SKB_CB(beacon); + + /* Set up TX command fields */ + beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len); + beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id; + beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; + tx_flags |= + iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << + TX_CMD_FLG_BT_PRIO_POS; + beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags); + + mvm->mgmt_last_antenna_idx = + iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), + mvm->mgmt_last_antenna_idx); + + beacon_cmd.tx.rate_n_flags = + cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << + RATE_MCS_ANT_POS); + + if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) { + rate = IWL_FIRST_OFDM_RATE; + } else { + rate = IWL_FIRST_CCK_RATE; + beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); + } + beacon_cmd.tx.rate_n_flags |= + cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); + + /* Set up TX beacon command fields */ + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd, + beacon->data, + beacon_skb_len); + + /* Submit command */ + cmd.len[0] = sizeof(beacon_cmd); + cmd.data[0] = &beacon_cmd; + cmd.dataflags[0] = 0; + cmd.len[1] = beacon_skb_len; + cmd.data[1] = beacon->data; + cmd.dataflags[1] = IWL_HCMD_DFL_DUP; + + return iwl_mvm_send_cmd(mvm, &cmd); +} + +/* The beacon template for the AP/GO/IBSS has changed and needs update */ +int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct sk_buff *beacon; + int ret; + + WARN_ON(vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC); + + beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); + if (!beacon) + return -ENOMEM; + + ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); + dev_kfree_skb(beacon); + return ret; +} + +struct iwl_mvm_mac_ap_iterator_data { + struct iwl_mvm *mvm; + struct ieee80211_vif *vif; + u32 beacon_device_ts; + u16 beacon_int; +}; + +/* Find the beacon_device_ts and beacon_int for a managed interface */ +static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_mac_ap_iterator_data *data = _data; + + if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) + return; + + /* Station client has higher priority over P2P client*/ + if (vif->p2p && data->beacon_device_ts) + return; + + data->beacon_device_ts = vif->bss_conf.sync_device_ts; + data->beacon_int = vif->bss_conf.beacon_int; +} + +/* + * Fill the specific data for mac context of type AP of P2P GO + */ +static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_data_ap *ctxt_ap, + bool add) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_mac_ap_iterator_data data = { + .mvm = mvm, + .vif = vif, + .beacon_device_ts = 0 + }; + + ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); + ctxt_ap->bi_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); + ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period); + ctxt_ap->dtim_reciprocal = + cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period)); + + ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); + + /* + * Only set the beacon time when the MAC is being added, when we + * just modify the MAC then we should keep the time -- the firmware + * can otherwise have a "jumping" TBTT. 
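+ *
+ * For illustration (hypothetical numbers): rand below ends up in
+ * the range 36..63, so with a client beacon_int of 100 TU and
+ * rand = 50 the AP/GO TBTT is placed ieee80211_tu_to_usec(100 *
+ * 50 / 100) = 51200 usec after the client's synchronisation
+ * timestamp, i.e. roughly mid-way through the client's beacon
+ * interval.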
+ */ + if (add) { + /* + * If there is a station/P2P client interface which is + * associated, set the AP's TBTT far enough from the station's + * TBTT. Otherwise, set it to the current system time + */ + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_mac_ap_iterator, &data); + + if (data.beacon_device_ts) { + u32 rand = (prandom_u32() % (64 - 36)) + 36; + mvmvif->ap_beacon_time = data.beacon_device_ts + + ieee80211_tu_to_usec(data.beacon_int * rand / + 100); + } else { + mvmvif->ap_beacon_time = + iwl_read_prph(mvm->trans, + DEVICE_SYSTEM_TIME_REG); + } + } + + ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time); + ctxt_ap->beacon_tsf = 0; /* unused */ + + /* TODO: Assume that the beacon id == mac context id */ + ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id); +} + +static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); + + /* Fill the common data for all mac context types */ + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); + + /* + * pass probe requests and beacons from other APs (needed + * for ht protection); when there're no any associated station + * don't ask FW to pass beacons to prevent unnecessary wake-ups. + */ + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + if (mvmvif->ap_assoc_sta_count) { + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); + IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); + } else { + IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); + } + + /* Fill the data specific for ap mode */ + iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap, + action == FW_CTXT_ACTION_ADD); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; + + WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); + + /* Fill the common data for all mac context types */ + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); + + /* + * pass probe requests and beacons from other APs (needed + * for ht protection) + */ + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST | + MAC_FILTER_IN_BEACON); + + /* Fill the data specific for GO mode */ + iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap, + action == FW_CTXT_ACTION_ADD); + + cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & + IEEE80211_P2P_OPPPS_CTWINDOW_MASK); + cmd.go.opp_ps_enabled = + cpu_to_le32(!!(noa->oppps_ctwindow & + IEEE80211_P2P_OPPPS_ENABLE_BIT)); + + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + u32 action, bool force_assoc_off, + const u8 *bssid_override) +{ + switch (vif->type) { + case NL80211_IFTYPE_STATION: + return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, + force_assoc_off, + bssid_override); + break; + case NL80211_IFTYPE_AP: + if (!vif->p2p) + return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action); + else + return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action); + break; + case NL80211_IFTYPE_MONITOR: + return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action); + case NL80211_IFTYPE_P2P_DEVICE: + return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action); + case NL80211_IFTYPE_ADHOC: + return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action); + 
default: + break; + } + + return -EOPNOTSUPP; +} + +int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, + true, NULL); + if (ret) + return ret; + + /* will only do anything at resume from D3 time */ + iwl_mvm_set_last_nonqos_seq(mvm, vif); + + mvmvif->uploaded = true; + return 0; +} + +int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool force_assoc_off, const u8 *bssid_override) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, + force_assoc_off, bssid_override); +} + +int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_ctx_cmd cmd; + int ret; + + if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); + + ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, + sizeof(cmd), &cmd); + if (ret) { + IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); + return ret; + } + + mvmvif->uploaded = false; + + if (vif->type == NL80211_IFTYPE_MONITOR) + __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); + + return 0; +} + +static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, + struct ieee80211_vif *csa_vif, u32 gp2, + bool tx_success) +{ + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(csa_vif); + + /* Don't start to countdown from a failed beacon */ + if (!tx_success && !mvmvif->csa_countdown) + return; + + mvmvif->csa_countdown = true; + + if (!ieee80211_csa_is_complete(csa_vif)) { + int c = ieee80211_csa_update_counter(csa_vif); + + iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif); + if (csa_vif->p2p && + !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && + tx_success) { + u32 rel_time = (c + 1) * + csa_vif->bss_conf.beacon_int - + IWL_MVM_CHANNEL_SWITCH_TIME_GO; + u32 apply_time = gp2 + rel_time * 1024; + + iwl_mvm_schedule_csa_period(mvm, csa_vif, + IWL_MVM_CHANNEL_SWITCH_TIME_GO - + IWL_MVM_CHANNEL_SWITCH_MARGIN, + apply_time); + } + } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) { + /* we don't have CSA NoA scheduled yet, switch now */ + ieee80211_csa_finish(csa_vif); + RCU_INIT_POINTER(mvm->csa_vif, NULL); + } +} + +void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; + struct iwl_mvm_tx_resp *beacon_notify_hdr; + struct ieee80211_vif *csa_vif; + struct ieee80211_vif *tx_blocked_vif; + u16 status; + + lockdep_assert_held(&mvm->mutex); + + beacon_notify_hdr = &beacon->beacon_notify_hdr; + mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); + + status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK; + IWL_DEBUG_RX(mvm, + "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n", + status, beacon_notify_hdr->failure_frame, + 
le64_to_cpu(beacon->tsf), + mvm->ap_last_beacon_gp2, + le32_to_cpu(beacon_notify_hdr->initial_rate)); + + csa_vif = rcu_dereference_protected(mvm->csa_vif, + lockdep_is_held(&mvm->mutex)); + if (unlikely(csa_vif && csa_vif->csa_active)) + iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2, + (status == TX_STATUS_SUCCESS)); + + tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, + lockdep_is_held(&mvm->mutex)); + if (unlikely(tx_blocked_vif)) { + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(tx_blocked_vif); + + /* + * The channel switch is started and we have blocked the + * stations. If this is the first beacon (the timeout wasn't + * set), set the unblock timeout, otherwise countdown + */ + if (!mvm->csa_tx_block_bcn_timeout) + mvm->csa_tx_block_bcn_timeout = + IWL_MVM_CS_UNBLOCK_TX_TIMEOUT; + else + mvm->csa_tx_block_bcn_timeout--; + + /* Check if the timeout is expired, and unblock tx */ + if (mvm->csa_tx_block_bcn_timeout == 0) { + iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); + RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); + } + } +} + +static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_missed_beacons_notif *missed_beacons = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; + struct iwl_fw_dbg_trigger_tlv *trigger; + u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; + u32 rx_missed_bcon, rx_missed_bcon_since_rx; + + if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id)) + return; + + rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons); + rx_missed_bcon_since_rx = + le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx); + /* + * TODO: the threshold should be adjusted based on latency conditions, + * and/or in case of a CS flow on one of the other AP vifs. 
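+ *
+ * Note: beacon loss is only reported to mac80211 once the number
+ * of beacons missed since the last received frame exceeds
+ * IWL_MVM_MISSED_BEACONS_THRESHOLD; as the driver sets
+ * IEEE80211_HW_CONNECTION_MONITOR (see mac80211.c in this patch),
+ * mac80211 is then expected to probe the AP before disconnecting.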
+ */ + if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) > + IWL_MVM_MISSED_BEACONS_THRESHOLD) + ieee80211_beacon_loss(vif); + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, + FW_DBG_TRIGGER_MISSED_BEACONS)) + return; + + trigger = iwl_fw_dbg_get_trigger(mvm->fw, + FW_DBG_TRIGGER_MISSED_BEACONS); + bcon_trig = (void *)trigger->data; + stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); + stop_trig_missed_bcon_since_rx = + le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx); + + /* TODO: implement start trigger */ + + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + return; + + if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || + rx_missed_bcon >= stop_trig_missed_bcon) + iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); +} + +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacons_notif *mb = (void *)pkt->data; + + IWL_DEBUG_INFO(mvm, + "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", + le32_to_cpu(mb->mac_id), + le32_to_cpu(mb->consec_missed_beacons), + le32_to_cpu(mb->consec_missed_beacons_since_last_rx), + le32_to_cpu(mb->num_recvd_beacons), + le32_to_cpu(mb->num_expected_beacons)); + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_beacon_loss_iterator, + mb); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c new file mode 100644 index 000000000000..1fb684693040 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -0,0 +1,4260 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-op-mode.h" +#include "iwl-io.h" +#include "mvm.h" +#include "sta.h" +#include "time-event.h" +#include "iwl-eeprom-parse.h" +#include "iwl-phy-db.h" +#include "testmode.h" +#include "iwl-fw-error-dump.h" +#include "iwl-prph.h" +#include "iwl-csr.h" +#include "iwl-nvm-parse.h" + +static const struct ieee80211_iface_limit iwl_mvm_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; + +static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { + { + .num_different_channels = 2, + .max_interfaces = 3, + .limits = iwl_mvm_limits, + .n_limits = ARRAY_SIZE(iwl_mvm_limits), + }, +}; + +#ifdef CONFIG_PM_SLEEP +static const struct nl80211_wowlan_tcp_data_token_feature +iwl_mvm_wowlan_tcp_token_feature = { + .min_len = 0, + .max_len = 255, + .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS, +}; + +static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = { + .tok = &iwl_mvm_wowlan_tcp_token_feature, + .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN - + sizeof(struct ethhdr) - + sizeof(struct iphdr) - + sizeof(struct tcphdr), + .data_interval_max = 65535, /* __le16 in API */ + .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN - + sizeof(struct ethhdr) - + sizeof(struct iphdr) - + sizeof(struct tcphdr), + .seq = true, +}; +#endif + +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING +/* + * Use the reserved field to indicate magic values. + * these values will only be used internally by the driver, + * and won't make it to the fw (reserved will be 0). + * BC_FILTER_MAGIC_IP - configure the val of this attribute to + * be the vif's ip address. in case there is not a single + * ip address (0, or more than 1), this attribute will + * be skipped. 
+ * BC_FILTER_MAGIC_MAC - set the val of this attribute to + * the LSB bytes of the vif's mac address + */ +enum { + BC_FILTER_MAGIC_NONE = 0, + BC_FILTER_MAGIC_IP, + BC_FILTER_MAGIC_MAC, +}; + +static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { + { + /* arp */ + .discard = 0, + .frame_type = BCAST_FILTER_FRAME_TYPE_ALL, + .attrs = { + { + /* frame type - arp, hw type - ethernet */ + .offset_type = + BCAST_FILTER_OFFSET_PAYLOAD_START, + .offset = sizeof(rfc1042_header), + .val = cpu_to_be32(0x08060001), + .mask = cpu_to_be32(0xffffffff), + }, + { + /* arp dest ip */ + .offset_type = + BCAST_FILTER_OFFSET_PAYLOAD_START, + .offset = sizeof(rfc1042_header) + 2 + + sizeof(struct arphdr) + + ETH_ALEN + sizeof(__be32) + + ETH_ALEN, + .mask = cpu_to_be32(0xffffffff), + /* mark it as special field */ + .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP), + }, + }, + }, + { + /* dhcp offer bcast */ + .discard = 0, + .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4, + .attrs = { + { + /* udp dest port - 68 (bootp client)*/ + .offset_type = BCAST_FILTER_OFFSET_IP_END, + .offset = offsetof(struct udphdr, dest), + .val = cpu_to_be32(0x00440000), + .mask = cpu_to_be32(0xffff0000), + }, + { + /* dhcp - lsb bytes of client hw address */ + .offset_type = BCAST_FILTER_OFFSET_IP_END, + .offset = 38, + .mask = cpu_to_be32(0xffffffff), + /* mark it as special field */ + .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC), + }, + }, + }, + /* last filter must be empty */ + {}, +}; +#endif + +void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) +{ + if (!iwl_mvm_is_d0i3_supported(mvm)) + return; + + IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type); + spin_lock_bh(&mvm->refs_lock); + mvm->refs[ref_type]++; + spin_unlock_bh(&mvm->refs_lock); + iwl_trans_ref(mvm->trans); +} + +void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) +{ + if (!iwl_mvm_is_d0i3_supported(mvm)) + return; + + IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type); + spin_lock_bh(&mvm->refs_lock); + WARN_ON(!mvm->refs[ref_type]--); + spin_unlock_bh(&mvm->refs_lock); + iwl_trans_unref(mvm->trans); +} + +static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm, + enum iwl_mvm_ref_type except_ref) +{ + int i, j; + + if (!iwl_mvm_is_d0i3_supported(mvm)) + return; + + spin_lock_bh(&mvm->refs_lock); + for (i = 0; i < IWL_MVM_REF_COUNT; i++) { + if (except_ref == i || !mvm->refs[i]) + continue; + + IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n", + i, mvm->refs[i]); + for (j = 0; j < mvm->refs[i]; j++) + iwl_trans_unref(mvm->trans); + mvm->refs[i] = 0; + } + spin_unlock_bh(&mvm->refs_lock); +} + +bool iwl_mvm_ref_taken(struct iwl_mvm *mvm) +{ + int i; + bool taken = false; + + if (!iwl_mvm_is_d0i3_supported(mvm)) + return true; + + spin_lock_bh(&mvm->refs_lock); + for (i = 0; i < IWL_MVM_REF_COUNT; i++) { + if (mvm->refs[i]) { + taken = true; + break; + } + } + spin_unlock_bh(&mvm->refs_lock); + + return taken; +} + +int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) +{ + iwl_mvm_ref(mvm, ref_type); + + if (!wait_event_timeout(mvm->d0i3_exit_waitq, + !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), + HZ)) { + WARN_ON_ONCE(1); + iwl_mvm_unref(mvm, ref_type); + return -EIO; + } + + return 0; +} + +static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) +{ + int i; + + memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); + for (i = 0; i < NUM_PHY_CTX; i++) { + mvm->phy_ctxts[i].id = i; + mvm->phy_ctxts[i].ref = 0; + } +} + +struct ieee80211_regdomain 
*iwl_mvm_get_regdomain(struct wiphy *wiphy, + const char *alpha2, + enum iwl_mcc_source src_id, + bool *changed) +{ + struct ieee80211_regdomain *regd = NULL; + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mcc_update_resp *resp; + + IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); + + lockdep_assert_held(&mvm->mutex); + + resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); + if (IS_ERR_OR_NULL(resp)) { + IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", + PTR_ERR_OR_ZERO(resp)); + goto out; + } + + if (changed) + *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); + + regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, + __le32_to_cpu(resp->n_channels), + resp->channels, + __le16_to_cpu(resp->mcc)); + /* Store the return source id */ + src_id = resp->source_id; + kfree(resp); + if (IS_ERR_OR_NULL(regd)) { + IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", + PTR_ERR_OR_ZERO(regd)); + goto out; + } + + IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", + regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); + mvm->lar_regdom_set = true; + mvm->mcc_src = src_id; + +out: + return regd; +} + +void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) +{ + bool changed; + struct ieee80211_regdomain *regd; + + if (!iwl_mvm_is_lar_supported(mvm)) + return; + + regd = iwl_mvm_get_current_regdomain(mvm, &changed); + if (!IS_ERR_OR_NULL(regd)) { + /* only update the regulatory core if changed */ + if (changed) + regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); + + kfree(regd); + } +} + +struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, + bool *changed) +{ + return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", + iwl_mvm_is_wifi_mcc_supported(mvm) ? 
+ MCC_SOURCE_GET_CURRENT : + MCC_SOURCE_OLD_FW, changed); +} + +int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) +{ + enum iwl_mcc_source used_src; + struct ieee80211_regdomain *regd; + int ret; + bool changed; + const struct ieee80211_regdomain *r = + rtnl_dereference(mvm->hw->wiphy->regd); + + if (!r) + return -ENOENT; + + /* save the last source in case we overwrite it below */ + used_src = mvm->mcc_src; + if (iwl_mvm_is_wifi_mcc_supported(mvm)) { + /* Notify the firmware we support wifi location updates */ + regd = iwl_mvm_get_current_regdomain(mvm, NULL); + if (!IS_ERR_OR_NULL(regd)) + kfree(regd); + } + + /* Now set our last stored MCC and source */ + regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, + &changed); + if (IS_ERR_OR_NULL(regd)) + return -EIO; + + /* update cfg80211 if the regdomain was changed */ + if (changed) + ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); + else + ret = 0; + + kfree(regd); + return ret; +} + +int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) +{ + struct ieee80211_hw *hw = mvm->hw; + int num_mac, ret, i; + static const u32 mvm_ciphers[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + }; + + /* Tell mac80211 our characteristics */ + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, SPECTRUM_MGMT); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, QUEUE_CONTROL); + ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, TIMING_BEACON_ONLY); + ieee80211_hw_set(hw, CONNECTION_MONITOR); + ieee80211_hw_set(hw, CHANCTX_STA_CSA); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); + + hw->queues = mvm->first_agg_queue; + hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; + hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | + IEEE80211_RADIOTAP_MCS_HAVE_STBC; + hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | + IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; + hw->rate_control_algorithm = "iwl-mvm-rs"; + hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; + hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; + + BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2); + memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); + hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); + hw->wiphy->cipher_suites = mvm->ciphers; + + /* + * Enable 11w if advertised by firmware and software crypto + * is not enabled (as the firmware will interpret some mgmt + * packets, so enabling it with software crypto isn't safe) + */ + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && + !iwlwifi_mod_params.sw_crypto) { + ieee80211_hw_set(hw, MFP_CAPABLE); + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_AES_CMAC; + hw->wiphy->n_cipher_suites++; + } + + /* currently FW API supports only one optional cipher scheme */ + if (mvm->fw->cs[0].cipher) { + mvm->hw->n_cipher_schemes = 1; + mvm->hw->cipher_schemes = &mvm->fw->cs[0]; + mvm->ciphers[hw->wiphy->n_cipher_suites] = + mvm->fw->cs[0].cipher; + hw->wiphy->n_cipher_suites++; + } + + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); + hw->wiphy->features |= + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_ND_RANDOM_MAC_ADDR; + + hw->sta_data_size = sizeof(struct iwl_mvm_sta); + hw->vif_data_size = sizeof(struct iwl_mvm_vif); + hw->chanctx_data_size = 
sizeof(u16); + + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_ADHOC); + + hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; + if (iwl_mvm_is_lar_supported(mvm)) + hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; + else + hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | + REGULATORY_DISABLE_BEACON_HINTS; + + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD) + hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + + hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + + hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwl_mvm_iface_combinations); + + hw->wiphy->max_remain_on_channel_duration = 10000; + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + /* we can compensate an offset of up to 3 channels = 15 MHz */ + hw->wiphy->max_adj_channel_rssi_comp = 3 * 5; + + /* Extract MAC address */ + memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); + hw->wiphy->addresses = mvm->addresses; + hw->wiphy->n_addresses = 1; + + /* Extract additional MAC addresses if available */ + num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? + min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; + + for (i = 1; i < num_mac; i++) { + memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, + ETH_ALEN); + mvm->addresses[i].addr[5]++; + hw->wiphy->n_addresses++; + } + + iwl_mvm_reset_phy_ctxts(mvm); + + hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + + BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); + BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || + IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; + else + mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; + + if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) { + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEAMFORMER) && + fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_LQ_SS_PARAMS)) + hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + } + + hw->wiphy->hw_version = mvm->trans->hw_id; + + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) + hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + else + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; + hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; + /* we create the 802.11 header and zero length SSID IE. */ + hw->wiphy->max_sched_scan_ie_len = + SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; + hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; + hw->wiphy->max_sched_scan_plan_interval = U16_MAX; + + /* + * the firmware uses u8 for num of iterations, but 0xff is saved for + * infinite loop, so the maximum number of iterations is actually 254. 
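+ *
+ * (0xff being reserved leaves 0xfe, i.e. 254, as the largest
+ * usable iteration count, which is the value assigned just below.)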
+ */ + hw->wiphy->max_sched_scan_plan_iterations = 254; + + hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | + NL80211_FEATURE_LOW_PRIORITY_SCAN | + NL80211_FEATURE_P2P_GO_OPPPS | + NL80211_FEATURE_DYNAMIC_SMPS | + NL80211_FEATURE_STATIC_SMPS | + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) + hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) + hw->wiphy->features |= NL80211_FEATURE_QUIET; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) + hw->wiphy->features |= + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) + hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; + + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; + +#ifdef CONFIG_PM_SLEEP + if (iwl_mvm_is_d0i3_supported(mvm) && + device_can_wakeup(mvm->trans->dev)) { + mvm->wowlan.flags = WIPHY_WOWLAN_ANY; + hw->wiphy->wowlan = &mvm->wowlan; + } + + if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + mvm->trans->ops->d3_suspend && + mvm->trans->ops->d3_resume && + device_can_wakeup(mvm->trans->dev)) { + mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE | + WIPHY_WOWLAN_NET_DETECT; + if (!iwlwifi_mod_params.sw_crypto) + mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_4WAY_HANDSHAKE; + + mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; + mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; + mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; + mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES; + mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; + hw->wiphy->wowlan = &mvm->wowlan; + } +#endif + +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING + /* assign default bcast filtering configuration */ + mvm->bcast_filters = iwl_mvm_default_bcast_filters; +#endif + + ret = iwl_mvm_leds_init(mvm); + if (ret) + return ret; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { + IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + ieee80211_hw_set(hw, TDLS_WIDER_BW); + } + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { + IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); + hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; + } + + hw->netdev_features |= mvm->cfg->features; + if (!iwl_mvm_is_csum_supported(mvm)) + hw->netdev_features &= ~NETIF_F_RXCSUM; + + ret = ieee80211_register_hw(mvm->hw); + if (ret) + iwl_mvm_leds_exit(mvm); + + return ret; +} + +static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct sk_buff *skb) +{ + struct iwl_mvm_sta *mvmsta; + bool defer = false; + + /* + * double check the IN_D0I3 flag both before and after + * taking the spinlock, in order to prevent taking + * the spinlock when not needed. + */ + if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))) + return false; + + spin_lock(&mvm->d0i3_tx_lock); + /* + * testing the flag again ensures the skb dequeue + * loop (on d0i3 exit) hasn't run yet. 
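+ *
+ * In other words, the lockless test above only avoids taking
+ * d0i3_tx_lock in the common case, while this second test under
+ * the lock guarantees that no frame is queued to d0i3_tx after
+ * the exit path has already flushed that queue.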
+ */ + if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) + goto out; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (mvmsta->sta_id == IWL_MVM_STATION_COUNT || + mvmsta->sta_id != mvm->d0i3_ap_sta_id) + goto out; + + __skb_queue_tail(&mvm->d0i3_tx, skb); + ieee80211_stop_queues(mvm->hw); + + /* trigger wakeup */ + iwl_mvm_ref(mvm, IWL_MVM_REF_TX); + iwl_mvm_unref(mvm, IWL_MVM_REF_TX); + + defer = true; +out: + spin_unlock(&mvm->d0i3_tx_lock); + return defer; +} + +static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_sta *sta = control->sta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + + if (iwl_mvm_is_radio_killed(mvm)) { + IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); + goto drop; + } + + if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && + !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) + goto drop; + + /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ + if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER && + ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_is_deauth(hdr->frame_control) && + !ieee80211_is_disassoc(hdr->frame_control) && + !ieee80211_is_action(hdr->frame_control))) + sta = NULL; + + if (sta) { + if (iwl_mvm_defer_tx(mvm, sta, skb)) + return; + if (iwl_mvm_tx_skb(mvm, skb, sta)) + goto drop; + return; + } + + if (iwl_mvm_tx_skb_non_sta(mvm, skb)) + goto drop; + return; + drop: + ieee80211_free_txskb(hw, skb); +} + +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + return false; + return true; +} + +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + return false; + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) + return true; + + /* enabled by default */ + return true; +} + +#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ + do { \ + if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ + break; \ + iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \ + } while (0) + +static void +iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, + enum ieee80211_ampdu_mlme_action action) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + switch (action) { + case IEEE80211_AMPDU_TX_OPERATIONAL: { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + + CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, + "TX AGG START: MAC %pM tid %d ssn %d\n", + sta->addr, tid, tid_data->ssn); + break; + } + case IEEE80211_AMPDU_TX_STOP_CONT: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, + "TX AGG STOP: MAC %pM tid %d\n", + sta->addr, tid); + break; + case IEEE80211_AMPDU_RX_START: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, + "RX AGG START: MAC %pM tid %d ssn %d\n", + sta->addr, tid, rx_ba_ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, + "RX AGG STOP: MAC %pM tid %d\n", + sta->addr, tid); + break; + default: + break; + } +} + +static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, + u16 *ssn, u8 buf_size, bool amsdu) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + bool tx_agg_ref = false; + + IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", + sta->addr, tid, action); + + if (!(mvm->nvm_data->sku_cap_11n_enable)) + return -EACCES; + + /* return from D0i3 before starting a new Tx aggregation */ + switch (action) { + case IEEE80211_AMPDU_TX_START: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + case IEEE80211_AMPDU_TX_OPERATIONAL: + /* + * for tx start, wait synchronously until D0i3 exit to + * get the correct sequence number for the tid. + * additionally, some other ampdu actions use direct + * target access, which is not handled automatically + * by the trans layer (unlike commands), so wait for + * d0i3 exit in these cases as well. 
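+ *
+ * Note: iwl_mvm_ref_sync(), defined earlier in this file, takes
+ * the reference and then blocks on d0i3_exit_waitq until
+ * IWL_MVM_STATUS_IN_D0I3 is cleared; on timeout it drops the
+ * reference itself and returns -EIO, which is why the error path
+ * below simply returns ret.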
+ */ + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG); + if (ret) + return ret; + + tx_agg_ref = true; + break; + default: + break; + } + + mutex_lock(&mvm->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + if (!iwl_enable_rx_ampdu(mvm->cfg)) { + ret = -EINVAL; + break; + } + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true); + break; + case IEEE80211_AMPDU_RX_STOP: + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); + break; + case IEEE80211_AMPDU_TX_START: + if (!iwl_enable_tx_ampdu(mvm->cfg)) { + ret = -EINVAL; + break; + } + ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size); + break; + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + break; + } + + if (!ret) { + u16 rx_ba_ssn = 0; + + if (action == IEEE80211_AMPDU_RX_START) + rx_ba_ssn = *ssn; + + iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, + rx_ba_ssn, action); + } + mutex_unlock(&mvm->mutex); + + /* + * If the tid is marked as started, we won't use it for offloaded + * traffic on the next D0i3 entry. It's safe to unref. + */ + if (tx_agg_ref) + iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG); + + return ret; +} + +static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->uploaded = false; + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); + spin_unlock_bh(&mvm->time_event_lock); + + mvmvif->phy_ctxt = NULL; + memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); +} + +static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen) +{ + const struct iwl_mvm_dump_ptrs *dump_ptrs = data; + ssize_t bytes_read; + ssize_t bytes_read_trans; + + if (offset < dump_ptrs->op_mode_len) { + bytes_read = min_t(ssize_t, count, + dump_ptrs->op_mode_len - offset); + memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset, + bytes_read); + offset += bytes_read; + count -= bytes_read; + + if (count == 0) + return bytes_read; + } else { + bytes_read = 0; + } + + if (!dump_ptrs->trans_ptr) + return bytes_read; + + offset -= dump_ptrs->op_mode_len; + bytes_read_trans = min_t(ssize_t, count, + dump_ptrs->trans_ptr->len - offset); + memcpy(buffer + bytes_read, + (u8 *)dump_ptrs->trans_ptr->data + offset, + bytes_read_trans); + + return bytes_read + bytes_read_trans; +} + +static void iwl_mvm_free_coredump(const void *data) +{ + const struct iwl_mvm_dump_ptrs *fw_error_dump = data; + + vfree(fw_error_dump->op_mode_ptr); + vfree(fw_error_dump->trans_ptr); + kfree(fw_error_dump); +} + +static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, + struct iwl_fw_error_dump_data **dump_data) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + u32 *fifo_data; + u32 fifo_len; + unsigned long flags; + int i, j; + + if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) + return; + + /* Pull RXF data from all RXFs */ + for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) { + /* + * Keep aside the additional offset that might be needed for + * next RXF + */ + u32 offset_diff = RXF_DIFF_FROM_PREV * i; + + fifo_hdr = (void 
*)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = mvm->shared_mem_cfg.rxfifo_size[i]; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + continue; + + /* Add a TLV for the RXF */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(i); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_D_SPACE + + offset_diff)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_WR_PTR + + offset_diff)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_RD_PTR + + offset_diff)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_FENCE_PTR + + offset_diff)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_SET_FENCE_MODE + + offset_diff)); + + /* Lock fence */ + iwl_trans_write_prph(mvm->trans, + RXF_SET_FENCE_MODE + offset_diff, 0x1); + /* Set fence pointer to the same place like WR pointer */ + iwl_trans_write_prph(mvm->trans, + RXF_LD_WR2FENCE + offset_diff, 0x1); + /* Set fence offset */ + iwl_trans_write_prph(mvm->trans, + RXF_LD_FENCE_OFFSET_ADDR + offset_diff, + 0x0); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (j = 0; j < fifo_len; j++) + fifo_data[j] = iwl_trans_read_prph(mvm->trans, + RXF_FIFO_RD_FENCE_INC + + offset_diff); + *dump_data = iwl_fw_error_next_data(*dump_data); + } + + /* Pull TXF data from all TXFs */ + for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = mvm->shared_mem_cfg.txfifo_size[i]; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + continue; + + /* Add a TLV for the FIFO */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(i); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_FIFO_ITEM_CNT)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_WR_PTR)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_RD_PTR)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_FENCE_PTR)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_LOCK_FENCE)); + + /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ + iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR, + TXF_WR_PTR); + + /* Dummy-read to advance the read pointer to the head */ + iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (j = 0; j < fifo_len; j++) + fifo_data[j] = iwl_trans_read_prph(mvm->trans, + TXF_READ_MODIFY_DATA); + *dump_data = iwl_fw_error_next_data(*dump_data); + } + + iwl_trans_release_nic_access(mvm->trans, &flags); +} + +void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) +{ + if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert || + !mvm->fw_dump_desc) + return; + + kfree(mvm->fw_dump_desc); + mvm->fw_dump_desc = NULL; +} + +#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ +#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ + +void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) +{ + struct iwl_fw_error_dump_file *dump_file; + 
struct iwl_fw_error_dump_data *dump_data; + struct iwl_fw_error_dump_info *dump_info; + struct iwl_fw_error_dump_mem *dump_mem; + struct iwl_fw_error_dump_trigger_desc *dump_trig; + struct iwl_mvm_dump_ptrs *fw_error_dump; + u32 sram_len, sram_ofs; + u32 file_len, fifo_data_len = 0; + u32 smem_len = mvm->cfg->smem_len; + u32 sram2_len = mvm->cfg->dccm2_len; + bool monitor_dump_only = false; + + lockdep_assert_held(&mvm->mutex); + + /* there's no point in fw dump if the bus is dead */ + if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { + IWL_ERR(mvm, "Skip fw error dump since bus is dead\n"); + return; + } + + if (mvm->fw_dump_trig && + mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) + monitor_dump_only = true; + + fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); + if (!fw_error_dump) + return; + + /* SRAM - include stack CCM if driver knows the values for it */ + if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) { + const struct fw_img *img; + + img = &mvm->fw->img[mvm->cur_ucode]; + sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; + sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; + } else { + sram_ofs = mvm->cfg->dccm_offset; + sram_len = mvm->cfg->dccm_len; + } + + /* reading RXF/TXF sizes */ + if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { + struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg; + int i; + + fifo_data_len = 0; + + /* Count RXF size */ + for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) { + if (!mem_cfg->rxfifo_size[i]) + continue; + + /* Add header info */ + fifo_data_len += mem_cfg->rxfifo_size[i] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + + for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) { + if (!mem_cfg->txfifo_size[i]) + continue; + + /* Add header info */ + fifo_data_len += mem_cfg->txfifo_size[i] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + } + + file_len = sizeof(*dump_file) + + sizeof(*dump_data) * 2 + + sram_len + sizeof(*dump_mem) + + fifo_data_len + + sizeof(*dump_info); + + /* Make room for the SMEM, if it exists */ + if (smem_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; + + /* Make room for the secondary SRAM, if it exists */ + if (sram2_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; + + /* Make room for fw's virtual image pages, if it exists */ + if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) + file_len += mvm->num_of_paging_blk * + (sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_paging) + + PAGING_BLOCK_SIZE); + + /* If we only want a monitor dump, reset the file length */ + if (monitor_dump_only) { + file_len = sizeof(*dump_file) + sizeof(*dump_data) + + sizeof(*dump_info); + } + + /* + * In 8000 HW family B-step include the ICCM (which resides separately) + */ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && + CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + IWL8260_ICCM_LEN; + + if (mvm->fw_dump_desc) + file_len += sizeof(*dump_data) + sizeof(*dump_trig) + + mvm->fw_dump_desc->len; + + dump_file = vzalloc(file_len); + if (!dump_file) { + kfree(fw_error_dump); + iwl_mvm_free_fw_dump_desc(mvm); + return; + } + + fw_error_dump->op_mode_ptr = dump_file; + + dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); + dump_data = (void *)dump_file->data; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_info)); + dump_info = (void *) dump_data->data; + 
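+
+ /*
+ * This first TLV (IWL_FW_ERROR_DUMP_DEV_FW_INFO) is filled below;
+ * every subsequent section follows the same pattern: a type/length
+ * header (struct iwl_fw_error_dump_data) followed by its payload,
+ * with iwl_fw_error_next_data() advancing to the next slot. The
+ * space for each TLV was accounted for in file_len above.
+ */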
dump_info->device_family = + mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? + cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : + cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); + dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev)); + memcpy(dump_info->fw_human_readable, mvm->fw->human_readable, + sizeof(dump_info->fw_human_readable)); + strncpy(dump_info->dev_human_readable, mvm->cfg->name, + sizeof(dump_info->dev_human_readable)); + strncpy(dump_info->bus_human_readable, mvm->dev->bus->name, + sizeof(dump_info->bus_human_readable)); + + dump_data = iwl_fw_error_next_data(dump_data); + /* We only dump the FIFOs if the FW is in error state */ + if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) + iwl_mvm_dump_fifos(mvm, &dump_data); + + if (mvm->fw_dump_desc) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_trig) + + mvm->fw_dump_desc->len); + dump_trig = (void *)dump_data->data; + memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc, + sizeof(*dump_trig) + mvm->fw_dump_desc->len); + + /* now we can free this copy */ + iwl_mvm_free_fw_dump_desc(mvm); + dump_data = iwl_fw_error_next_data(dump_data); + } + + /* In case we only want monitor dump, skip to dump trasport data */ + if (monitor_dump_only) + goto dump_trans_data; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(sram_ofs); + iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, + sram_len); + + if (smem_len) { + dump_data = iwl_fw_error_next_data(dump_data); + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); + dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); + iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, + dump_mem->data, smem_len); + } + + if (sram2_len) { + dump_data = iwl_fw_error_next_data(dump_data); + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); + iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, + dump_mem->data, sram2_len); + } + + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && + CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) { + dump_data = iwl_fw_error_next_data(dump_data); + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN + + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET); + iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET, + dump_mem->data, IWL8260_ICCM_LEN); + } + + /* Dump fw's virtual image */ + if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { + u32 i; + + for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { + struct iwl_fw_error_dump_paging *paging; + struct page *pages = + mvm->fw_paging_db[i].fw_paging_block; + + dump_data = iwl_fw_error_next_data(dump_data); + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); + dump_data->len = cpu_to_le32(sizeof(*paging) + + PAGING_BLOCK_SIZE); + paging = (void *)dump_data->data; + 
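+
+ /*
+ * Each paging TLV carries one full PAGING_BLOCK_SIZE block copied
+ * out of fw_paging_db[i]; the loop starts at i = 1, presumably
+ * because entry 0 of the paging database holds the CSS block
+ * rather than a regular data page.
+ */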
paging->index = cpu_to_le32(i); + memcpy(paging->data, page_address(pages), + PAGING_BLOCK_SIZE); + } + } + +dump_trans_data: + fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, + mvm->fw_dump_trig); + fw_error_dump->op_mode_len = file_len; + if (fw_error_dump->trans_ptr) + file_len += fw_error_dump->trans_ptr->len; + dump_file->file_len = cpu_to_le32(file_len); + + dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0, + GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump); + + mvm->fw_dump_trig = NULL; + clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); +} + +struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = { + .trig_desc = { + .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), + }, +}; + +static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) +{ + /* clear the D3 reconfig, we only need it to avoid dumping a + * firmware coredump on reconfiguration, we shouldn't do that + * on D3->D0 transition + */ + if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { + mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert; + iwl_mvm_fw_error_dump(mvm); + } + + /* cleanup all stale references (scan, roc), but keep the + * ucode_down ref until reconfig is complete + */ + iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); + + iwl_trans_stop_device(mvm->trans); + + mvm->scan_status = 0; + mvm->ps_disabled = false; + mvm->calibrating = false; + + /* just in case one was running */ + ieee80211_remain_on_channel_expired(mvm->hw); + + /* + * cleanup all interfaces, even inactive ones, as some might have + * gone down during the HW restart + */ + ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); + + mvm->p2p_device_vif = NULL; + mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + + iwl_mvm_reset_phy_ctxts(mvm); + memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); + memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); + memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); + memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); + memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); + memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old)); + memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk)); + memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk)); + + ieee80211_wake_queues(mvm->hw); + + /* clear any stale d0i3 state */ + clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); + + mvm->vif_count = 0; + mvm->rx_ba_sessions = 0; + mvm->fw_dbg_conf = FW_DBG_INVALID; + + /* keep statistics ticking */ + iwl_mvm_accu_radio_stats(mvm); +} + +int __iwl_mvm_mac_start(struct iwl_mvm *mvm) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* Clean up some internal and mac80211 state on restart */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_restart_cleanup(mvm); + + ret = iwl_mvm_up(mvm); + + if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + /* Something went wrong - we need to finish some cleanup + * that normally iwl_mvm_mac_restart_complete() below + * would do. + */ + clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + iwl_mvm_d0i3_enable_tx(mvm, NULL); + } + + return ret; +} + +static int iwl_mvm_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + /* Some hw restart cleanups must not hold the mutex */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + /* + * Make sure we are out of d0i3. 
This is needed + * to make sure the reference accounting is correct + * (and there is no stale d0i3_exit_work). + */ + wait_event_timeout(mvm->d0i3_exit_waitq, + !test_bit(IWL_MVM_STATUS_IN_D0I3, + &mvm->status), + HZ); + } + + mutex_lock(&mvm->mutex); + ret = __iwl_mvm_mac_start(mvm); + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) +{ + int ret; + + mutex_lock(&mvm->mutex); + + clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + iwl_mvm_d0i3_enable_tx(mvm, NULL); + ret = iwl_mvm_update_quotas(mvm, true, NULL); + if (ret) + IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", + ret); + + /* allow transport/FW low power modes */ + iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); + + /* + * If we have TDLS peers, remove them. We don't know the last seqno/PN + * of packets the FW sent out, so we must reconnect. + */ + iwl_mvm_teardown_tdls_peers(mvm); + + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) +{ + if (!iwl_mvm_is_d0i3_supported(mvm)) + return; + + if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) + if (!wait_event_timeout(mvm->d0i3_exit_waitq, + !test_bit(IWL_MVM_STATUS_IN_D0I3, + &mvm->status), + HZ)) + WARN_ONCE(1, "D0i3 exit on resume timed out\n"); +} + +static void +iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + switch (reconfig_type) { + case IEEE80211_RECONFIG_TYPE_RESTART: + iwl_mvm_restart_complete(mvm); + break; + case IEEE80211_RECONFIG_TYPE_SUSPEND: + iwl_mvm_resume_complete(mvm); + break; + } +} + +void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) +{ + lockdep_assert_held(&mvm->mutex); + + /* firmware counters are obviously reset now, but we shouldn't + * partially track so also clear the fw_reset_accu counters. + */ + memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); + + /* + * Disallow low power states when the FW is down by taking + * the UCODE_DOWN ref. in case of ongoing hw restart the + * ref is already taken, so don't take it again. + */ + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + + /* async_handlers_wk is now blocked */ + + /* + * The work item could be running or queued if the + * ROC time event stops just as we get here. + */ + flush_work(&mvm->roc_done_wk); + + iwl_trans_stop_device(mvm->trans); + + iwl_mvm_async_handlers_purge(mvm); + /* async_handlers_list is empty and will stay empty: HW is stopped */ + + /* the fw is stopped, the aux sta is dead: clean up driver state */ + iwl_mvm_del_aux_sta(mvm); + + /* + * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() + * won't be called in this case). + * But make sure to cleanup interfaces that have gone down before/during + * HW restart was requested. + */ + if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + ieee80211_iterate_interfaces(mvm->hw, 0, + iwl_mvm_cleanup_iterator, mvm); + + /* We shouldn't have any UIDs still set. Loop over all the UIDs to + * make sure there's nothing left there and warn if any is found. 
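+ * A leftover UID is cleared after the warning so the driver state stays consistent.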
+ */ + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + int i; + + for (i = 0; i < mvm->max_scans; i++) { + if (WARN_ONCE(mvm->scan_uid_status[i], + "UMAC scan UID %d status was not cleaned\n", + i)) + mvm->scan_uid_status[i] = 0; + } + } + + mvm->ucode_loaded = false; +} + +static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + flush_work(&mvm->d0i3_exit_work); + flush_work(&mvm->async_handlers_wk); + cancel_delayed_work_sync(&mvm->fw_dump_wk); + iwl_mvm_free_fw_dump_desc(mvm); + + mutex_lock(&mvm->mutex); + __iwl_mvm_mac_stop(mvm); + mutex_unlock(&mvm->mutex); + + /* + * The worker might have been waiting for the mutex, let it run and + * discover that its list is now empty. + */ + cancel_work_sync(&mvm->async_handlers_wk); +} + +static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) +{ + u16 i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < NUM_PHY_CTX; i++) + if (!mvm->phy_ctxts[i].ref) + return &mvm->phy_ctxts[i]; + + IWL_ERR(mvm, "No available PHY context\n"); + return NULL; +} + +static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + s16 tx_power) +{ + struct iwl_dev_tx_power_cmd cmd = { + .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), + .v2.mac_context_id = + cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), + .v2.pwr_restriction = cpu_to_le16(8 * tx_power), + }; + int len = sizeof(cmd); + + if (tx_power == IWL_DEFAULT_MAX_TX_POWER) + cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN)) + len = sizeof(cmd.v2); + + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); +} + +static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + mvmvif->mvm = mvm; + + /* + * make sure D0i3 exit is completed, otherwise a target access + * during tx queue configuration could be done when still in + * D0i3 state. + */ + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF); + if (ret) + return ret; + + /* + * Not much to do here. The stack will not allow interface + * types or combinations that we didn't advertise, so we + * don't really have to check the types. + */ + + mutex_lock(&mvm->mutex); + + /* make sure that beacon statistics don't go backwards with FW reset */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + + /* Allocate resources for the MAC context, and add it to the fw */ + ret = iwl_mvm_mac_ctxt_init(mvm, vif); + if (ret) + goto out_unlock; + + /* Counting number of interfaces is needed for legacy PM */ + if (vif->type != NL80211_IFTYPE_P2P_DEVICE) + mvm->vif_count++; + + /* + * The AP binding flow can be done only after the beacon + * template is configured (which happens only in the mac80211 + * start_ap() flow), and adding the broadcast station can happen + * only after the binding. + * In addition, since modifying the MAC before adding a bcast + * station is not allowed by the FW, delay the adding of MAC context to + * the point where we can also add the bcast station. 
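+ * Both are done later, in the iwl_mvm_start_ap_ibss() flow.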
+ * In short: there's not much we can do at this point, other than + * allocating resources :) + */ + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { + ret = iwl_mvm_alloc_bcast_sta(mvm, vif); + if (ret) { + IWL_ERR(mvm, "Failed to allocate bcast sta\n"); + goto out_release; + } + + iwl_mvm_vif_dbgfs_register(mvm, vif); + goto out_unlock; + } + + mvmvif->features |= hw->netdev_features; + + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) + goto out_release; + + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + goto out_remove_mac; + + /* beacon filtering */ + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_remove_mac; + + if (!mvm->bf_allowed_vif && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { + mvm->bf_allowed_vif = mvmvif; + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + } + + /* + * P2P_DEVICE interface does not have a channel context assigned to it, + * so a dedicated PHY context is allocated to it and the corresponding + * MAC context is bound to it at this stage. + */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + + mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->phy_ctxt) { + ret = -ENOSPC; + goto out_free_bf; + } + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (ret) + goto out_unref_phy; + + ret = iwl_mvm_add_bcast_sta(mvm, vif); + if (ret) + goto out_unbind; + + /* Save a pointer to p2p device vif, so it can later be used to + * update the p2p device MAC when a GO is started/stopped */ + mvm->p2p_device_vif = vif; + } + + iwl_mvm_vif_dbgfs_register(mvm, vif); + goto out_unlock; + + out_unbind: + iwl_mvm_binding_remove_vif(mvm, vif); + out_unref_phy: + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + out_free_bf: + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); + } + out_remove_mac: + mvmvif->phy_ctxt = NULL; + iwl_mvm_mac_ctxt_remove(mvm, vif); + out_release: + if (vif->type != NL80211_IFTYPE_P2P_DEVICE) + mvm->vif_count--; + + iwl_mvm_mac_ctxt_release(mvm, vif); + out_unlock: + mutex_unlock(&mvm->mutex); + + iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF); + + return ret; +} + +static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif); + + if (tfd_msk) { + /* + * mac80211 first removes all the stations of the vif and + * then removes the vif. When it removes a station it also + * flushes the AMPDU session. So by now, all the AMPDU sessions + * of all the stations of this vif are closed, and the queues + * of these AMPDU sessions are properly closed. + * We still need to take care of the shared queues of the vif. + * Flush them here. + */ + mutex_lock(&mvm->mutex); + iwl_mvm_flush_tx_path(mvm, tfd_msk, 0); + mutex_unlock(&mvm->mutex); + + /* + * There are transports that buffer a few frames in the host. + * For these, the flush above isn't enough since while we were + * flushing, the transport might have sent more frames to the + * device. To solve this, wait here until the transport is + * empty. Technically, this could have replaced the flush + * above, but flush is much faster than draining. So flush + * first, and drain to make sure we have no frames in the + * transport anymore. 
+ * If a station still had frames on the shared queues, it is + * already marked as draining, so to complete the draining, we + * just need to wait until the transport is empty. + */ + iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk); + } + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + /* + * Flush the ROC worker which will flush the OFFCHANNEL queue. + * We assume here that all the packets sent to the OFFCHANNEL + * queue are sent in ROC session. + */ + flush_work(&mvm->roc_done_wk); + } else { + /* + * By now, all the AC queues are empty. The AGG queues are + * empty too. We already got all the Tx responses for all the + * packets in the queues. The drain work can have been + * triggered. Flush it. + */ + flush_work(&mvm->sta_drained_wk); + } +} + +static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + iwl_mvm_prepare_mac_removal(mvm, vif); + + mutex_lock(&mvm->mutex); + + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); + } + + iwl_mvm_vif_dbgfs_clean(mvm, vif); + + /* + * For AP/GO interface, the tear down of the resources allocated to the + * interface is be handled as part of the stop_ap flow. + */ + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { +#ifdef CONFIG_NL80211_TESTMODE + if (vif == mvm->noa_vif) { + mvm->noa_vif = NULL; + mvm->noa_duration = 0; + } +#endif + iwl_mvm_dealloc_bcast_sta(mvm, vif); + goto out_release; + } + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mvm->p2p_device_vif = NULL; + iwl_mvm_rm_bcast_sta(mvm, vif); + iwl_mvm_binding_remove_vif(mvm, vif); + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + mvmvif->phy_ctxt = NULL; + } + + if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) + mvm->vif_count--; + + iwl_mvm_power_update_mac(mvm); + iwl_mvm_mac_ctxt_remove(mvm, vif); + +out_release: + iwl_mvm_mac_ctxt_release(mvm, vif); + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + return 0; +} + +struct iwl_mvm_mc_iter_data { + struct iwl_mvm *mvm; + int port_id; +}; + +static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_mc_iter_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; + int ret, len; + + /* if we don't have free ports, mcast frames will be dropped */ + if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) + return; + + if (vif->type != NL80211_IFTYPE_STATION || + !vif->bss_conf.assoc) + return; + + cmd->port_id = data->port_id++; + memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); + len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); + + ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); + if (ret) + IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); +} + +static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) +{ + struct iwl_mvm_mc_iter_data iter_data = { + .mvm = mvm, + }; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) + return; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_mc_iface_iterator, &iter_data); +} + +static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mcast_filter_cmd *cmd; + struct netdev_hw_addr *addr; + int addr_count; + bool pass_all; + int len; + + addr_count = netdev_hw_addr_list_count(mc_list); + pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || + IWL_MVM_FW_MCAST_FILTER_PASS_ALL; + if (pass_all) + addr_count = 0; + + len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); + cmd = kzalloc(len, GFP_ATOMIC); + if (!cmd) + return 0; + + if (pass_all) { + cmd->pass_all = 1; + return (u64)(unsigned long)cmd; + } + + netdev_hw_addr_list_for_each(addr, mc_list) { + IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", + cmd->count, addr->addr); + memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], + addr->addr, ETH_ALEN); + cmd->count++; + } + + return (u64)(unsigned long)cmd; +} + +static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; + + mutex_lock(&mvm->mutex); + + /* replace previous configuration */ + kfree(mvm->mcast_filter_cmd); + mvm->mcast_filter_cmd = cmd; + + if (!cmd) + goto out; + + iwl_mvm_recalc_multicast(mvm); +out: + mutex_unlock(&mvm->mutex); + *total_flags = 0; +} + +static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int filter_flags, + unsigned int changed_flags) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* We support only filter for probe requests */ + if (!(changed_flags & FIF_PROBE_REQ)) + return; + + /* Supported only for p2p client interfaces */ + if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || + !vif->p2p) + return; + + mutex_lock(&mvm->mutex); + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + mutex_unlock(&mvm->mutex); +} + +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING +struct iwl_bcast_iter_data { + struct iwl_mvm *mvm; + struct iwl_bcast_filter_cmd *cmd; + u8 current_filter; +}; + +static void +iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif, + const struct iwl_fw_bcast_filter *in_filter, + struct iwl_fw_bcast_filter *out_filter) +{ + struct iwl_fw_bcast_filter_attr *attr; + int i; + + memcpy(out_filter, in_filter, sizeof(*out_filter)); + + for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) { + attr = &out_filter->attrs[i]; + + if (!attr->mask) + break; + + switch (attr->reserved1) { + case cpu_to_le16(BC_FILTER_MAGIC_IP): + if (vif->bss_conf.arp_addr_cnt != 1) { + attr->mask = 0; + continue; + } + + attr->val = vif->bss_conf.arp_addr_list[0]; + break; + case cpu_to_le16(BC_FILTER_MAGIC_MAC): + attr->val = *(__be32 *)&vif->addr[2]; + break; + default: + break; + } + attr->reserved1 = 0; + out_filter->num_attrs++; + } +} + +static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_bcast_iter_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + struct iwl_bcast_filter_cmd *cmd = data->cmd; + struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); + struct iwl_fw_bcast_mac *bcast_mac; + int i; + + if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) + return; + + bcast_mac = &cmd->macs[mvmvif->id]; + + /* + * enable filtering only for associated stations, but not for P2P + * Clients + */ + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || + !vif->bss_conf.assoc) + return; + + bcast_mac->default_discard = 1; + + /* copy all configured filters */ + for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { + /* + * Make sure we don't exceed our filters limit. + * if there is still a valid filter to be configured, + * be on the safe side and just allow bcast for this mac. + */ + if (WARN_ON_ONCE(data->current_filter >= + ARRAY_SIZE(cmd->filters))) { + bcast_mac->default_discard = 0; + bcast_mac->attached_filters = 0; + break; + } + + iwl_mvm_set_bcast_filter(vif, + &mvm->bcast_filters[i], + &cmd->filters[data->current_filter]); + + /* skip current filter if it contains no attributes */ + if (!cmd->filters[data->current_filter].num_attrs) + continue; + + /* attach the filter to current mac */ + bcast_mac->attached_filters |= + cpu_to_le16(BIT(data->current_filter)); + + data->current_filter++; + } +} + +bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, + struct iwl_bcast_filter_cmd *cmd) +{ + struct iwl_bcast_iter_data iter_data = { + .mvm = mvm, + .cmd = cmd, + }; + + if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) + return false; + + memset(cmd, 0, sizeof(*cmd)); + cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); + cmd->max_macs = ARRAY_SIZE(cmd->macs); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* use debugfs filters/macs if override is configured */ + if (mvm->dbgfs_bcast_filtering.override) { + memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, + sizeof(cmd->filters)); + memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, + sizeof(cmd->macs)); + return true; + } +#endif + + /* if no filters are configured, do nothing */ + if (!mvm->bcast_filters) + return false; + + /* configure and attach these filters for each associated sta vif */ + ieee80211_iterate_active_interfaces( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_bcast_filter_iterator, &iter_data); + + return true; +} +static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_bcast_filter_cmd cmd; + + if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) + return 0; + + if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) + return 0; + + return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, + sizeof(cmd), &cmd); +} +#else +static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + return 0; +} +#endif + +static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + /* + * Re-calculate the tsf id, as the master-slave relations depend on the + * beacon interval, which was not known when the station interface was + * added. + */ + if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) + iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); + + /* + * If we're not associated yet, take the (new) BSSID before associating + * so the firmware knows. If we're already associated, then use the old + * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC + * branch for disassociation below. 
+ */ + if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) + memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); + + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); + if (ret) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + /* after sending it once, adopt mac80211 data */ + memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); + mvmvif->associated = bss_conf->assoc; + + if (changes & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + /* clear statistics to get clean beacon counter */ + iwl_mvm_request_statistics(mvm, true); + memset(&mvmvif->beacon_stats, 0, + sizeof(mvmvif->beacon_stats)); + + /* add quota for this interface */ + ret = iwl_mvm_update_quotas(mvm, true, NULL); + if (ret) { + IWL_ERR(mvm, "failed to update quotas\n"); + return; + } + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status)) { + /* + * If we're restarting then the firmware will + * obviously have lost synchronisation with + * the AP. It will attempt to synchronise by + * itself, but we can make it more reliable by + * scheduling a session protection time event. + * + * The firmware needs to receive a beacon to + * catch up with synchronisation, use 110% of + * the beacon interval. + * + * Set a large maximum delay to allow for more + * than a single interface. + */ + u32 dur = (11 * vif->bss_conf.beacon_int) / 10; + iwl_mvm_protect_session(mvm, vif, dur, dur, + 5 * dur, false); + } + + iwl_mvm_sf_update(mvm, vif, false); + iwl_mvm_power_vif_assoc(mvm, vif); + if (vif->p2p) { + iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); + iwl_mvm_update_smps(mvm, vif, + IWL_MVM_SMPS_REQ_PROT, + IEEE80211_SMPS_DYNAMIC); + } + } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + /* + * If update fails - SF might be running in associated + * mode while disassociated - which is forbidden. + */ + WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false), + "Failed to update SF upon disassociation\n"); + + /* remove AP station now that the MAC is unassoc */ + ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); + if (ret) + IWL_ERR(mvm, "failed to remove AP station\n"); + + if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) + mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + /* remove quota for this interface */ + ret = iwl_mvm_update_quotas(mvm, false, NULL); + if (ret) + IWL_ERR(mvm, "failed to update quotas\n"); + + if (vif->p2p) + iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); + + /* this will take the cleared BSSID from bss_conf */ + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (ret) + IWL_ERR(mvm, + "failed to update MAC %pM (clear after unassoc)\n", + vif->addr); + } + + iwl_mvm_recalc_multicast(mvm); + iwl_mvm_configure_bcast_filter(mvm, vif); + + /* reset rssi values */ + mvmvif->bf_data.ave_beacon_signal = 0; + + iwl_mvm_bt_coex_vif_change(mvm); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, + IEEE80211_SMPS_AUTOMATIC); + } else if (changes & BSS_CHANGED_BEACON_INFO) { + /* + * We received a beacon _after_ association so + * remove the session protection. 
+ */ + iwl_mvm_remove_time_event(mvm, mvmvif, + &mvmvif->time_event_data); + } + + if (changes & BSS_CHANGED_BEACON_INFO) { + iwl_mvm_sf_update(mvm, vif, false); + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + } + + if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } + + if (changes & BSS_CHANGED_TXPOWER) { + IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", + bss_conf->txpower); + iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); + } + + if (changes & BSS_CHANGED_CQM) { + IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); + /* reset cqm events tracking */ + mvmvif->bf_data.last_cqm_event = 0; + if (mvmvif->bf_data.bf_enabled) { + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + if (ret) + IWL_ERR(mvm, + "failed to update CQM thresholds\n"); + } + } + + if (changes & BSS_CHANGED_ARP_FILTER) { + IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); + iwl_mvm_configure_bcast_filter(mvm, vif); + } +} + +static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + /* + * iwl_mvm_mac_ctxt_add() might read directly from the device + * (the system time), so make sure it is available. + */ + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + + /* Send the beacon template */ + ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); + if (ret) + goto out_unlock; + + /* + * Re-calculate the tsf id, as the master-slave relations depend on the + * beacon interval, which was not known when the AP interface was added. + */ + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); + + mvmvif->ap_assoc_sta_count = 0; + + /* Add the mac context */ + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) + goto out_unlock; + + /* Perform the binding */ + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (ret) + goto out_remove; + + /* Send the bcast station. 
At this stage the TBTT and DTIM time events + * are added and applied to the scheduler */ + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + if (ret) + goto out_unbind; + + /* must be set before quota calculations */ + mvmvif->ap_ibss_active = true; + + /* power updated needs to be done before quotas */ + iwl_mvm_power_update_mac(mvm); + + ret = iwl_mvm_update_quotas(mvm, false, NULL); + if (ret) + goto out_quota_failed; + + /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ + if (vif->p2p && mvm->p2p_device_vif) + iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); + + iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); + + iwl_mvm_bt_coex_vif_change(mvm); + + /* we don't support TDLS during DCM */ + if (iwl_mvm_phy_ctx_count(mvm) > 1) + iwl_mvm_teardown_tdls_peers(mvm); + + goto out_unlock; + +out_quota_failed: + iwl_mvm_power_update_mac(mvm); + mvmvif->ap_ibss_active = false; + iwl_mvm_send_rm_bcast_sta(mvm, vif); +out_unbind: + iwl_mvm_binding_remove_vif(mvm, vif); +out_remove: + iwl_mvm_mac_ctxt_remove(mvm, vif); +out_unlock: + mutex_unlock(&mvm->mutex); + iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP); + return ret; +} + +static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + iwl_mvm_prepare_mac_removal(mvm, vif); + + mutex_lock(&mvm->mutex); + + /* Handle AP stop while in CSA */ + if (rcu_access_pointer(mvm->csa_vif) == vif) { + iwl_mvm_remove_time_event(mvm, mvmvif, + &mvmvif->time_event_data); + RCU_INIT_POINTER(mvm->csa_vif, NULL); + mvmvif->csa_countdown = false; + } + + if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { + RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); + mvm->csa_tx_block_bcn_timeout = 0; + } + + mvmvif->ap_ibss_active = false; + mvm->ap_last_beacon_gp2 = 0; + + iwl_mvm_bt_coex_vif_change(mvm); + + iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); + + /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ + if (vif->p2p && mvm->p2p_device_vif) + iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); + + iwl_mvm_update_quotas(mvm, false, NULL); + iwl_mvm_send_rm_bcast_sta(mvm, vif); + iwl_mvm_binding_remove_vif(mvm, vif); + + iwl_mvm_power_update_mac(mvm); + + iwl_mvm_mac_ctxt_remove(mvm, vif); + + mutex_unlock(&mvm->mutex); +} + +static void +iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* Changes will be applied when the AP/IBSS is started */ + if (!mvmvif->ap_ibss_active) + return; + + if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | + BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + /* Need to send a new beacon template to the FW */ + if (changes & BSS_CHANGED_BEACON && + iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) + IWL_WARN(mvm, "Failed updating beacon data\n"); + + if (changes & BSS_CHANGED_TXPOWER) { + IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", + bss_conf->txpower); + iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); + } + +} + +static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* + * iwl_mvm_bss_info_changed_station() might call + * 
iwl_mvm_protect_session(), which reads directly from + * the device (the system time), so make sure it is available. + */ + if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED)) + return; + + mutex_lock(&mvm->mutex); + + if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); + break; + default: + /* shouldn't happen */ + WARN_ON_ONCE(1); + } + + mutex_unlock(&mvm->mutex); + iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED); +} + +static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + if (hw_req->req.n_channels == 0 || + hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) + return -EINVAL; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a hw_scan when it's already stopped. This can + * happen, for instance, if we stopped the scan ourselves, + * called ieee80211_scan_completed() and the userspace called + * cancel scan scan before ieee80211_scan_work() could run. + * To handle that, simply return if the scan is not running. + */ + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); + + mutex_unlock(&mvm->mutex); +} + +static void +iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tids, + int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* Called when we need to transmit (a) frame(s) from mac80211 */ + + iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, + tids, more_data, false); +} + +static void +iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tids, + int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* Called when we need to transmit (a) frame(s) from agg queue */ + + iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, + tids, more_data, true); +} + +static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + unsigned long txqs = 0, tids = 0; + int tid; + + spin_lock_bh(&mvmsta->lock); + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + + if (tid_data->state != IWL_AGG_ON && + tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) + continue; + + __set_bit(tid_data->txq_id, &txqs); + + if (iwl_mvm_tid_queued(tid_data) == 0) + continue; + + __set_bit(tid, &tids); + } + + switch (cmd) { + case STA_NOTIFY_SLEEP: + if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) + ieee80211_sta_block_awake(hw, sta, true); + + 
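+ /* tell mac80211 which TIDs still have frames queued for this station */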
for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) + ieee80211_sta_set_buffered(sta, tid, true); + + if (txqs) + iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); + /* + * The fw updates the STA to be asleep. Tx packets on the Tx + * queues to this station will not be transmitted. The fw will + * send a Tx response with TX_STATUS_FAIL_DEST_PS. + */ + break; + case STA_NOTIFY_AWAKE: + if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + break; + + if (txqs) + iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); + iwl_mvm_sta_modify_ps_wake(mvm, sta); + break; + default: + break; + } + spin_unlock_bh(&mvmsta->lock); +} + +static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + /* + * This is called before mac80211 does RCU synchronisation, + * so here we already invalidate our internal RCU-protected + * station pointer. The rest of the code will thus no longer + * be able to find the station this way, and we don't rely + * on further RCU synchronisation after the sta_state() + * callback deleted the station. + */ + mutex_lock(&mvm->mutex); + if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], + ERR_PTR(-ENOENT)); + + if (mvm_sta->vif->type == NL80211_IFTYPE_AP) { + mvmvif->ap_assoc_sta_count--; + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } + + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + const u8 *bssid) +{ + if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) + return; + + if (iwlwifi_mod_params.uapsd_disable) { + vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; + return; + } + + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; +} + +static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", + sta->addr, old_state, new_state); + + /* this would be a mac80211 bug ... but don't crash */ + if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + return -EINVAL; + + /* if a STA is being removed, reuse its ID */ + flush_work(&mvm->sta_drained_wk); + + mutex_lock(&mvm->mutex); + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { + /* + * Firmware bug - it'll crash if the beacon interval is less + * than 16. We can't avoid connecting at all, so refuse the + * station state change, this will cause mac80211 to abandon + * attempts to connect to this AP, and eventually wpa_s will + * blacklist the AP... 
+ */ + if (vif->type == NL80211_IFTYPE_STATION && + vif->bss_conf.beacon_int < 16) { + IWL_ERR(mvm, + "AP %pM beacon interval is %d, refusing due to firmware bug!\n", + sta->addr, vif->bss_conf.beacon_int); + ret = -EINVAL; + goto out_unlock; + } + + if (sta->tdls && + (vif->p2p || + iwl_mvm_tdls_sta_count(mvm, NULL) == + IWL_MVM_TDLS_STA_COUNT || + iwl_mvm_phy_ctx_count(mvm) > 1)) { + IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); + ret = -EBUSY; + goto out_unlock; + } + + ret = iwl_mvm_add_sta(mvm, vif, sta); + if (sta->tdls && ret == 0) + iwl_mvm_recalc_tdls_state(mvm, vif, true); + } else if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_AUTH) { + /* + * EBS may be disabled due to previous failures reported by FW. + * Reset EBS status here assuming environment has been changed. + */ + mvm->last_ebs_successful = true; + iwl_mvm_check_uapsd(mvm, vif, sta->addr); + ret = 0; + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = iwl_mvm_update_sta(mvm, vif, sta); + if (ret == 0) + iwl_mvm_rs_rate_init(mvm, sta, + mvmvif->phy_ctxt->channel->band, + true); + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTHORIZED) { + + /* we don't support TDLS during DCM */ + if (iwl_mvm_phy_ctx_count(mvm) > 1) + iwl_mvm_teardown_tdls_peers(mvm); + + /* enable beacon filtering */ + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + ret = 0; + } else if (old_state == IEEE80211_STA_AUTHORIZED && + new_state == IEEE80211_STA_ASSOC) { + /* disable beacon filtering */ + WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0)); + ret = 0; + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + ret = 0; + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_NONE) { + ret = 0; + } else if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) { + ret = iwl_mvm_rm_sta(mvm, vif, sta); + if (sta->tdls) + iwl_mvm_recalc_tdls_state(mvm, vif, false); + } else { + ret = -EIO; + } + out_unlock: + mutex_unlock(&mvm->mutex); + + if (sta->tdls && ret == 0) { + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) + ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); + else if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) + ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); + } + + return ret; +} + +static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mvm->rts_threshold = value; + + return 0; +} + +static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u32 changed) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + if (vif->type == NL80211_IFTYPE_STATION && + changed & IEEE80211_RC_NSS_CHANGED) + iwl_mvm_sf_update(mvm, vif, false); +} + +static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->queue_params[ac] = *params; + + /* + * No need to update right away, we'll get BSS_CHANGED_QOS + * The exception is P2P_DEVICE interface which needs immediate update. 
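+ * (a P2P Device has no BSS, so no BSS_CHANGED_QOS would follow for it)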
+ */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + mutex_unlock(&mvm->mutex); + return ret; + } + return 0; +} + +static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS, + 200 + vif->bss_conf.beacon_int); + u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS, + 100 + vif->bss_conf.beacon_int); + + if (WARN_ON_ONCE(vif->bss_conf.assoc)) + return; + + /* + * iwl_mvm_protect_session() reads directly from the device + * (the system time), so make sure it is available. + */ + if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) + return; + + mutex_lock(&mvm->mutex); + /* Try really hard to protect the session and hear a beacon */ + iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); + mutex_unlock(&mvm->mutex); + + iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); +} + +static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + int ret; + + mutex_lock(&mvm->mutex); + + if (!vif->bss_conf.idle) { + ret = -EBUSY; + goto out; + } + + ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); + +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a sched_scan when it's already stopped. This + * can happen, for instance, if we stopped the scan ourselves, + * called ieee80211_sched_scan_stopped() and the userspace called + * stop sched scan scan before ieee80211_sched_scan_stopped_work() + * could run. To handle this, simply return if the scan is + * not running. + */ + if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { + mutex_unlock(&mvm->mutex); + return 0; + } + + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); + mutex_unlock(&mvm->mutex); + iwl_mvm_wait_for_async_handlers(mvm); + + return ret; +} + +static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + if (iwlwifi_mod_params.sw_crypto) { + IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* For non-client mode, only use WEP keys for TX as we probably + * don't have a station yet anyway and would then have to keep + * track of the keys, linking them to each of the clients/peers + * as they appear. For now, don't do that, for performance WEP + * offload doesn't really matter much, but we need it for some + * other offload features in client mode. 
+ */ + if (vif->type != NL80211_IFTYPE_STATION) + return 0; + break; + default: + /* currently FW supports only one optional cipher scheme */ + if (hw->n_cipher_schemes && + hw->cipher_schemes->cipher == key->cipher) + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + else + return -EOPNOTSUPP; + } + + mutex_lock(&mvm->mutex); + + switch (cmd) { + case SET_KEY: + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_AP) && !sta) { + /* + * GTK on AP interface is a TX-only key, return 0; + * on IBSS they're per-station and because we're lazy + * we don't support them for RX, so do the same. + */ + ret = 0; + key->hw_key_idx = STA_KEY_IDX_INVALID; + break; + } + + /* During FW restart, in order to restore the state as it was, + * don't try to reprogram keys we previously failed for. + */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + key->hw_key_idx == STA_KEY_IDX_INVALID) { + IWL_DEBUG_MAC80211(mvm, + "skip invalid idx key programming during restart\n"); + ret = 0; + break; + } + + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, + test_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status)); + if (ret) { + IWL_WARN(mvm, "set key failed\n"); + /* + * can't add key for RX, but we don't need it + * in the device for TX so still return 0 + */ + key->hw_key_idx = STA_KEY_IDX_INVALID; + ret = 0; + } + + break; + case DISABLE_KEY: + if (key->hw_key_idx == STA_KEY_IDX_INVALID) { + ret = 0; + break; + } + + IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); + ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&mvm->mutex); + return ret; +} + +static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) + return; + + iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); +} + + +static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_hs20_roc_res *resp; + int resp_len = iwl_rx_packet_payload_len(pkt); + struct iwl_mvm_time_event_data *te_data = data; + + if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) + return true; + + if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { + IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); + return true; + } + + resp = (void *)pkt->data; + + IWL_DEBUG_TE(mvm, + "Aux ROC: Recieved response from ucode: status=%d uid=%d\n", + resp->status, resp->event_unique_id); + + te_data->uid = le32_to_cpu(resp->event_unique_id); + IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", + te_data->uid); + + spin_lock_bh(&mvm->time_event_lock); + list_add_tail(&te_data->list, &mvm->aux_roc_te_list); + spin_unlock_bh(&mvm->time_event_lock); + + return true; +} + +#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200 +static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration) +{ + int res, time_reg = DEVICE_SYSTEM_TIME_REG; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; + static const u16 time_event_response[] = { HOT_SPOT_CMD }; + struct iwl_notification_wait wait_time_event; + struct iwl_hs20_roc_req aux_roc_req = { + 
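+ /* add a new aux ROC time event on the requested channel */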
.action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), + .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), + /* Set the channel info data */ + .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ? + PHY_BAND_24 : PHY_BAND_5, + .channel_info.channel = channel->hw_value, + .channel_info.width = PHY_VHT_CHANNEL_MODE20, + /* Set the time and duration */ + .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)), + .apply_time_max_delay = + cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)), + .duration = cpu_to_le32(MSEC_TO_TU(duration)), + }; + + /* Set the node address */ + memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->time_event_lock); + + if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { + spin_unlock_bh(&mvm->time_event_lock); + return -EIO; + } + + te_data->vif = vif; + te_data->duration = duration; + te_data->id = HOT_SPOT_CMD; + + spin_unlock_bh(&mvm->time_event_lock); + + /* + * Use a notification wait, which really just processes the + * command response and doesn't wait for anything, in order + * to be able to process the response and get the UID inside + * the RX path. Using CMD_WANT_SKB doesn't work because it + * stores the buffer and then wakes up this thread, by which + * time another notification (that the time event started) + * might already be processed unsuccessfully. + */ + iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, + time_event_response, + ARRAY_SIZE(time_event_response), + iwl_mvm_rx_aux_roc, te_data); + + res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req), + &aux_roc_req); + + if (res) { + IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); + iwl_remove_notification(&mvm->notif_wait, &wait_time_event); + goto out_clear_te; + } + + /* No need to wait for anything, so just pass 1 (0 isn't valid) */ + res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); + /* should never fail */ + WARN_ON_ONCE(res); + + if (res) { + out_clear_te: + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); + } + + return res; +} + +static int iwl_mvm_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *channel, + int duration, + enum ieee80211_roc_type type) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct cfg80211_chan_def chandef; + struct iwl_mvm_phy_ctxt *phy_ctxt; + int ret, i; + + IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, + duration, type); + + flush_work(&mvm->roc_done_wk); + + mutex_lock(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { + /* Use aux roc framework (HS20) */ + ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, + vif, duration); + goto out_unlock; + } + IWL_ERR(mvm, "hotspot not supported\n"); + ret = -EINVAL; + goto out_unlock; + case NL80211_IFTYPE_P2P_DEVICE: + /* handle below */ + break; + default: + IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); + ret = -EINVAL; + goto out_unlock; + } + + for (i = 0; i < NUM_PHY_CTX; i++) { + phy_ctxt = &mvm->phy_ctxts[i]; + if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) + continue; + + if (phy_ctxt->ref && channel == phy_ctxt->channel) { + /* + * Unbind the P2P_DEVICE from the current PHY context, + * and if the PHY context is not used 
remove it. + */ + ret = iwl_mvm_binding_remove_vif(mvm, vif); + if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + + /* Bind the P2P_DEVICE to the current PHY Context */ + mvmvif->phy_ctxt = phy_ctxt; + + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (WARN(ret, "Failed binding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + goto schedule_time_event; + } + } + + /* Need to update the PHY context only if the ROC channel changed */ + if (channel == mvmvif->phy_ctxt->channel) + goto schedule_time_event; + + cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); + + /* + * Change the PHY context configuration as it is currently referenced + * only by the P2P Device MAC + */ + if (mvmvif->phy_ctxt->ref == 1) { + ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, + &chandef, 1, 1); + if (ret) + goto out_unlock; + } else { + /* + * The PHY context is shared with other MACs. Need to remove the + * P2P Device from the binding, allocate an new PHY context and + * create a new binding + */ + phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!phy_ctxt) { + ret = -ENOSPC; + goto out_unlock; + } + + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, + 1, 1); + if (ret) { + IWL_ERR(mvm, "Failed to change PHY context\n"); + goto out_unlock; + } + + /* Unbind the P2P_DEVICE from the current PHY context */ + ret = iwl_mvm_binding_remove_vif(mvm, vif); + if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + + /* Bind the P2P_DEVICE to the new allocated PHY context */ + mvmvif->phy_ctxt = phy_ctxt; + + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (WARN(ret, "Failed binding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + } + +schedule_time_event: + /* Schedule the time events */ + ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); + +out_unlock: + mutex_unlock(&mvm->mutex); + IWL_DEBUG_MAC80211(mvm, "leave\n"); + return ret; +} + +static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + IWL_DEBUG_MAC80211(mvm, "enter\n"); + + mutex_lock(&mvm->mutex); + iwl_mvm_stop_roc(mvm); + mutex_unlock(&mvm->mutex); + + IWL_DEBUG_MAC80211(mvm, "leave\n"); + return 0; +} + +static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, + struct ieee80211_chanctx_conf *ctx) +{ + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); + + phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!phy_ctxt) { + ret = -ENOSPC; + goto out; + } + + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, + ctx->rx_chains_static, + ctx->rx_chains_dynamic); + if (ret) { + IWL_ERR(mvm, "Failed to add PHY context\n"); + goto out; + } + + iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); + *phy_ctxt_id = phy_ctxt->id; +out: + return ret; +} + +static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = __iwl_mvm_add_chanctx(mvm, ctx); + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, + struct ieee80211_chanctx_conf *ctx) +{ + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + + 
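+ /* drop the reference taken in __iwl_mvm_add_chanctx() */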
lockdep_assert_held(&mvm->mutex); + + iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); +} + +static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + __iwl_mvm_remove_chanctx(mvm, ctx); + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + + if (WARN_ONCE((phy_ctxt->ref > 1) && + (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | + IEEE80211_CHANCTX_CHANGE_RX_CHAINS | + IEEE80211_CHANCTX_CHANGE_RADAR | + IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), + "Cannot change PHY. Ref=%d, changed=0x%X\n", + phy_ctxt->ref, changed)) + return; + + mutex_lock(&mvm->mutex); + iwl_mvm_bt_coex_vif_change(mvm); + iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, + ctx->rx_chains_static, + ctx->rx_chains_dynamic); + mutex_unlock(&mvm->mutex); +} + +static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) +{ + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + lockdep_assert_held(&mvm->mutex); + + mvmvif->phy_ctxt = phy_ctxt; + + switch (vif->type) { + case NL80211_IFTYPE_AP: + /* only needed if we're switching chanctx (i.e. during CSA) */ + if (switching_chanctx) { + mvmvif->ap_ibss_active = true; + break; + } + case NL80211_IFTYPE_ADHOC: + /* + * The AP binding flow is handled as part of the start_ap flow + * (in bss_info_changed), similarly for IBSS. + */ + ret = 0; + goto out; + case NL80211_IFTYPE_STATION: + break; + case NL80211_IFTYPE_MONITOR: + /* always disable PS when a monitor interface is active */ + mvmvif->ps_disabled = true; + break; + default: + ret = -EINVAL; + goto out; + } + + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (ret) + goto out; + + /* + * Power state must be updated before quotas, + * otherwise fw will complain. + */ + iwl_mvm_power_update_mac(mvm); + + /* Setting the quota at this stage is only required for monitor + * interfaces. For the other types, the bss_info changed flow + * will handle quota settings. + */ + if (vif->type == NL80211_IFTYPE_MONITOR) { + mvmvif->monitor_active = true; + ret = iwl_mvm_update_quotas(mvm, false, NULL); + if (ret) + goto out_remove_binding; + } + + /* Handle binding during CSA */ + if (vif->type == NL80211_IFTYPE_AP) { + iwl_mvm_update_quotas(mvm, false, NULL); + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } + + if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { + u32 duration = 2 * vif->bss_conf.beacon_int; + + /* iwl_mvm_protect_session() reads directly from the + * device (the system time), so make sure it is + * available. + */ + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); + if (ret) + goto out_remove_binding; + + /* Protect the session to make sure we hear the first + * beacon on the new channel. 
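+ * The protection spans two beacon intervals (the duration computed above).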
+ */ + iwl_mvm_protect_session(mvm, vif, duration, duration, + vif->bss_conf.beacon_int / 2, + true); + + iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); + + iwl_mvm_update_quotas(mvm, false, NULL); + } + + goto out; + +out_remove_binding: + iwl_mvm_binding_remove_vif(mvm, vif); + iwl_mvm_power_update_mac(mvm); +out: + if (ret) + mvmvif->phy_ctxt = NULL; + return ret; +} +static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_vif *disabled_vif = NULL; + + lockdep_assert_held(&mvm->mutex); + + iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); + + switch (vif->type) { + case NL80211_IFTYPE_ADHOC: + goto out; + case NL80211_IFTYPE_MONITOR: + mvmvif->monitor_active = false; + mvmvif->ps_disabled = false; + break; + case NL80211_IFTYPE_AP: + /* This part is triggered only during CSA */ + if (!switching_chanctx || !mvmvif->ap_ibss_active) + goto out; + + mvmvif->csa_countdown = false; + + /* Set CS bit on all the stations */ + iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); + + /* Save blocked iface, the timeout is set on the next beacon */ + rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); + + mvmvif->ap_ibss_active = false; + break; + case NL80211_IFTYPE_STATION: + if (!switching_chanctx) + break; + + disabled_vif = vif; + + iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); + break; + default: + break; + } + + iwl_mvm_update_quotas(mvm, false, disabled_vif); + iwl_mvm_binding_remove_vif(mvm, vif); + +out: + mvmvif->phy_ctxt = NULL; + iwl_mvm_power_update_mac(mvm); +} + +static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); + mutex_unlock(&mvm->mutex); +} + +static int +iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, + struct ieee80211_vif_chanctx_switch *vifs) +{ + int ret; + + mutex_lock(&mvm->mutex); + __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); + __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); + + ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); + if (ret) { + IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); + goto out_reassign; + } + + ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, + true); + if (ret) { + IWL_ERR(mvm, + "failed to assign new_ctx during channel switch\n"); + goto out_remove; + } + + /* we don't support TDLS during DCM - can be caused by channel switch */ + if (iwl_mvm_phy_ctx_count(mvm) > 1) + iwl_mvm_teardown_tdls_peers(mvm); + + goto out; + +out_remove: + __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); + +out_reassign: + if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { + IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); + goto out_restart; + } + + if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, + true)) { + IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); + goto out_restart; + } + + goto out; + +out_restart: + /* 
things keep failing, better restart the hw */ + iwl_mvm_nic_restart(mvm, false); + +out: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int +iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, + struct ieee80211_vif_chanctx_switch *vifs) +{ + int ret; + + mutex_lock(&mvm->mutex); + __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); + + ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, + true); + if (ret) { + IWL_ERR(mvm, + "failed to assign new_ctx during channel switch\n"); + goto out_reassign; + } + + goto out; + +out_reassign: + if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, + true)) { + IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); + goto out_restart; + } + + goto out; + +out_restart: + /* things keep failing, better restart the hw */ + iwl_mvm_nic_restart(mvm, false); + +out: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + /* we only support a single-vif right now */ + if (n_vifs > 1) + return -EOPNOTSUPP; + + switch (mode) { + case CHANCTX_SWMODE_SWAP_CONTEXTS: + ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); + break; + case CHANCTX_SWMODE_REASSIGN_VIF: + ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int iwl_mvm_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + bool set) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + if (!mvm_sta || !mvm_sta->vif) { + IWL_ERR(mvm, "Station is not associated to a vif\n"); + return -EINVAL; + } + + return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); +} + +#ifdef CONFIG_NL80211_TESTMODE +static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { + [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, + [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, + [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, +}; + +static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + void *data, int len) +{ + struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; + int err; + u32 noa_duration; + + err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy); + if (err) + return err; + + if (!tb[IWL_MVM_TM_ATTR_CMD]) + return -EINVAL; + + switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { + case IWL_MVM_TM_CMD_SET_NOA: + if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || + !vif->bss_conf.enable_beacon || + !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) + return -EINVAL; + + noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); + if (noa_duration >= vif->bss_conf.beacon_int) + return -EINVAL; + + mvm->noa_duration = noa_duration; + mvm->noa_vif = vif; + + return iwl_mvm_update_quotas(mvm, false, NULL); + case IWL_MVM_TM_CMD_SET_BEACON_FILTER: + /* must be associated client vif - ignore authorized */ + if (!vif || vif->type != NL80211_IFTYPE_STATION || + !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || + !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) + return -EINVAL; + + if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) + return iwl_mvm_enable_beacon_filter(mvm, vif, 0); + return iwl_mvm_disable_beacon_filter(mvm, vif, 0); + } + + return -EOPNOTSUPP; +} + +static int iwl_mvm_mac_testmode_cmd(struct 
ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int err; + + mutex_lock(&mvm->mutex); + err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); + mutex_unlock(&mvm->mutex); + + return err; +} +#endif + +static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + /* By implementing this operation, we prevent mac80211 from + * starting its own channel switch timer, so that we can call + * ieee80211_chswitch_done() ourselves at the right time + * (which is when the absence time event starts). + */ + + IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), + "dummy channel switch op\n"); +} + +static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *csa_vif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 apply_time; + int ret; + + mutex_lock(&mvm->mutex); + + mvmvif->csa_failed = false; + + IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", + chsw->chandef.center_freq1); + + iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH); + + switch (vif->type) { + case NL80211_IFTYPE_AP: + csa_vif = + rcu_dereference_protected(mvm->csa_vif, + lockdep_is_held(&mvm->mutex)); + if (WARN_ONCE(csa_vif && csa_vif->csa_active, + "Another CSA is already in progress")) { + ret = -EBUSY; + goto out_unlock; + } + + rcu_assign_pointer(mvm->csa_vif, vif); + + if (WARN_ONCE(mvmvif->csa_countdown, + "Previous CSA countdown didn't complete")) { + ret = -EBUSY; + goto out_unlock; + } + + break; + case NL80211_IFTYPE_STATION: + /* Schedule the time event to a bit before beacon 1, + * to make sure we're in the new channel when the + * GO/AP arrives. 
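+		 * apply_time below is in the device's microsecond clock: the
+		 * offset of (count - 1) beacon intervals minus
+		 * IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT, expressed in TUs, is
+		 * scaled by 1024 (1 TU = 1024 usec) and added to the
+		 * timestamp of the frame that carried the CSA.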
+ */ + apply_time = chsw->device_timestamp + + ((vif->bss_conf.beacon_int * (chsw->count - 1) - + IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); + + if (chsw->block_tx) + iwl_mvm_csa_client_absent(mvm, vif); + + iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, + apply_time); + if (mvmvif->bf_data.bf_enabled) { + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_unlock; + } + + break; + default: + break; + } + + mvmvif->ps_disabled = true; + + ret = iwl_mvm_power_update_ps(mvm); + if (ret) + goto out_unlock; + + /* we won't be on this channel any longer */ + iwl_mvm_teardown_tdls_peers(mvm); + +out_unlock: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + + if (mvmvif->csa_failed) { + mvmvif->csa_failed = false; + ret = -EIO; + goto out_unlock; + } + + if (vif->type == NL80211_IFTYPE_STATION) { + struct iwl_mvm_sta *mvmsta; + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + + if (WARN_ON(!mvmsta)) { + ret = -EIO; + goto out_unlock; + } + + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); + + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_unlock; + + iwl_mvm_stop_session_protection(mvm, vif); + } + + mvmvif->ps_disabled = false; + + ret = iwl_mvm_power_update_ps(mvm); + +out_unlock: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u32 queues, bool drop) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_sta *mvmsta; + struct ieee80211_sta *sta; + int i; + u32 msk = 0; + + if (!vif || vif->type != NL80211_IFTYPE_STATION) + return; + + mutex_lock(&mvm->mutex); + mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* flush the AP-station and all TDLS peers */ + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (mvmsta->vif != vif) + continue; + + /* make sure only TDLS peers or the AP are flushed */ + WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); + + msk |= mvmsta->tfd_queue_msk; + } + + if (drop) { + if (iwl_mvm_flush_tx_path(mvm, msk, 0)) + IWL_ERR(mvm, "flush request fail\n"); + mutex_unlock(&mvm->mutex); + } else { + mutex_unlock(&mvm->mutex); + + /* this can take a while, and we may need/want other operations + * to succeed while doing this, so do it without the mutex held + */ + iwl_trans_wait_tx_queue_empty(mvm->trans, msk); + } +} + +static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + memset(survey, 0, sizeof(*survey)); + + /* only support global statistics right now */ + if (idx != 0) + return -ENOENT; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + return -ENOENT; + + mutex_lock(&mvm->mutex); + + if (mvm->ucode_loaded) { + ret = iwl_mvm_request_statistics(mvm, false); + if (ret) + goto out; + } + + survey->filled = SURVEY_INFO_TIME | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_SCAN; + survey->time = 
mvm->accu_radio_stats.on_time_rf + + mvm->radio_stats.on_time_rf; + do_div(survey->time, USEC_PER_MSEC); + + survey->time_rx = mvm->accu_radio_stats.rx_time + + mvm->radio_stats.rx_time; + do_div(survey->time_rx, USEC_PER_MSEC); + + survey->time_tx = mvm->accu_radio_stats.tx_time + + mvm->radio_stats.tx_time; + do_div(survey->time_tx, USEC_PER_MSEC); + + survey->time_scan = mvm->accu_radio_stats.on_time_scan + + mvm->radio_stats.on_time_scan; + do_div(survey->time_scan, USEC_PER_MSEC); + + ret = 0; + out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + return; + + /* if beacon filtering isn't on mac80211 does it anyway */ + if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) + return; + + if (!vif->bss_conf.assoc) + return; + + mutex_lock(&mvm->mutex); + + if (mvmvif->ap_sta_id != mvmsta->sta_id) + goto unlock; + + if (iwl_mvm_request_statistics(mvm, false)) + goto unlock; + + sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + + mvmvif->beacon_stats.accu_num_beacons; + sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); + if (mvmvif->beacon_stats.avg_signal) { + /* firmware only reports a value after RXing a few beacons */ + sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; + sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + } + unlock: + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ +#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) 
\ + do { \ + if ((_cnt) && --(_cnt)) \ + break; \ + iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\ + } while (0) + + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_mlme *trig_mlme; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); + trig_mlme = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + if (event->u.mlme.data == ASSOC_EVENT) { + if (event->u.mlme.status == MLME_DENIED) + CHECK_MLME_TRIGGER(mvm, trig, buf, + trig_mlme->stop_assoc_denied, + "DENIED ASSOC: reason %d", + event->u.mlme.reason); + else if (event->u.mlme.status == MLME_TIMEOUT) + CHECK_MLME_TRIGGER(mvm, trig, buf, + trig_mlme->stop_assoc_timeout, + "ASSOC TIMEOUT"); + } else if (event->u.mlme.data == AUTH_EVENT) { + if (event->u.mlme.status == MLME_DENIED) + CHECK_MLME_TRIGGER(mvm, trig, buf, + trig_mlme->stop_auth_denied, + "DENIED AUTH: reason %d", + event->u.mlme.reason); + else if (event->u.mlme.status == MLME_TIMEOUT) + CHECK_MLME_TRIGGER(mvm, trig, buf, + trig_mlme->stop_auth_timeout, + "AUTH TIMEOUT"); + } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { + CHECK_MLME_TRIGGER(mvm, trig, buf, + trig_mlme->stop_rx_deauth, + "DEAUTH RX %d", event->u.mlme.reason); + } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { + CHECK_MLME_TRIGGER(mvm, trig, buf, + trig_mlme->stop_tx_deauth, + "DEAUTH TX %d", event->u.mlme.reason); + } +#undef CHECK_MLME_TRIGGER +} + +static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "BAR received from %pM, tid %d, ssn %d", + event->u.ba.sta->addr, event->u.ba.tid, + event->u.ba.ssn); +} + +static void +iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "Frame from %pM timed out, tid %d", + event->u.ba.sta->addr, event->u.ba.tid); +} + +static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + switch (event->type) { + case MLME_EVENT: + iwl_mvm_event_mlme_callback(mvm, vif, event); + break; + case BAR_RX_EVENT: + iwl_mvm_event_bar_rx_callback(mvm, vif, event); + break; + case BA_FRAME_TIMEOUT: + iwl_mvm_event_frame_timeout_callback(mvm, vif, event); + break; + default: + break; + } +} + +const struct ieee80211_ops iwl_mvm_hw_ops = { + .tx = iwl_mvm_mac_tx, + .ampdu_action = iwl_mvm_mac_ampdu_action, + .start = iwl_mvm_mac_start, + .reconfig_complete = iwl_mvm_mac_reconfig_complete, + .stop = 
iwl_mvm_mac_stop, + .add_interface = iwl_mvm_mac_add_interface, + .remove_interface = iwl_mvm_mac_remove_interface, + .config = iwl_mvm_mac_config, + .prepare_multicast = iwl_mvm_prepare_multicast, + .configure_filter = iwl_mvm_configure_filter, + .config_iface_filter = iwl_mvm_config_iface_filter, + .bss_info_changed = iwl_mvm_bss_info_changed, + .hw_scan = iwl_mvm_mac_hw_scan, + .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, + .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, + .sta_state = iwl_mvm_mac_sta_state, + .sta_notify = iwl_mvm_mac_sta_notify, + .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, + .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, + .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, + .sta_rc_update = iwl_mvm_sta_rc_update, + .conf_tx = iwl_mvm_mac_conf_tx, + .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, + .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, + .flush = iwl_mvm_mac_flush, + .sched_scan_start = iwl_mvm_mac_sched_scan_start, + .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, + .set_key = iwl_mvm_mac_set_key, + .update_tkip_key = iwl_mvm_mac_update_tkip_key, + .remain_on_channel = iwl_mvm_roc, + .cancel_remain_on_channel = iwl_mvm_cancel_roc, + .add_chanctx = iwl_mvm_add_chanctx, + .remove_chanctx = iwl_mvm_remove_chanctx, + .change_chanctx = iwl_mvm_change_chanctx, + .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, + .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, + .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, + + .start_ap = iwl_mvm_start_ap_ibss, + .stop_ap = iwl_mvm_stop_ap_ibss, + .join_ibss = iwl_mvm_start_ap_ibss, + .leave_ibss = iwl_mvm_stop_ap_ibss, + + .set_tim = iwl_mvm_set_tim, + + .channel_switch = iwl_mvm_channel_switch, + .pre_channel_switch = iwl_mvm_pre_channel_switch, + .post_channel_switch = iwl_mvm_post_channel_switch, + + .tdls_channel_switch = iwl_mvm_tdls_channel_switch, + .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, + .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, + + .event_callback = iwl_mvm_mac_event_callback, + + CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) + +#ifdef CONFIG_PM_SLEEP + /* look at d3.c */ + .suspend = iwl_mvm_suspend, + .resume = iwl_mvm_resume, + .set_wakeup = iwl_mvm_set_wakeup, + .set_rekey_data = iwl_mvm_set_rekey_data, +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = iwl_mvm_ipv6_addr_change, +#endif + .set_default_unicast_key = iwl_mvm_set_default_unicast_key, +#endif + .get_survey = iwl_mvm_mac_get_survey, + .sta_statistics = iwl_mvm_mac_sta_statistics, +}; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h new file mode 100644 index 000000000000..4bde2d027dcd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -0,0 +1,1535 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __IWL_MVM_H__ +#define __IWL_MVM_H__ + +#include +#include +#include +#include + +#include "iwl-op-mode.h" +#include "iwl-trans.h" +#include "iwl-notif-wait.h" +#include "iwl-eeprom-parse.h" +#include "iwl-fw-file.h" +#include "iwl-config.h" +#include "sta.h" +#include "fw-api.h" +#include "constants.h" +#include "tof.h" + +#define IWL_MVM_MAX_ADDRESSES 5 +/* RSSI offset for WkP */ +#define IWL_RSSI_OFFSET 50 +#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 +/* A TimeUnit is 1024 microsecond */ +#define MSEC_TO_TU(_msec) (_msec*1000/1024) + +/* For GO, this value represents the number of TUs before CSA "beacon + * 0" TBTT when the CSA time-event needs to be scheduled to start. It + * must be big enough to ensure that we switch in time. + */ +#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 + +/* For client, this value represents the number of TUs before CSA + * "beacon 1" TBTT, instead. This is because we don't know when the + * GO/AP will be in the new channel, so we switch early enough. 
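+ * For example, with a 100 TU beacon interval and a CSA count of 3,
+ * iwl_mvm_pre_channel_switch() ends up scheduling the absence around
+ * 100 * 2 - 10 = 190 TUs after the frame that announced the switch.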
+ */ +#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10 + +/* + * This value (in TUs) is used to fine tune the CSA NoA end time which should + * be just before "beacon 0" TBTT. + */ +#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4 + +/* + * Number of beacons to transmit on a new channel until we unblock tx to + * the stations, even if we didn't identify them on a new channel + */ +#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 + +extern const struct ieee80211_ops iwl_mvm_hw_ops; + +/** + * struct iwl_mvm_mod_params - module parameters for iwlmvm + * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. + * We will register to mac80211 to have testmode working. The NIC must not + * be up'ed after the INIT fw asserted. This is useful to be able to use + * proprietary tools over testmode to debug the INIT fw. + * @tfd_q_hang_detect: enabled the detection of hung transmit queues + * @power_scheme: one of enum iwl_power_scheme + */ +struct iwl_mvm_mod_params { + bool init_dbg; + bool tfd_q_hang_detect; + int power_scheme; +}; +extern struct iwl_mvm_mod_params iwlmvm_mod_params; + +/** + * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump + * + * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode + * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the + * transport's data. + * @trans_len: length of the valid data in trans_ptr + * @op_mode_len: length of the valid data in op_mode_ptr + */ +struct iwl_mvm_dump_ptrs { + struct iwl_trans_dump_data *trans_ptr; + void *op_mode_ptr; + u32 op_mode_len; +}; + +/** + * struct iwl_mvm_dump_desc - describes the dump + * @len: length of trig_desc->data + * @trig_desc: the description of the dump + */ +struct iwl_mvm_dump_desc { + size_t len; + /* must be last */ + struct iwl_fw_error_dump_trigger_desc trig_desc; +}; + +extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert; + +struct iwl_mvm_phy_ctxt { + u16 id; + u16 color; + u32 ref; + + /* + * TODO: This should probably be removed. 
Currently here only for rate + * scaling algorithm + */ + struct ieee80211_channel *channel; +}; + +struct iwl_mvm_time_event_data { + struct ieee80211_vif *vif; + struct list_head list; + unsigned long end_jiffies; + u32 duration; + bool running; + u32 uid; + + /* + * The access to the 'id' field must be done when the + * mvm->time_event_lock is held, as it value is used to indicate + * if the te is in the time event list or not (when id == TE_MAX) + */ + u32 id; +}; + + /* Power management */ + +/** + * enum iwl_power_scheme + * @IWL_POWER_LEVEL_CAM - Continuously Active Mode + * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default) + * @IWL_POWER_LEVEL_LP - Low Power + */ +enum iwl_power_scheme { + IWL_POWER_SCHEME_CAM = 1, + IWL_POWER_SCHEME_BPS, + IWL_POWER_SCHEME_LP +}; + +#define IWL_CONN_MAX_LISTEN_INTERVAL 10 +#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 + +#ifdef CONFIG_IWLWIFI_DEBUGFS +enum iwl_dbgfs_pm_mask { + MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), + MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1), + MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), + MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), + MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), + MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), + MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), + MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), + MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9), + MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10), +}; + +struct iwl_dbgfs_pm { + u16 keep_alive_seconds; + u32 rx_data_timeout; + u32 tx_data_timeout; + bool skip_over_dtim; + u8 skip_dtim_periods; + bool lprx_ena; + u32 lprx_rssi_threshold; + bool snooze_ena; + bool uapsd_misbehaving; + bool use_ps_poll; + int mask; +}; + +/* beacon filtering */ + +enum iwl_dbgfs_bf_mask { + MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), + MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), + MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), + MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3), + MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4), + MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5), + MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6), + MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7), + MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8), + MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9), + MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10), +}; + +struct iwl_dbgfs_bf { + u32 bf_energy_delta; + u32 bf_roaming_energy_delta; + u32 bf_roaming_state; + u32 bf_temp_threshold; + u32 bf_temp_fast_filter; + u32 bf_temp_slow_filter; + u32 bf_enable_beacon_filter; + u32 bf_debug_flag; + u32 bf_escape_timer; + u32 ba_escape_timer; + u32 ba_enable_beacon_abort; + int mask; +}; +#endif + +enum iwl_mvm_smps_type_request { + IWL_MVM_SMPS_REQ_BT_COEX, + IWL_MVM_SMPS_REQ_TT, + IWL_MVM_SMPS_REQ_PROT, + NUM_IWL_MVM_SMPS_REQ, +}; + +enum iwl_mvm_ref_type { + IWL_MVM_REF_UCODE_DOWN, + IWL_MVM_REF_SCAN, + IWL_MVM_REF_ROC, + IWL_MVM_REF_ROC_AUX, + IWL_MVM_REF_P2P_CLIENT, + IWL_MVM_REF_AP_IBSS, + IWL_MVM_REF_USER, + IWL_MVM_REF_TX, + IWL_MVM_REF_TX_AGG, + IWL_MVM_REF_ADD_IF, + IWL_MVM_REF_START_AP, + IWL_MVM_REF_BSS_CHANGED, + IWL_MVM_REF_PREPARE_TX, + IWL_MVM_REF_PROTECT_TDLS, + IWL_MVM_REF_CHECK_CTKILL, + IWL_MVM_REF_PRPH_READ, + IWL_MVM_REF_PRPH_WRITE, + IWL_MVM_REF_NMI, + IWL_MVM_REF_TM_CMD, + IWL_MVM_REF_EXIT_WORK, + IWL_MVM_REF_PROTECT_CSA, + IWL_MVM_REF_FW_DBG_COLLECT, + + /* update debugfs.c when changing this */ + + IWL_MVM_REF_COUNT, +}; + +enum iwl_bt_force_ant_mode { + BT_FORCE_ANT_DIS = 0, + BT_FORCE_ANT_AUTO, + BT_FORCE_ANT_BT, + BT_FORCE_ANT_WIFI, + + BT_FORCE_ANT_MAX, +}; + +/** +* struct iwl_mvm_vif_bf_data - beacon filtering related data +* @bf_enabled: indicates if beacon filtering is enabled +* 
@ba_enabled: indicated if beacon abort is enabled +* @ave_beacon_signal: average beacon signal +* @last_cqm_event: rssi of the last cqm event +* @bt_coex_min_thold: minimum threshold for BT coex +* @bt_coex_max_thold: maximum threshold for BT coex +* @last_bt_coex_event: rssi of the last BT coex event +*/ +struct iwl_mvm_vif_bf_data { + bool bf_enabled; + bool ba_enabled; + int ave_beacon_signal; + int last_cqm_event; + int bt_coex_min_thold; + int bt_coex_max_thold; + int last_bt_coex_event; +}; + +/** + * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context + * @id: between 0 and 3 + * @color: to solve races upon MAC addition and removal + * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA + * @bssid: BSSID for this (client) interface + * @associated: indicates that we're currently associated, used only for + * managing the firmware state in iwl_mvm_bss_info_changed_station() + * @ap_assoc_sta_count: count of stations associated to us - valid only + * if VIF type is AP + * @uploaded: indicates the MAC context has been added to the device + * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface + * should get quota etc. + * @pm_enabled - Indicate if MAC power management is allowed + * @monitor_active: indicates that monitor context is configured, and that the + * interface should get quota etc. + * @low_latency: indicates that this interface is in low-latency mode + * (VMACLowLatencyMode) + * @ps_disabled: indicates that this interface requires PS to be disabled + * @queue_params: QoS params for this MAC + * @bcast_sta: station used for broadcast packets. Used by the following + * vifs: P2P_DEVICE, GO and AP. + * @beacon_skb: the skb used to hold the AP/GO beacon template + * @smps_requests: the SMPS requests of different parts of the driver, + * combined on update to yield the overall request to mac80211. + * @beacon_stats: beacon statistics, containing the # of received beacons, + * # of received beacons accumulated over FW restart, and the current + * average signal of beacons retrieved from the firmware + * @csa_failed: CSA failed to schedule time event, report an error later + * @features: hw features active for this vif + */ +struct iwl_mvm_vif { + struct iwl_mvm *mvm; + u16 id; + u16 color; + u8 ap_sta_id; + + u8 bssid[ETH_ALEN]; + bool associated; + u8 ap_assoc_sta_count; + + bool uploaded; + bool ap_ibss_active; + bool pm_enabled; + bool monitor_active; + bool low_latency; + bool ps_disabled; + struct iwl_mvm_vif_bf_data bf_data; + + struct { + u32 num_beacons, accu_num_beacons; + u8 avg_signal; + } beacon_stats; + + u32 ap_beacon_time; + + enum iwl_tsf_id tsf_id; + + /* + * QoS data from mac80211, need to store this here + * as mac80211 has a separate callback but we need + * to have the data for the MAC context + */ + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; + struct iwl_mvm_time_event_data time_event_data; + struct iwl_mvm_time_event_data hs_time_event_data; + + struct iwl_mvm_int_sta bcast_sta; + + /* + * Assigned while mac80211 has the interface in a channel context, + * or, for P2P Device, while it exists. 
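+	 * It is set and cleared under mvm->mutex by the chanctx
+	 * assign/unassign callbacks and, for the P2P Device, by
+	 * iwl_mvm_roc().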
+ */ + struct iwl_mvm_phy_ctxt *phy_ctxt; + +#ifdef CONFIG_PM_SLEEP + /* WoWLAN GTK rekey data */ + struct { + u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; + __le64 replay_ctr; + bool valid; + } rekey_data; + + int tx_key_idx; + + bool seqno_valid; + u16 seqno; +#endif + +#if IS_ENABLED(CONFIG_IPV6) + /* IPv6 addresses for WoWLAN */ + struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; + int num_target_ipv6_addrs; +#endif + +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct dentry *dbgfs_dir; + struct dentry *dbgfs_slink; + struct iwl_dbgfs_pm dbgfs_pm; + struct iwl_dbgfs_bf dbgfs_bf; + struct iwl_mac_power_cmd mac_pwr_cmd; +#endif + + enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; + + /* FW identified misbehaving AP */ + u8 uapsd_misbehaving_bssid[ETH_ALEN]; + + /* Indicates that CSA countdown may be started */ + bool csa_countdown; + bool csa_failed; + + /* TCP Checksum Offload */ + netdev_features_t features; +}; + +static inline struct iwl_mvm_vif * +iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) +{ + return (void *)vif->drv_priv; +} + +extern const u8 tid_to_mac80211_ac[]; + +#define IWL_MVM_SCAN_STOPPING_SHIFT 8 + +enum iwl_scan_status { + IWL_MVM_SCAN_REGULAR = BIT(0), + IWL_MVM_SCAN_SCHED = BIT(1), + IWL_MVM_SCAN_NETDETECT = BIT(2), + + IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8), + IWL_MVM_SCAN_STOPPING_SCHED = BIT(9), + IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10), + + IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | + IWL_MVM_SCAN_STOPPING_REGULAR, + IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED | + IWL_MVM_SCAN_STOPPING_SCHED, + IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | + IWL_MVM_SCAN_STOPPING_NETDETECT, + + IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, + IWL_MVM_SCAN_MASK = 0xff, +}; + +/** + * struct iwl_nvm_section - describes an NVM section in memory. + * + * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, + * and saved for later use by the driver. Not all NVM sections are saved + * this way, only the needed ones. + */ +struct iwl_nvm_section { + u16 length; + const u8 *data; +}; + +/** + * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure + * @ct_kill_exit: worker to exit thermal kill + * @dynamic_smps: Is thermal throttling enabled dynamic_smps? + * @tx_backoff: The current thremal throttling tx backoff in uSec. + * @min_backoff: The minimal tx backoff due to power restrictions + * @params: Parameters to configure the thermal throttling algorithm. + * @throttle: Is thermal throttling is active? 
+ */ +struct iwl_mvm_tt_mgmt { + struct delayed_work ct_kill_exit; + bool dynamic_smps; + u32 tx_backoff; + u32 min_backoff; + struct iwl_tt_params params; + bool throttle; +}; + +#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 + +struct iwl_mvm_frame_stats { + u32 legacy_frames; + u32 ht_frames; + u32 vht_frames; + u32 bw_20_frames; + u32 bw_40_frames; + u32 bw_80_frames; + u32 bw_160_frames; + u32 sgi_frames; + u32 ngi_frames; + u32 siso_frames; + u32 mimo2_frames; + u32 agg_frames; + u32 ampdu_count; + u32 success_frames; + u32 fail_frames; + u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES]; + int last_frame_idx; +}; + +enum { + D0I3_DEFER_WAKEUP, + D0I3_PENDING_WAKEUP, +}; + +#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff +#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 +#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 + +enum iwl_mvm_tdls_cs_state { + IWL_MVM_TDLS_SW_IDLE = 0, + IWL_MVM_TDLS_SW_REQ_SENT, + IWL_MVM_TDLS_SW_RESP_RCVD, + IWL_MVM_TDLS_SW_REQ_RCVD, + IWL_MVM_TDLS_SW_ACTIVE, +}; + +struct iwl_mvm_shared_mem_cfg { + u32 shared_mem_addr; + u32 shared_mem_size; + u32 sample_buff_addr; + u32 sample_buff_size; + u32 txfifo_addr; + u32 txfifo_size[TX_FIFO_MAX_NUM]; + u32 rxfifo_size[RX_FIFO_MAX_NUM]; + u32 page_buff_addr; + u32 page_buff_size; +}; + +struct iwl_mvm { + /* for logger access */ + struct device *dev; + + struct iwl_trans *trans; + const struct iwl_fw *fw; + const struct iwl_cfg *cfg; + struct iwl_phy_db *phy_db; + struct ieee80211_hw *hw; + + /* for protecting access to iwl_mvm */ + struct mutex mutex; + struct list_head async_handlers_list; + spinlock_t async_handlers_lock; + struct work_struct async_handlers_wk; + + struct work_struct roc_done_wk; + + unsigned long status; + + /* + * for beacon filtering - + * currently only one interface can be supported + */ + struct iwl_mvm_vif *bf_allowed_vif; + + enum iwl_ucode_type cur_ucode; + bool ucode_loaded; + bool calibrating; + u32 error_event_table; + u32 log_event_table; + u32 umac_error_event_table; + bool support_umac_log; + struct iwl_sf_region sf_space; + + u32 ampdu_ref; + + struct iwl_notif_wait_data notif_wait; + + struct mvm_statistics_rx rx_stats; + + struct { + u64 rx_time; + u64 tx_time; + u64 on_time_rf; + u64 on_time_scan; + } radio_stats, accu_radio_stats; + + struct { + /* Map to HW queue */ + u32 hw_queue_to_mac80211; + u8 hw_queue_refcount; + bool setup_reserved; + u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ + } queue_info[IWL_MAX_HW_QUEUES]; + spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ + atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; + + const char *nvm_file_name; + struct iwl_nvm_data *nvm_data; + /* NVM sections */ + struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; + + /* Paging section */ + struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; + u16 num_of_paging_blk; + u16 num_of_pages_in_last_blk; + + /* EEPROM MAC addresses */ + struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; + + /* data related to data path */ + struct iwl_rx_phy_info last_phy_info; + struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; + struct work_struct sta_drained_wk; + unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; + atomic_t pending_frames[IWL_MVM_STATION_COUNT]; + u32 tfd_drained[IWL_MVM_STATION_COUNT]; + u8 rx_ba_sessions; + + /* configured by mac80211 */ + u32 rts_threshold; + + /* Scan status, cmd (pre-allocated) and auxiliary station */ + unsigned int scan_status; + void *scan_cmd; + struct 
iwl_mcast_filter_cmd *mcast_filter_cmd; + + /* max number of simultaneous scans the FW supports */ + unsigned int max_scans; + + /* UMAC scan tracking */ + u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; + + /* rx chain antennas set through debugfs for the scan command */ + u8 scan_rx_ant; + +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING + /* broadcast filters to configure for each associated station */ + const struct iwl_fw_bcast_filter *bcast_filters; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct { + bool override; + struct iwl_bcast_filter_cmd cmd; + } dbgfs_bcast_filtering; +#endif +#endif + + /* Internal station */ + struct iwl_mvm_int_sta aux_sta; + + bool last_ebs_successful; + + u8 scan_last_antenna_idx; /* to toggle TX between antennas */ + u8 mgmt_last_antenna_idx; + + /* last smart fifo state that was successfully sent to firmware */ + enum iwl_sf_state sf_state; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct dentry *debugfs_dir; + u32 dbgfs_sram_offset, dbgfs_sram_len; + u32 dbgfs_prph_reg_addr; + bool disable_power_off; + bool disable_power_off_d3; + + bool scan_iter_notif_enabled; + + struct debugfs_blob_wrapper nvm_hw_blob; + struct debugfs_blob_wrapper nvm_sw_blob; + struct debugfs_blob_wrapper nvm_calib_blob; + struct debugfs_blob_wrapper nvm_prod_blob; + struct debugfs_blob_wrapper nvm_phy_sku_blob; + + struct iwl_mvm_frame_stats drv_rx_stats; + spinlock_t drv_stats_lock; + u16 dbgfs_rx_phyinfo; +#endif + + struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; + + struct list_head time_event_list; + spinlock_t time_event_lock; + + /* + * A bitmap indicating the index of the key in use. The firmware + * can hold 16 keys at most. Reflect this fact. + */ + unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; + u8 fw_key_deleted[STA_KEY_MAX_NUM]; + + /* references taken by the driver and spinlock protecting them */ + spinlock_t refs_lock; + u8 refs[IWL_MVM_REF_COUNT]; + + u8 vif_count; + + /* -1 for always, 0 for never, >0 for that many times */ + s8 restart_fw; + u8 fw_dbg_conf; + struct delayed_work fw_dump_wk; + struct iwl_mvm_dump_desc *fw_dump_desc; + struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; + +#ifdef CONFIG_IWLWIFI_LEDS + struct led_classdev led; +#endif + + struct ieee80211_vif *p2p_device_vif; + +#ifdef CONFIG_PM_SLEEP + struct wiphy_wowlan_support wowlan; + int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; + + /* sched scan settings for net detect */ + struct cfg80211_sched_scan_request *nd_config; + struct ieee80211_scan_ies nd_ies; + struct cfg80211_match_set *nd_match_sets; + int n_nd_match_sets; + struct ieee80211_channel **nd_channels; + int n_nd_channels; + bool net_detect; +#ifdef CONFIG_IWLWIFI_DEBUGFS + bool d3_wake_sysassert; + bool d3_test_active; + bool store_d3_resume_sram; + void *d3_resume_sram; + u32 d3_test_pme_ptr; + struct ieee80211_vif *keep_vif; + u32 last_netdetect_scans; /* no. 
of scans in the last net-detect wake */ +#endif +#endif + + /* d0i3 */ + u8 d0i3_ap_sta_id; + bool d0i3_offloading; + struct work_struct d0i3_exit_work; + struct sk_buff_head d0i3_tx; + /* protect d0i3_suspend_flags */ + struct mutex d0i3_suspend_mutex; + unsigned long d0i3_suspend_flags; + /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ + spinlock_t d0i3_tx_lock; + wait_queue_head_t d0i3_exit_waitq; + + /* BT-Coex */ + u8 bt_ack_kill_msk[NUM_PHY_CTX]; + u8 bt_cts_kill_msk[NUM_PHY_CTX]; + + struct iwl_bt_coex_profile_notif_old last_bt_notif_old; + struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old; + struct iwl_bt_coex_profile_notif last_bt_notif; + struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; + + u32 last_ant_isol; + u8 last_corun_lut; + u8 bt_tx_prio; + enum iwl_bt_force_ant_mode bt_force_ant_mode; + + /* Aux ROC */ + struct list_head aux_roc_te_list; + + /* Thermal Throttling and CTkill */ + struct iwl_mvm_tt_mgmt thermal_throttle; + s32 temperature; /* Celsius */ + /* + * Debug option to set the NIC temperature. This option makes the + * driver think this is the actual NIC temperature, and ignore the + * real temperature that is received from the fw + */ + bool temperature_test; /* Debug test temperature is enabled */ + + struct iwl_time_quota_cmd last_quota_cmd; + +#ifdef CONFIG_NL80211_TESTMODE + u32 noa_duration; + struct ieee80211_vif *noa_vif; +#endif + + /* Tx queues */ + u8 aux_queue; + u8 first_agg_queue; + u8 last_agg_queue; + + /* Indicate if device power save is allowed */ + u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ + + struct ieee80211_vif __rcu *csa_vif; + struct ieee80211_vif __rcu *csa_tx_blocked_vif; + u8 csa_tx_block_bcn_timeout; + + /* system time of last beacon (for AP/GO interface) */ + u32 ap_last_beacon_gp2; + + bool lar_regdom_set; + enum iwl_mcc_source mcc_src; + + u8 low_latency_agg_frame_limit; + + /* TDLS channel switch data */ + struct { + struct delayed_work dwork; + enum iwl_mvm_tdls_cs_state state; + + /* + * Current cs sta - might be different from periodic cs peer + * station. Value is meaningless when the cs-state is idle. + */ + u8 cur_sta_id; + + /* TDLS periodic channel-switch peer */ + struct { + u8 sta_id; + u8 op_class; + bool initiator; /* are we the link initiator */ + struct cfg80211_chan_def chandef; + struct sk_buff *skb; /* ch sw template */ + u32 ch_sw_tm_ie; + + /* timestamp of last ch-sw request sent (GP2 time) */ + u32 sent_timestamp; + } peer; + } tdls_cs; + + struct iwl_mvm_shared_mem_cfg shared_mem_cfg; + + u32 ciphers[6]; + struct iwl_mvm_tof_data tof_data; +}; + +/* Extract MVM priv from op_mode and _hw */ +#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \ + ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific) + +#define IWL_MAC80211_GET_MVM(_hw) \ + IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) + +enum iwl_mvm_status { + IWL_MVM_STATUS_HW_RFKILL, + IWL_MVM_STATUS_HW_CTKILL, + IWL_MVM_STATUS_ROC_RUNNING, + IWL_MVM_STATUS_IN_HW_RESTART, + IWL_MVM_STATUS_IN_D0I3, + IWL_MVM_STATUS_ROC_AUX_RUNNING, + IWL_MVM_STATUS_D3_RECONFIG, + IWL_MVM_STATUS_DUMPING_FW_LOG, +}; + +static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) +{ + return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || + test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); +} + +static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) +{ + return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); +} + +/* Must be called with rcu_read_lock() held and it can only be + * released when mvmsta is not needed anymore. 
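+ * A minimal usage sketch:
+ *
+ *	rcu_read_lock();
+ *	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ *	if (mvmsta)
+ *		... use mvmsta, but do not keep the pointer ...
+ *	rcu_read_unlock();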
+ */ +static inline struct iwl_mvm_sta * +iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id) +{ + struct ieee80211_sta *sta; + + if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) + return NULL; + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + /* This can happen if the station has been removed right now */ + if (IS_ERR_OR_NULL(sta)) + return NULL; + + return iwl_mvm_sta_from_mac80211(sta); +} + +static inline struct iwl_mvm_sta * +iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) +{ + struct ieee80211_sta *sta; + + if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) + return NULL; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + /* This can happen if the station has been removed right now */ + if (IS_ERR_OR_NULL(sta)) + return NULL; + + return iwl_mvm_sta_from_mac80211(sta); +} + +static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) +{ + return mvm->trans->cfg->d0i3 && + mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF && + !iwlwifi_mod_params.d0i3_disable && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); +} + +static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DQA_SUPPORT); +} + +static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) +{ + bool nvm_lar = mvm->nvm_data->lar_enabled; + bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + + if (iwlwifi_mod_params.lar_disable) + return false; + + /* + * Enable LAR only if it is supported by the FW (TLV) && + * enabled in the NVM + */ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) + return nvm_lar && tlv_lar; + else + return tlv_lar; +} + +static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); +} + +static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && + IWL_MVM_BT_COEX_CORUNNING; +} + +static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && + IWL_MVM_BT_COEX_RRC; +} + +static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); +} + +static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) +{ + /* firmware flag isn't defined yet */ + return false; +} + +extern const u8 iwl_mvm_ac_to_tx_fifo[]; + +struct iwl_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ +}; + +void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); +int __iwl_mvm_mac_start(struct iwl_mvm *mvm); + +/****************** + * MVM Methods + ******************/ +/* uCode */ +int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); + +/* Utils */ +int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, + enum ieee80211_band band); +void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, + enum ieee80211_band band, + struct ieee80211_tx_rate *r); +u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); +void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); +u8 first_antenna(u8 mask); +u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); + +/* Tx / Host Commands */ +int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, + struct iwl_host_cmd *cmd); +int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, + u32 flags, u16 len, const void *data); +int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, + struct iwl_host_cmd *cmd, + u32 *status); +int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, + u16 len, const void *data, + u32 *status); +int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta); +int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); +void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, u8 sta_id); +void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc); +#ifdef CONFIG_IWLWIFI_DEBUG +const char *iwl_mvm_get_tx_fail_reason(u32 status); +#else +static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } +#endif +int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags); +void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); + +static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); +} + +static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) +{ + flush_work(&mvm->async_handlers_wk); +} + +/* Statistics */ +void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt); +void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); +void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); + +/* NVM */ +int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic); +int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); + +static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) +{ + return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? + mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : + mvm->fw->valid_tx_ant; +} + +static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) +{ + return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? 
+ mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : + mvm->fw->valid_rx_ant; +} + +static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm) +{ + u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | + FW_PHY_CFG_RX_CHAIN); + u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm); + u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); + + phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | + valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS; + + return mvm->fw->phy_config & phy_config; +} + +int iwl_mvm_up(struct iwl_mvm *mvm); +int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); + +int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); +bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, + struct iwl_bcast_filter_cmd *cmd); + +/* + * FW notifications / CMD responses handlers + * Convention: iwl_mvm_rx_ + */ +void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + +/* MVM PHY */ +int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic); +int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic); +void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt); +void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt); +int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); +u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); +u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); + +/* MAC (virtual interface) programming */ +int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool force_assoc_off, const u8 *bssid_override); +int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif); +int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *exclude_vif); +/* Bindings */ +int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + +/* Quota management */ +int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, + struct ieee80211_vif *disabled_vif); 
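+/*
+ * Note: most callers in mac80211.c pass force_upload = false and a NULL
+ * disabled_vif; the CSA unassign path passes the vif being moved (see
+ * __iwl_mvm_unassign_vif_chanctx()) so that it is left out of the quota
+ * allocation while the switch is in flight.
+ */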
+ +/* Scanning */ +int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req, + struct ieee80211_scan_ies *ies); +int iwl_mvm_scan_size(struct iwl_mvm *mvm); +int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); +int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); +void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); + +/* Scheduled scan */ +void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies, + int type); +void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + +/* UMAC scan */ +int iwl_mvm_config_scan(struct iwl_mvm *mvm); +void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + +/* MVM debugfs */ +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); +void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +#else +static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, + struct dentry *dbgfs_dir) +{ + return 0; +} +static inline void +iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ +} +static inline void +iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ +} +#endif /* CONFIG_IWLWIFI_DEBUGFS */ + +/* rate scaling */ +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); +void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); +int rs_pretty_print_rate(char *buf, const u32 rate); +void rs_update_last_rssi(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_rx_status *rx_status); + +/* power management */ +int iwl_mvm_power_update_device(struct iwl_mvm *mvm); +int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); +int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); +int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + char *buf, int bufsz); + +void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + +#ifdef CONFIG_IWLWIFI_LEDS +int iwl_mvm_leds_init(struct iwl_mvm *mvm); +void iwl_mvm_leds_exit(struct iwl_mvm *mvm); +#else +static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) +{ + return 0; +} +static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm) +{ +} +#endif + +/* D3 (WoWLAN, NetDetect) */ +int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); +int iwl_mvm_resume(struct ieee80211_hw *hw); +void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled); +void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data); +void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev); +void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int idx); +extern const struct file_operations iwl_dbgfs_d3_test_ops; +#ifdef CONFIG_PM_SLEEP +void 
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +#else +static inline void +iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ +} +#endif +void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, + struct iwl_wowlan_config_cmd *cmd); +int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool disable_offloading, + u32 cmd_flags); + +/* D0i3 */ +void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); +void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); +int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); +bool iwl_mvm_ref_taken(struct iwl_mvm *mvm); +void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); +int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode); +int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode); +int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); + +/* BT Coex */ +int iwl_send_bt_init_conf(struct iwl_mvm *mvm); +void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum ieee80211_rssi_event_data); +void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); +u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, + struct ieee80211_sta *sta); +bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, + struct ieee80211_sta *sta); +bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); +bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); +bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, + enum ieee80211_band band); +u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *info, u8 ac); + +bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); +void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); +int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); +void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum ieee80211_rssi_event_data); +u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, + struct ieee80211_sta *sta); +bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, + struct ieee80211_sta *sta); +bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, + enum ieee80211_band band); +void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + +/* beacon filtering */ +#ifdef CONFIG_IWLWIFI_DEBUGFS +void +iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd); +#else +static inline void +iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd) +{} +#endif +int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool enable, u32 flags); +int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags); +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags); +/* SMPS */ +void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request); +bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm); + +/* Low latency */ +int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool value); +/* get SystemLowLatencyMode - only needed for beacon threshold? 
*/ +bool iwl_mvm_low_latency(struct iwl_mvm *mvm); +/* get VMACLowLatencyMode */ +static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) +{ + /* + * should this consider associated/active/... state? + * + * Normally low-latency should only be active on interfaces + * that are active, but at least with debugfs it can also be + * enabled on interfaces that aren't active. However, when + * interface aren't active then they aren't added into the + * binding, so this has no real impact. For now, just return + * the current desired low-latency state. + */ + + return mvmvif->low_latency; +} + +/* hw scheduler queue config */ +void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout); +/* + * Disable a TXQ. + * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. + */ +void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 tid, u8 flags); +int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq); + +static inline +void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 fifo, u16 ssn, unsigned int wdg_timeout) +{ + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + + iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); +} + +static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, + int mac80211_queue, int fifo, + int sta_id, int tid, int frame_limit, + u16 ssn, unsigned int wdg_timeout) +{ + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = sta_id, + .tid = tid, + .frame_limit = frame_limit, + .aggregate = true, + }; + + iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); +} + +/* Thermal management and CT-kill */ +void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); +void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); +void iwl_mvm_temp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_tt_handler(struct iwl_mvm *mvm); +void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); +void iwl_mvm_tt_exit(struct iwl_mvm *mvm); +void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); +int iwl_mvm_get_temp(struct iwl_mvm *mvm); + +/* Location Aware Regulatory */ +struct iwl_mcc_update_resp * +iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, + enum iwl_mcc_source src_id); +int iwl_mvm_init_mcc(struct iwl_mvm *mvm); +void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, + const char *alpha2, + enum iwl_mcc_source src_id, + bool *changed); +struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, + bool *changed); +int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); +void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); + +/* smart fifo */ +int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool added_vif); + +/* TDLS */ + +/* + * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present. + * This TID is marked as used vs the AP and all connected TDLS peers. 
+ */ +#define IWL_MVM_TDLS_FW_TID 4 + +int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); +void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool sta_added); +void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); +void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_tdls_ch_sw_params *params); +void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); + +struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); + +void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); +void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); + +int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); +int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, + struct iwl_mvm_dump_desc *desc, + struct iwl_fw_dbg_trigger_tlv *trigger); +void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); +int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) __printf(3, 4); +unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool tdls, bool cmd_q); +void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + const char *errmsg); +static inline bool +iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, + struct ieee80211_vif *vif) +{ + u32 trig_vif = le32_to_cpu(trig->vif_type); + + return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; +} + +static inline bool +iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && + (mvm->fw_dbg_conf == FW_DBG_INVALID || + (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids)))); +} + +static inline bool +iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif)) + return false; + + return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig); +} + +static inline void +iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_fw_dbg_trigger trig) +{ + struct iwl_fw_dbg_trigger_tlv *trigger; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig)) + return; + + trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig); + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); +} + +#endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c new file mode 100644 index 000000000000..2ee0f6fe56a1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -0,0 +1,864 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include +#include +#include +#include +#include "iwl-trans.h" +#include "iwl-csr.h" +#include "mvm.h" +#include "iwl-eeprom-parse.h" +#include "iwl-eeprom-read.h" +#include "iwl-nvm-parse.h" +#include "iwl-prph.h" + +/* Default NVM size to read */ +#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) +#define IWL_MAX_NVM_SECTION_SIZE 0x1b58 +#define IWL_MAX_NVM_8000_SECTION_SIZE 0x1ffc + +#define NVM_WRITE_OPCODE 1 +#define NVM_READ_OPCODE 0 + +/* load nvm chunk response */ +enum { + READ_NVM_CHUNK_SUCCEED = 0, + READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1 +}; + +/* + * prepare the NVM host command w/ the pointers to the nvm buffer + * and send it to fw + */ +static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section, + u16 offset, u16 length, const u8 *data) +{ + struct iwl_nvm_access_cmd nvm_access_cmd = { + .offset = cpu_to_le16(offset), + .length = cpu_to_le16(length), + .type = cpu_to_le16(section), + .op_code = NVM_WRITE_OPCODE, + }; + struct iwl_host_cmd cmd = { + .id = NVM_ACCESS_CMD, + .len = { sizeof(struct iwl_nvm_access_cmd), length }, + .flags = CMD_SEND_IN_RFKILL, + .data = { &nvm_access_cmd, data }, + /* data may come from vmalloc, so use _DUP */ + .dataflags = { 0, IWL_HCMD_DFL_DUP }, + }; + + return iwl_mvm_send_cmd(mvm, &cmd); +} + +static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, + u16 offset, u16 length, u8 *data) +{ + struct iwl_nvm_access_cmd nvm_access_cmd = { + .offset = cpu_to_le16(offset), + .length = cpu_to_le16(length), + .type = cpu_to_le16(section), + .op_code = NVM_READ_OPCODE, + }; + struct iwl_nvm_access_resp *nvm_resp; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = NVM_ACCESS_CMD, + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &nvm_access_cmd, }, + }; + int ret, bytes_read, offset_read; + u8 *resp_data; + + cmd.len[0] = sizeof(struct iwl_nvm_access_cmd); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + pkt = cmd.resp_pkt; + + /* Extract NVM response */ + nvm_resp = (void *)pkt->data; + ret = le16_to_cpu(nvm_resp->status); + bytes_read = le16_to_cpu(nvm_resp->length); + offset_read = le16_to_cpu(nvm_resp->offset); + resp_data = nvm_resp->data; + if (ret) { + if ((offset != 0) && + (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) { + /* + * meaning of NOT_VALID_ADDRESS: + * driver try to read chunk from address that is + * multiple of 2K and got an error since addr is empty. + * meaning of (offset != 0): driver already + * read valid data from another chunk so this case + * is not an error. 
+ */ + IWL_DEBUG_EEPROM(mvm->trans->dev, + "NVM access command failed on offset 0x%x since that section size is multiple 2K\n", + offset); + ret = 0; + } else { + IWL_DEBUG_EEPROM(mvm->trans->dev, + "NVM access command failed with status %d (device: %s)\n", + ret, mvm->cfg->name); + ret = -EIO; + } + goto exit; + } + + if (offset_read != offset) { + IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n", + offset_read); + ret = -EINVAL; + goto exit; + } + + /* Write data to NVM */ + memcpy(data + offset, resp_data, bytes_read); + ret = bytes_read; + +exit: + iwl_free_resp(&cmd); + return ret; +} + +static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, + const u8 *data, u16 length) +{ + int offset = 0; + + /* copy data in chunks of 2k (and remainder if any) */ + + while (offset < length) { + int chunk_size, ret; + + chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, + length - offset); + + ret = iwl_nvm_write_chunk(mvm, section, offset, + chunk_size, data + offset); + if (ret < 0) + return ret; + + offset += chunk_size; + } + + return 0; +} + +/* + * Reads an NVM section completely. + * NICs prior to 7000 family doesn't have a real NVM, but just read + * section 0 which is the EEPROM. Because the EEPROM reading is unlimited + * by uCode, we need to manually check in this case that we don't + * overflow and try to read more than the EEPROM size. + * For 7000 family NICs, we supply the maximal size we can read, and + * the uCode fills the response with as much data as we can, + * without overflowing, so no check is needed. + */ +static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, + u8 *data, u32 size_read) +{ + u16 length, offset = 0; + int ret; + + /* Set nvm section read length */ + length = IWL_NVM_DEFAULT_CHUNK_SIZE; + + ret = length; + + /* Read the NVM until exhausted (reading less than requested) */ + while (ret == length) { + /* Check no memory assumptions fail and cause an overflow */ + if ((size_read + offset + length) > + mvm->cfg->base_params->eeprom_size) { + IWL_ERR(mvm, "EEPROM size is too small for NVM\n"); + return -ENOBUFS; + } + + ret = iwl_nvm_read_chunk(mvm, section, offset, length, data); + if (ret < 0) { + IWL_DEBUG_EEPROM(mvm->trans->dev, + "Cannot read NVM from section %d offset %d, length %d\n", + section, offset, length); + return ret; + } + offset += ret; + } + + IWL_DEBUG_EEPROM(mvm->trans->dev, + "NVM section %d read completed\n", section); + return offset; +} + +static struct iwl_nvm_data * +iwl_parse_nvm_sections(struct iwl_mvm *mvm) +{ + struct iwl_nvm_section *sections = mvm->nvm_sections; + const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; + bool lar_enabled; + u32 mac_addr0, mac_addr1; + + /* Checking for required sections */ + if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || + !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { + IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); + return NULL; + } + } else { + /* SW and REGULATORY sections are mandatory */ + if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || + !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) { + IWL_ERR(mvm, + "Can't parse empty family 8000 OTP/NVM sections\n"); + return NULL; + } + /* MAC_OVERRIDE or at least HW section must exist */ + if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data && + !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) { + IWL_ERR(mvm, + "Can't parse mac_address, empty sections\n"); + return NULL; + } + + /* PHY_SKU section is 
mandatory in B0 */ + if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { + IWL_ERR(mvm, + "Can't parse phy_sku in B0, empty sections\n"); + return NULL; + } + } + + if (WARN_ON(!mvm->cfg)) + return NULL; + + /* read the mac address from WFMP registers */ + mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0); + mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1); + + hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; + sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; + calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; + regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; + mac_override = + (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; + phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; + + lar_enabled = !iwlwifi_mod_params.lar_disable && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + + return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, + regulatory, mac_override, phy_sku, + mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, + lar_enabled, mac_addr0, mac_addr1, + mvm->trans->hw_id); +} + +#define MAX_NVM_FILE_LEN 16384 + +/* + * Reads external NVM from a file into mvm->nvm_sections + * + * HOW TO CREATE THE NVM FILE FORMAT: + * ------------------------------ + * 1. create hex file, format: + * 3800 -> header + * 0000 -> header + * 5a40 -> data + * + * rev - 6 bit (word1) + * len - 10 bit (word1) + * id - 4 bit (word2) + * rsv - 12 bit (word2) + * + * 2. flip 8bits with 8 bits per line to get the right NVM file format + * + * 3. create binary file from the hex file + * + * 4. save as "iNVM_xxx.bin" under /lib/firmware + */ +static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) +{ + int ret, section_size; + u16 section_id; + const struct firmware *fw_entry; + const struct { + __le16 word1; + __le16 word2; + u8 data[]; + } *file_sec; + const u8 *eof, *temp; + int max_section_size; + const __le32 *dword_buff; + +#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) +#define NVM_WORD2_ID(x) (x >> 12) +#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8)) +#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4) +#define NVM_HEADER_0 (0x2A504C54) +#define NVM_HEADER_1 (0x4E564D2A) +#define NVM_HEADER_SIZE (4 * sizeof(u32)) + + IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); + + /* Maximal size depends on HW family and step */ + if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + max_section_size = IWL_MAX_NVM_SECTION_SIZE; + else + max_section_size = IWL_MAX_NVM_8000_SECTION_SIZE; + + /* + * Obtain NVM image via request_firmware. Since we already used + * request_firmware_nowait() for the firmware binary load and only + * get here after that we assume the NVM request can be satisfied + * synchronously. + */ + ret = request_firmware(&fw_entry, mvm->nvm_file_name, + mvm->trans->dev); + if (ret) { + IWL_ERR(mvm, "ERROR: %s isn't available %d\n", + mvm->nvm_file_name, ret); + return ret; + } + + IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", + mvm->nvm_file_name, fw_entry->size); + + if (fw_entry->size > MAX_NVM_FILE_LEN) { + IWL_ERR(mvm, "NVM file too large\n"); + ret = -EINVAL; + goto out; + } + + eof = fw_entry->data + fw_entry->size; + dword_buff = (__le32 *)fw_entry->data; + + /* some NVM file will contain a header. + * The header is identified by 2 dwords header as follow: + * dword[0] = 0x2A504C54 + * dword[1] = 0x4E564D2A + * + * This header must be skipped when providing the NVM data to the FW. 
+ */ + if (fw_entry->size > NVM_HEADER_SIZE && + dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && + dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { + file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); + IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); + IWL_INFO(mvm, "NVM Manufacturing date %08X\n", + le32_to_cpu(dword_buff[3])); + + /* nvm file validation, dword_buff[2] holds the file version */ + if ((CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP && + le32_to_cpu(dword_buff[2]) < 0xE4A) || + (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP && + le32_to_cpu(dword_buff[2]) >= 0xE4A)) { + ret = -EFAULT; + goto out; + } + } else { + file_sec = (void *)fw_entry->data; + } + + while (true) { + if (file_sec->data > eof) { + IWL_ERR(mvm, + "ERROR - NVM file too short for section header\n"); + ret = -EINVAL; + break; + } + + /* check for EOF marker */ + if (!file_sec->word1 && !file_sec->word2) { + ret = 0; + break; + } + + if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + section_size = + 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); + section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); + } else { + section_size = 2 * NVM_WORD2_LEN_FAMILY_8000( + le16_to_cpu(file_sec->word2)); + section_id = NVM_WORD1_ID_FAMILY_8000( + le16_to_cpu(file_sec->word1)); + } + + if (section_size > max_section_size) { + IWL_ERR(mvm, "ERROR - section too large (%d)\n", + section_size); + ret = -EINVAL; + break; + } + + if (!section_size) { + IWL_ERR(mvm, "ERROR - section empty\n"); + ret = -EINVAL; + break; + } + + if (file_sec->data + section_size > eof) { + IWL_ERR(mvm, + "ERROR - NVM file too short for section (%d bytes)\n", + section_size); + ret = -EINVAL; + break; + } + + if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, + "Invalid NVM section ID %d\n", section_id)) { + ret = -EINVAL; + break; + } + + temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + break; + } + kfree(mvm->nvm_sections[section_id].data); + mvm->nvm_sections[section_id].data = temp; + mvm->nvm_sections[section_id].length = section_size; + + /* advance to the next section */ + file_sec = (void *)(file_sec->data + section_size); + } +out: + release_firmware(fw_entry); + return ret; +} + +/* Loads the NVM data stored in mvm->nvm_sections into the NIC */ +int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) +{ + int i, ret = 0; + struct iwl_nvm_section *sections = mvm->nvm_sections; + + IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n"); + + for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) { + if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length) + continue; + ret = iwl_nvm_write_section(mvm, i, sections[i].data, + sections[i].length); + if (ret < 0) { + IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret); + break; + } + } + return ret; +} + +int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) +{ + int ret, section; + u32 size_read = 0; + u8 *nvm_buffer, *temp; + const char *nvm_file_B = mvm->cfg->default_nvm_file_B_step; + const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step; + + if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) + return -EINVAL; + + /* load NVM values from nic */ + if (read_nvm_from_nic) { + /* Read From FW NVM */ + IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); + + nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, + GFP_KERNEL); + if (!nvm_buffer) + return -ENOMEM; + for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) { + /* we override the constness for initial read */ + 
ret = iwl_nvm_read_section(mvm, section, nvm_buffer, + size_read); + if (ret < 0) + continue; + size_read += ret; + temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + break; + } + mvm->nvm_sections[section].data = temp; + mvm->nvm_sections[section].length = ret; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + switch (section) { + case NVM_SECTION_TYPE_SW: + mvm->nvm_sw_blob.data = temp; + mvm->nvm_sw_blob.size = ret; + break; + case NVM_SECTION_TYPE_CALIBRATION: + mvm->nvm_calib_blob.data = temp; + mvm->nvm_calib_blob.size = ret; + break; + case NVM_SECTION_TYPE_PRODUCTION: + mvm->nvm_prod_blob.data = temp; + mvm->nvm_prod_blob.size = ret; + break; + case NVM_SECTION_TYPE_PHY_SKU: + mvm->nvm_phy_sku_blob.data = temp; + mvm->nvm_phy_sku_blob.size = ret; + break; + default: + if (section == mvm->cfg->nvm_hw_section_num) { + mvm->nvm_hw_blob.data = temp; + mvm->nvm_hw_blob.size = ret; + break; + } + } +#endif + } + if (!size_read) + IWL_ERR(mvm, "OTP is blank\n"); + kfree(nvm_buffer); + } + + /* Only if PNVM selected in the mod param - load external NVM */ + if (mvm->nvm_file_name) { + /* read External NVM file from the mod param */ + ret = iwl_mvm_read_external_nvm(mvm); + if (ret) { + /* choose the nvm_file name according to the + * HW step + */ + if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == + SILICON_B_STEP) + mvm->nvm_file_name = nvm_file_B; + else + mvm->nvm_file_name = nvm_file_C; + + if (ret == -EFAULT && mvm->nvm_file_name) { + /* in case nvm file was failed try again */ + ret = iwl_mvm_read_external_nvm(mvm); + if (ret) + return ret; + } else { + return ret; + } + } + } + + /* parse the relevant nvm sections */ + mvm->nvm_data = iwl_parse_nvm_sections(mvm); + if (!mvm->nvm_data) + return -ENODATA; + IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", + mvm->nvm_data->nvm_version); + + return 0; +} + +struct iwl_mcc_update_resp * +iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, + enum iwl_mcc_source src_id) +{ + struct iwl_mcc_update_cmd mcc_update_cmd = { + .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]), + .source_id = (u8)src_id, + }; + struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = MCC_UPDATE_CMD, + .flags = CMD_WANT_SKB, + .data = { &mcc_update_cmd }, + }; + + int ret; + u32 status; + int resp_len, n_channels; + u16 mcc; + + if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) + return ERR_PTR(-EOPNOTSUPP); + + cmd.len[0] = sizeof(struct iwl_mcc_update_cmd); + + IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", + alpha2[0], alpha2[1], src_id); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ERR_PTR(ret); + + pkt = cmd.resp_pkt; + + /* Extract MCC response */ + mcc_resp = (void *)pkt->data; + status = le32_to_cpu(mcc_resp->status); + + mcc = le16_to_cpu(mcc_resp->mcc); + + /* W/A for a FW/NVM issue - returns 0x00 for the world domain */ + if (mcc == 0) { + mcc = 0x3030; /* "00" - world */ + mcc_resp->mcc = cpu_to_le16(mcc); + } + + n_channels = __le32_to_cpu(mcc_resp->n_channels); + IWL_DEBUG_LAR(mvm, + "MCC response status: 0x%x. 
new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", + status, mcc, mcc >> 8, mcc & 0xff, + !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); + + resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32); + resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); + if (!resp_cp) { + ret = -ENOMEM; + goto exit; + } + + ret = 0; +exit: + iwl_free_resp(&cmd); + if (ret) + return ERR_PTR(ret); + return resp_cp; +} + +#ifdef CONFIG_ACPI +#define WRD_METHOD "WRDD" +#define WRDD_WIFI (0x07) +#define WRDD_WIGIG (0x10) + +static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd) +{ + union acpi_object *mcc_pkg, *domain_type, *mcc_value; + u32 i; + + if (wrdd->type != ACPI_TYPE_PACKAGE || + wrdd->package.count < 2 || + wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || + wrdd->package.elements[0].integer.value != 0) { + IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n"); + return 0; + } + + for (i = 1 ; i < wrdd->package.count ; ++i) { + mcc_pkg = &wrdd->package.elements[i]; + + if (mcc_pkg->type != ACPI_TYPE_PACKAGE || + mcc_pkg->package.count < 2 || + mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || + mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { + mcc_pkg = NULL; + continue; + } + + domain_type = &mcc_pkg->package.elements[0]; + if (domain_type->integer.value == WRDD_WIFI) + break; + + mcc_pkg = NULL; + } + + if (mcc_pkg) { + mcc_value = &mcc_pkg->package.elements[1]; + return mcc_value->integer.value; + } + + return 0; +} + +static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) +{ + acpi_handle root_handle; + acpi_handle handle; + struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + u32 mcc_val; + struct pci_dev *pdev = to_pci_dev(mvm->dev); + + root_handle = ACPI_HANDLE(&pdev->dev); + if (!root_handle) { + IWL_DEBUG_LAR(mvm, + "Could not retrieve root port ACPI handle\n"); + return -ENOENT; + } + + /* Get the method's handle */ + status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_LAR(mvm, "WRD method not found\n"); + return -ENOENT; + } + + /* Call WRDD with no arguments */ + status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status); + return -ENOENT; + } + + mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer); + kfree(wrdd.pointer); + if (!mcc_val) + return -ENOENT; + + mcc[0] = (mcc_val >> 8) & 0xff; + mcc[1] = mcc_val & 0xff; + mcc[2] = '\0'; + return 0; +} +#else /* CONFIG_ACPI */ +static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) +{ + return -ENOENT; +} +#endif + +int iwl_mvm_init_mcc(struct iwl_mvm *mvm) +{ + bool tlv_lar; + bool nvm_lar; + int retval; + struct ieee80211_regdomain *regd; + char mcc[3]; + + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + nvm_lar = mvm->nvm_data->lar_enabled; + if (tlv_lar != nvm_lar) + IWL_INFO(mvm, + "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n", + tlv_lar ? "enabled" : "disabled", + nvm_lar ? "enabled" : "disabled"); + } + + if (!iwl_mvm_is_lar_supported(mvm)) + return 0; + + /* + * try to replay the last set MCC to FW. If it doesn't exist, + * queue an update to cfg80211 to retrieve the default alpha2 from FW. 
+ */ + retval = iwl_mvm_init_fw_regd(mvm); + if (retval != -ENOENT) + return retval; + + /* + * Driver regulatory hint for initial update, this also informs the + * firmware we support wifi location updates. + * Disallow scans that might crash the FW while the LAR regdomain + * is not set. + */ + mvm->lar_regdom_set = false; + + regd = iwl_mvm_get_current_regdomain(mvm, NULL); + if (IS_ERR_OR_NULL(regd)) + return -EIO; + + if (iwl_mvm_is_wifi_mcc_supported(mvm) && + !iwl_mvm_get_bios_mcc(mvm, mcc)) { + kfree(regd); + regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, + MCC_SOURCE_BIOS, NULL); + if (IS_ERR_OR_NULL(regd)) + return -EIO; + } + + retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); + kfree(regd); + return retval; +} + +void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mcc_chub_notif *notif = (void *)pkt->data; + enum iwl_mcc_source src; + char mcc[3]; + struct ieee80211_regdomain *regd; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) + return; + + mcc[0] = notif->mcc >> 8; + mcc[1] = notif->mcc & 0xff; + mcc[2] = '\0'; + src = notif->source_id; + + IWL_DEBUG_LAR(mvm, + "RX: received chub update mcc cmd (mcc '%s' src %d)\n", + mcc, src); + regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); + if (IS_ERR_OR_NULL(regd)) + return; + + regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); + kfree(regd); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c new file mode 100644 index 000000000000..68b0169c8892 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c @@ -0,0 +1,217 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include +#include "mvm.h" + +void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, + struct iwl_wowlan_config_cmd *cmd) +{ + int i; + + /* + * For QoS counters, we store the one to use next, so subtract 0x10 + * since the uCode will add 0x10 *before* using the value while we + * increment after using the value (i.e. store the next value to use). + */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + u16 seq = mvm_ap_sta->tid_data[i].seq_number; + seq -= 0x10; + cmd->qos_seq[i] = cpu_to_le16(seq); + } +} + +int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool disable_offloading, + u32 cmd_flags) +{ + union { + struct iwl_proto_offload_cmd_v1 v1; + struct iwl_proto_offload_cmd_v2 v2; + struct iwl_proto_offload_cmd_v3_small v3s; + struct iwl_proto_offload_cmd_v3_large v3l; + } cmd = {}; + struct iwl_host_cmd hcmd = { + .id = PROT_OFFLOAD_CONFIG_CMD, + .flags = cmd_flags, + .data[0] = &cmd, + .dataflags[0] = IWL_HCMD_DFL_DUP, + }; + struct iwl_proto_offload_cmd_common *common; + u32 enabled = 0, size; + u32 capa_flags = mvm->fw->ucode_capa.flags; +#if IS_ENABLED(CONFIG_IPV6) + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int i; + + if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL || + capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { + struct iwl_ns_config *nsc; + struct iwl_targ_addr *addrs; + int n_nsc, n_addrs; + int c; + + if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { + nsc = cmd.v3s.ns_config; + n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S; + addrs = cmd.v3s.targ_addrs; + n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S; + } else { + nsc = cmd.v3l.ns_config; + n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L; + addrs = cmd.v3l.targ_addrs; + n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L; + } + + if (mvmvif->num_target_ipv6_addrs) + enabled |= IWL_D3_PROTO_OFFLOAD_NS; + + /* + * For each address we have (and that will fit) fill a target + * address struct and combine for NS offload structs with the + * solicited node addresses. 
+ */ + for (i = 0, c = 0; + i < mvmvif->num_target_ipv6_addrs && + i < n_addrs && c < n_nsc; i++) { + struct in6_addr solicited_addr; + int j; + + addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i], + &solicited_addr); + for (j = 0; j < c; j++) + if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr, + &solicited_addr) == 0) + break; + if (j == c) + c++; + addrs[i].addr = mvmvif->target_ipv6_addrs[i]; + addrs[i].config_num = cpu_to_le32(j); + nsc[j].dest_ipv6_addr = solicited_addr; + memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN); + } + + if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) + cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i); + else + cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i); + } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { + if (mvmvif->num_target_ipv6_addrs) { + enabled |= IWL_D3_PROTO_OFFLOAD_NS; + memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN); + } + + BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) != + sizeof(mvmvif->target_ipv6_addrs[0])); + + for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, + IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) + memcpy(cmd.v2.target_ipv6_addr[i], + &mvmvif->target_ipv6_addrs[i], + sizeof(cmd.v2.target_ipv6_addr[i])); + } else { + if (mvmvif->num_target_ipv6_addrs) { + enabled |= IWL_D3_PROTO_OFFLOAD_NS; + memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN); + } + + BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) != + sizeof(mvmvif->target_ipv6_addrs[0])); + + for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, + IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) + memcpy(cmd.v1.target_ipv6_addr[i], + &mvmvif->target_ipv6_addrs[i], + sizeof(cmd.v1.target_ipv6_addr[i])); + } +#endif + + if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { + common = &cmd.v3s.common; + size = sizeof(cmd.v3s); + } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { + common = &cmd.v3l.common; + size = sizeof(cmd.v3l); + } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { + common = &cmd.v2.common; + size = sizeof(cmd.v2); + } else { + common = &cmd.v1.common; + size = sizeof(cmd.v1); + } + + if (vif->bss_conf.arp_addr_cnt) { + enabled |= IWL_D3_PROTO_OFFLOAD_ARP; + common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; + memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN); + } + + if (!disable_offloading) + common->enabled = cpu_to_le32(enabled); + + hcmd.len[0] = size; + return iwl_mvm_send_cmd(mvm, &hcmd); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c new file mode 100644 index 000000000000..13c97f665ba8 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -0,0 +1,1434 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include +#include +#include + +#include "iwl-notif-wait.h" +#include "iwl-trans.h" +#include "iwl-op-mode.h" +#include "iwl-fw.h" +#include "iwl-debug.h" +#include "iwl-drv.h" +#include "iwl-modparams.h" +#include "mvm.h" +#include "iwl-phy-db.h" +#include "iwl-eeprom-parse.h" +#include "iwl-csr.h" +#include "iwl-io.h" +#include "iwl-prph.h" +#include "rs.h" +#include "fw-api-scan.h" +#include "time-event.h" + +#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +static const struct iwl_op_mode_ops iwl_mvm_ops; +static const struct iwl_op_mode_ops iwl_mvm_ops_mq; + +struct iwl_mvm_mod_params iwlmvm_mod_params = { + .power_scheme = IWL_POWER_SCHEME_BPS, + .tfd_q_hang_detect = true + /* rest of fields are 0 by default */ +}; + +module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO); +MODULE_PARM_DESC(init_dbg, + "set to true to debug an ASSERT in INIT fw (default: false"); +module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO); +MODULE_PARM_DESC(power_scheme, + "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); +module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect, + bool, S_IRUGO); +MODULE_PARM_DESC(tfd_q_hang_detect, + "TFD queues hang detection (default: true"); + +/* + * module init and exit functions + */ +static int __init iwl_mvm_init(void) +{ + int ret; + + ret = iwl_mvm_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops); + + if (ret) { + pr_err("Unable to register MVM op_mode: %d\n", ret); + iwl_mvm_rate_control_unregister(); + } + + return ret; +} +module_init(iwl_mvm_init); + +static void __exit iwl_mvm_exit(void) +{ + iwl_opmode_deregister("iwlmvm"); + iwl_mvm_rate_control_unregister(); +} +module_exit(iwl_mvm_exit); + +static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; + u32 reg_val = 0; + u32 phy_config = iwl_mvm_get_phy_config(mvm); + + radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> + FW_PHY_CFG_RADIO_TYPE_POS; + radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >> + FW_PHY_CFG_RADIO_STEP_POS; + radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >> + FW_PHY_CFG_RADIO_DASH_POS; + + /* SKU control */ + reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << + CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; + reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) << + CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; + + /* radio configuration */ + reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; + reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; + reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; + + WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) & + ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE); + + /* + * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and + * shouldn't be set to any non-zero value. The same is supposed to be + * true of the other HW, but unsetting them (such as the 7260) causes + * automatic tests to fail on seemingly unrelated errors. Need to + * further investigate this, but for now we'll separate cases. 
+ */ + if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; + + iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | + CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | + CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | + CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | + CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI, + reg_val); + + IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, + radio_cfg_step, radio_cfg_dash); + + /* + * W/A : NIC is stuck in a reset state after Early PCIe power off + * (PCIe power is lost before PERST# is asserted), causing ME FW + * to lose ownership and not being able to obtain it back. + */ + if (!mvm->trans->cfg->apmg_not_supported) + iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, + ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); +} + +struct iwl_rx_handlers { + u16 cmd_id; + bool async; + void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +}; + +#define RX_HANDLER(_cmd_id, _fn, _async) \ + { .cmd_id = _cmd_id , .fn = _fn , .async = _async } +#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \ + { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async } + +/* + * Handlers for fw notifications + * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME + * This list should be in order of frequency for performance purposes. + * + * The handler can be SYNC - this means that it will be called in the Rx path + * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and + * only in this case!), it should be set as ASYNC. In that case, it will be + * called from a worker with mvm->mutex held. + */ +static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { + RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), + RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), + + RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), + RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true), + RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), + RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, + iwl_mvm_rx_ant_coupling_notif, true), + + RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), + RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true), + + RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), + + RX_HANDLER(SCAN_ITERATION_COMPLETE, + iwl_mvm_rx_lmac_scan_iter_complete_notif, false), + RX_HANDLER(SCAN_OFFLOAD_COMPLETE, + iwl_mvm_rx_lmac_scan_complete_notif, true), + RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, + false), + RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, + true), + RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, + iwl_mvm_rx_umac_scan_iter_complete_notif, false), + + RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), + + RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, + false), + + RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), + RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, + iwl_mvm_power_uapsd_misbehaving_ap_notif, false), + RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), + RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, + iwl_mvm_temp_notif, true), + + RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, + true), + RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), + RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), + +}; 
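[Editor's note] To make the SYNC/ASYNC convention described above the handler table concrete, here is a hedged, user-space sketch of the same dispatch pattern: handlers marked synchronous run directly in the RX path, while asynchronous ones are deferred and drained later by a worker that may take the big driver mutex. Every ex_* name and command ID below is hypothetical; the real driver defers entries onto the spinlock-protected mvm->async_handlers_list and schedules async_handlers_wk rather than using this toy array.

/*
 * Toy sketch of the SYNC/ASYNC RX dispatch pattern.  Not driver code:
 * the deferred "queue" here stands in for mvm->async_handlers_list and
 * the drain loop in main() stands in for the async handlers work item.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ex_rx_handler {
        unsigned int cmd_id;
        bool async;
        void (*fn)(const void *payload);
};

static void ex_handle_tx_resp(const void *payload)  { (void)payload; puts("tx resp (sync)"); }
static void ex_handle_bt_notif(const void *payload) { (void)payload; puts("bt notif (async)"); }

static const struct ex_rx_handler ex_handlers[] = {
        { .cmd_id = 0x1c, .async = false, .fn = ex_handle_tx_resp },
        { .cmd_id = 0x84, .async = true,  .fn = ex_handle_bt_notif },
};

/* toy deferred queue; capacity is arbitrary for the example */
static const struct ex_rx_handler *ex_deferred[8];
static size_t ex_n_deferred;

static void ex_rx_dispatch(unsigned int cmd_id, const void *payload)
{
        size_t i;

        for (i = 0; i < sizeof(ex_handlers) / sizeof(ex_handlers[0]); i++) {
                if (ex_handlers[i].cmd_id != cmd_id)
                        continue;
                if (ex_handlers[i].async)
                        ex_deferred[ex_n_deferred++] = &ex_handlers[i]; /* run later, from a worker */
                else
                        ex_handlers[i].fn(payload);                     /* run now, no mutex needed */
                return;
        }
}

int main(void)
{
        size_t i;

        ex_rx_dispatch(0x1c, NULL);
        ex_rx_dispatch(0x84, NULL);

        /* the "worker" drains the deferred handlers afterwards */
        for (i = 0; i < ex_n_deferred; i++)
                ex_deferred[i]->fn(NULL);
        return 0;
}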
+#undef RX_HANDLER +#undef RX_HANDLER_GRP +#define CMD(x) [x] = #x + +static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = { + CMD(MVM_ALIVE), + CMD(REPLY_ERROR), + CMD(ECHO_CMD), + CMD(INIT_COMPLETE_NOTIF), + CMD(PHY_CONTEXT_CMD), + CMD(MGMT_MCAST_KEY), + CMD(TX_CMD), + CMD(TXPATH_FLUSH), + CMD(SHARED_MEM_CFG), + CMD(MAC_CONTEXT_CMD), + CMD(TIME_EVENT_CMD), + CMD(TIME_EVENT_NOTIFICATION), + CMD(BINDING_CONTEXT_CMD), + CMD(TIME_QUOTA_CMD), + CMD(NON_QOS_TX_COUNTER_CMD), + CMD(DC2DC_CONFIG_CMD), + CMD(NVM_ACCESS_CMD), + CMD(PHY_CONFIGURATION_CMD), + CMD(CALIB_RES_NOTIF_PHY_DB), + CMD(SET_CALIB_DEFAULT_CMD), + CMD(FW_PAGING_BLOCK_CMD), + CMD(ADD_STA_KEY), + CMD(ADD_STA), + CMD(FW_GET_ITEM_CMD), + CMD(REMOVE_STA), + CMD(LQ_CMD), + CMD(SCAN_OFFLOAD_CONFIG_CMD), + CMD(MATCH_FOUND_NOTIFICATION), + CMD(SCAN_OFFLOAD_REQUEST_CMD), + CMD(SCAN_OFFLOAD_ABORT_CMD), + CMD(HOT_SPOT_CMD), + CMD(SCAN_OFFLOAD_COMPLETE), + CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), + CMD(SCAN_ITERATION_COMPLETE), + CMD(POWER_TABLE_CMD), + CMD(WEP_KEY), + CMD(REPLY_RX_PHY_CMD), + CMD(REPLY_RX_MPDU_CMD), + CMD(BEACON_NOTIFICATION), + CMD(BEACON_TEMPLATE_CMD), + CMD(STATISTICS_CMD), + CMD(STATISTICS_NOTIFICATION), + CMD(EOSP_NOTIFICATION), + CMD(REDUCE_TX_POWER_CMD), + CMD(TX_ANT_CONFIGURATION_CMD), + CMD(D3_CONFIG_CMD), + CMD(D0I3_END_CMD), + CMD(PROT_OFFLOAD_CONFIG_CMD), + CMD(OFFLOADS_QUERY_CMD), + CMD(REMOTE_WAKE_CONFIG_CMD), + CMD(WOWLAN_PATTERNS), + CMD(WOWLAN_CONFIGURATION), + CMD(WOWLAN_TSC_RSC_PARAM), + CMD(WOWLAN_TKIP_PARAM), + CMD(WOWLAN_KEK_KCK_MATERIAL), + CMD(WOWLAN_GET_STATUSES), + CMD(WOWLAN_TX_POWER_PER_DB), + CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD), + CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD), + CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD), + CMD(CARD_STATE_NOTIFICATION), + CMD(MISSED_BEACONS_NOTIFICATION), + CMD(BT_COEX_PRIO_TABLE), + CMD(BT_COEX_PROT_ENV), + CMD(BT_PROFILE_NOTIFICATION), + CMD(BT_CONFIG), + CMD(MCAST_FILTER_CMD), + CMD(BCAST_FILTER_CMD), + CMD(REPLY_SF_CFG_CMD), + CMD(REPLY_BEACON_FILTERING_CMD), + CMD(CMD_DTS_MEASUREMENT_TRIGGER), + CMD(DTS_MEASUREMENT_NOTIFICATION), + CMD(REPLY_THERMAL_MNG_BACKOFF), + CMD(MAC_PM_POWER_TABLE), + CMD(LTR_CONFIG), + CMD(BT_COEX_CI), + CMD(BT_COEX_UPDATE_SW_BOOST), + CMD(BT_COEX_UPDATE_CORUN_LUT), + CMD(BT_COEX_UPDATE_REDUCED_TXP), + CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), + CMD(ANTENNA_COUPLING_NOTIFICATION), + CMD(SCD_QUEUE_CFG), + CMD(SCAN_CFG_CMD), + CMD(SCAN_REQ_UMAC), + CMD(SCAN_ABORT_UMAC), + CMD(SCAN_COMPLETE_UMAC), + CMD(TDLS_CHANNEL_SWITCH_CMD), + CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION), + CMD(TDLS_CONFIG_CMD), + CMD(MCC_UPDATE_CMD), + CMD(SCAN_ITERATION_COMPLETE_UMAC), +}; +#undef CMD + +/* this forward declaration can avoid to export the function */ +static void iwl_mvm_async_handlers_wk(struct work_struct *wk); +static void iwl_mvm_d0i3_exit_work(struct work_struct *wk); + +static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg) +{ + const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs; + + if (!pwr_tx_backoff) + return 0; + + while (pwr_tx_backoff->pwr) { + if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr) + return pwr_tx_backoff->backoff; + + pwr_tx_backoff++; + } + + return 0; +} + +static void iwl_mvm_fw_error_dump_wk(struct work_struct *work); + +static struct iwl_op_mode * +iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, struct dentry *dbgfs_dir) +{ + struct ieee80211_hw *hw; + struct iwl_op_mode *op_mode; + struct iwl_mvm *mvm; + struct iwl_trans_config 
trans_cfg = {}; + static const u8 no_reclaim_cmds[] = { + TX_CMD, + }; + int err, scan_size; + u32 min_backoff; + + /* + * We use IWL_MVM_STATION_COUNT to check the validity of the station + * index all over the driver - check that its value corresponds to the + * array size. + */ + BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT); + + /******************************** + * 1. Allocating and configuring HW data + ********************************/ + hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + + sizeof(struct iwl_mvm), + &iwl_mvm_hw_ops); + if (!hw) + return NULL; + + if (cfg->max_rx_agg_size) + hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size; + + if (cfg->max_tx_agg_size) + hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; + + op_mode = hw->priv; + + mvm = IWL_OP_MODE_GET_MVM(op_mode); + mvm->dev = trans->dev; + mvm->trans = trans; + mvm->cfg = cfg; + mvm->fw = fw; + mvm->hw = hw; + + if (iwl_mvm_has_new_rx_api(mvm)) { + op_mode->ops = &iwl_mvm_ops_mq; + } else { + op_mode->ops = &iwl_mvm_ops; + + if (WARN_ON(trans->num_rx_queues > 1)) + goto out_free; + } + + mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0; + + mvm->aux_queue = 15; + mvm->first_agg_queue = 16; + mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; + if (mvm->cfg->base_params->num_of_queues == 16) { + mvm->aux_queue = 11; + mvm->first_agg_queue = 12; + } + mvm->sf_state = SF_UNINIT; + mvm->low_latency_agg_frame_limit = 6; + mvm->cur_ucode = IWL_UCODE_INIT; + + mutex_init(&mvm->mutex); + mutex_init(&mvm->d0i3_suspend_mutex); + spin_lock_init(&mvm->async_handlers_lock); + INIT_LIST_HEAD(&mvm->time_event_list); + INIT_LIST_HEAD(&mvm->aux_roc_te_list); + INIT_LIST_HEAD(&mvm->async_handlers_list); + spin_lock_init(&mvm->time_event_lock); + spin_lock_init(&mvm->queue_info_lock); + + INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); + INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); + INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); + INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); + INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); + INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); + + spin_lock_init(&mvm->d0i3_tx_lock); + spin_lock_init(&mvm->refs_lock); + skb_queue_head_init(&mvm->d0i3_tx); + init_waitqueue_head(&mvm->d0i3_exit_waitq); + + SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); + + /* + * Populate the state variables that the transport layer needs + * to know about. 
+	 */
+	trans_cfg.op_mode = op_mode;
+	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+	trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
+					       IWL_UCODE_TLV_API_WIDE_CMD_HDR);
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
+		trans_cfg.bc_table_dword = true;
+
+	trans_cfg.command_names = iwl_mvm_cmd_strings;
+
+	trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
+	trans_cfg.scd_set_active = true;
+
+	trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
+
+	/* Set a short watchdog for the command queue */
+	trans_cfg.cmd_q_wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
+
+	snprintf(mvm->hw->wiphy->fw_version,
+		 sizeof(mvm->hw->wiphy->fw_version),
+		 "%s", fw->fw_version);
+
+	/* Configure transport layer */
+	iwl_trans_configure(mvm->trans, &trans_cfg);
+
+	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
+	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
+	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+	       sizeof(trans->dbg_conf_tlv));
+	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+
+	/* set up notification wait support */
+	iwl_notification_wait_init(&mvm->notif_wait);
+
+	/* Init phy db */
+	mvm->phy_db = iwl_phy_db_init(trans);
+	if (!mvm->phy_db) {
+		IWL_ERR(mvm, "Cannot init phy_db\n");
+		goto out_free;
+	}
+
+	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
+		 mvm->cfg->name, mvm->trans->hw_rev);
+
+	min_backoff = calc_min_backoff(trans, cfg);
+	iwl_mvm_tt_initialize(mvm, min_backoff);
+
+	if (iwlwifi_mod_params.nvm_file)
+		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
+	else
+		IWL_DEBUG_EEPROM(mvm->trans->dev,
+				 "working without external nvm file\n");
+
+	if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
+		 "not allowing power-up and not having nvm_file\n"))
+		goto out_free;
+
+	/*
+	 * Even if the NVM exists in the nvm_file, the driver should read the
+	 * NVM from the NIC again, because there might be entries that exist
+	 * in the OTP but not in the file.
+	 * For NICs with no_power_up_nic_in_init: rely completely on nvm_file.
+	 */
+	if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
+		err = iwl_nvm_init(mvm, false);
+		if (err)
+			goto out_free;
+	} else {
+		err = iwl_trans_start_hw(mvm->trans);
+		if (err)
+			goto out_free;
+
+		mutex_lock(&mvm->mutex);
+		err = iwl_run_init_mvm_ucode(mvm, true);
+		if (!err || !iwlmvm_mod_params.init_dbg)
+			iwl_trans_stop_device(trans);
+		mutex_unlock(&mvm->mutex);
+		/* returns 0 if successful, 1 if success but in rfkill */
+		if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+			IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+			goto out_free;
+		}
+	}
+
+	scan_size = iwl_mvm_scan_size(mvm);
+
+	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
+	if (!mvm->scan_cmd)
+		goto out_free;
+
+	/* Set EBS as successful as long as not stated otherwise by the FW. */
+	mvm->last_ebs_successful = true;
+
+	err = iwl_mvm_mac_setup_register(mvm);
+	if (err)
+		goto out_free;
+
+	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
+	if (err)
+		goto out_unregister;
+
+	memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+
+	/* rpm starts with a taken ref. only set the appropriate bit here.
*/ + mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1; + + iwl_mvm_tof_init(mvm); + + return op_mode; + + out_unregister: + ieee80211_unregister_hw(mvm->hw); + iwl_mvm_leds_exit(mvm); + out_free: + flush_delayed_work(&mvm->fw_dump_wk); + iwl_phy_db_free(mvm->phy_db); + kfree(mvm->scan_cmd); + if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name) + iwl_trans_op_mode_leave(trans); + ieee80211_free_hw(mvm->hw); + return NULL; +} + +static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + int i; + + iwl_mvm_leds_exit(mvm); + + iwl_mvm_tt_exit(mvm); + + ieee80211_unregister_hw(mvm->hw); + + kfree(mvm->scan_cmd); + kfree(mvm->mcast_filter_cmd); + mvm->mcast_filter_cmd = NULL; + +#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) + kfree(mvm->d3_resume_sram); + if (mvm->nd_config) { + kfree(mvm->nd_config->match_sets); + kfree(mvm->nd_config->scan_plans); + kfree(mvm->nd_config); + mvm->nd_config = NULL; + } +#endif + + iwl_trans_op_mode_leave(mvm->trans); + + iwl_phy_db_free(mvm->phy_db); + mvm->phy_db = NULL; + + iwl_free_nvm_data(mvm->nvm_data); + for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) + kfree(mvm->nvm_sections[i].data); + + iwl_mvm_tof_clean(mvm); + + ieee80211_free_hw(mvm->hw); +} + +struct iwl_async_handler_entry { + struct list_head list; + struct iwl_rx_cmd_buffer rxb; + void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +}; + +void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) +{ + struct iwl_async_handler_entry *entry, *tmp; + + spin_lock_bh(&mvm->async_handlers_lock); + list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { + iwl_free_rxb(&entry->rxb); + list_del(&entry->list); + kfree(entry); + } + spin_unlock_bh(&mvm->async_handlers_lock); +} + +static void iwl_mvm_async_handlers_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, async_handlers_wk); + struct iwl_async_handler_entry *entry, *tmp; + struct list_head local_list; + + INIT_LIST_HEAD(&local_list); + + /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ + mutex_lock(&mvm->mutex); + + /* + * Sync with Rx path with a lock. Remove all the entries from this list, + * add them to a local one (lock free), and then handle them. 
+ */ + spin_lock_bh(&mvm->async_handlers_lock); + list_splice_init(&mvm->async_handlers_list, &local_list); + spin_unlock_bh(&mvm->async_handlers_lock); + + list_for_each_entry_safe(entry, tmp, &local_list, list) { + entry->fn(mvm, &entry->rxb); + iwl_free_rxb(&entry->rxb); + list_del(&entry->list); + kfree(entry); + } + mutex_unlock(&mvm->mutex); +} + +static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_cmd *cmds_trig; + int i; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); + cmds_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { + /* don't collect on CMD 0 */ + if (!cmds_trig->cmds[i].cmd_id) + break; + + if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || + cmds_trig->cmds[i].group_id != pkt->hdr.group_id) + continue; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "CMD 0x%02x.%02x received", + pkt->hdr.group_id, pkt->hdr.cmd); + break; + } +} + +static void iwl_mvm_rx_common(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_rx_packet *pkt) +{ + int i; + + iwl_mvm_rx_check_trigger(mvm, pkt); + + /* + * Do the notification wait before RX handlers so + * even if the RX handler consumes the RXB we have + * access to it in the notification wait entry. + */ + iwl_notification_wait_notify(&mvm->notif_wait, pkt); + + for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) { + const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; + struct iwl_async_handler_entry *entry; + + if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) + continue; + + if (!rx_h->async) { + rx_h->fn(mvm, rxb); + return; + } + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + /* we can't do much... 
*/ + if (!entry) + return; + + entry->rxb._page = rxb_steal_page(rxb); + entry->rxb._offset = rxb->_offset; + entry->rxb._rx_page_order = rxb->_rx_page_order; + entry->fn = rx_h->fn; + spin_lock(&mvm->async_handlers_lock); + list_add_tail(&entry->list, &mvm->async_handlers_list); + spin_unlock(&mvm->async_handlers_lock); + schedule_work(&mvm->async_handlers_wk); + break; + } +} + +static void iwl_mvm_rx(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) + iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); + else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) + iwl_mvm_rx_rx_phy_cmd(mvm, rxb); + else + iwl_mvm_rx_common(mvm, rxb, pkt); +} + +static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) + iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); + else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) + iwl_mvm_rx_rx_phy_cmd(mvm, rxb); + else + iwl_mvm_rx_common(mvm, rxb, pkt); +} + +static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + unsigned long mq; + int q; + + spin_lock_bh(&mvm->queue_info_lock); + mq = mvm->queue_info[queue].hw_queue_to_mac80211; + spin_unlock_bh(&mvm->queue_info_lock); + + if (WARN_ON_ONCE(!mq)) + return; + + for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { + if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) { + IWL_DEBUG_TX_QUEUES(mvm, + "queue %d (mac80211 %d) already stopped\n", + queue, q); + continue; + } + + ieee80211_stop_queue(mvm->hw, q); + } +} + +static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + unsigned long mq; + int q; + + spin_lock_bh(&mvm->queue_info_lock); + mq = mvm->queue_info[queue].hw_queue_to_mac80211; + spin_unlock_bh(&mvm->queue_info_lock); + + if (WARN_ON_ONCE(!mq)) + return; + + for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { + if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) { + IWL_DEBUG_TX_QUEUES(mvm, + "queue %d (mac80211 %d) still stopped\n", + queue, q); + continue; + } + + ieee80211_wake_queue(mvm->hw, q); + } +} + +void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) +{ + if (state) + set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); + else + clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); + + wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); +} + +static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + bool calibrating = ACCESS_ONCE(mvm->calibrating); + + if (state) + set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); + else + clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); + + wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); + + /* iwl_run_init_mvm_ucode is waiting for results, abort it */ + if (calibrating) + iwl_abort_notification_waits(&mvm->notif_wait); + + /* + * Stop the device if we run OPERATIONAL firmware or if we are in the + * middle of the calibrations. 
+ */ + return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating); +} + +static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct ieee80211_tx_info *info; + + info = IEEE80211_SKB_CB(skb); + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + ieee80211_free_txskb(mvm->hw, skb); +} + +struct iwl_mvm_reprobe { + struct device *dev; + struct work_struct work; +}; + +static void iwl_mvm_reprobe_wk(struct work_struct *wk) +{ + struct iwl_mvm_reprobe *reprobe; + + reprobe = container_of(wk, struct iwl_mvm_reprobe, work); + if (device_reprobe(reprobe->dev)) + dev_err(reprobe->dev, "reprobe failed!\n"); + kfree(reprobe); + module_put(THIS_MODULE); +} + +static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) +{ + struct iwl_mvm *mvm = + container_of(work, struct iwl_mvm, fw_dump_wk.work); + + if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT)) + return; + + mutex_lock(&mvm->mutex); + + /* stop recording */ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); + } else { + iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); + /* wait before we collect the data till the DBGC stop */ + udelay(100); + } + + iwl_mvm_fw_error_dump(mvm); + + /* start recording again if the firmware is not crashed */ + WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && + mvm->fw->dbg_dest_tlv && + iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); + + mutex_unlock(&mvm->mutex); + + iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); +} + +void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) +{ + iwl_abort_notification_waits(&mvm->notif_wait); + + /* + * This is a bit racy, but worst case we tell mac80211 about + * a stopped/aborted scan when that was already done which + * is not a problem. It is necessary to abort any os scan + * here because mac80211 requires having the scan cleared + * before restarting. + * We'll reset the scan_status to NONE in restart cleanup in + * the next start() call from mac80211. If restart isn't called + * (no fw restart) scan status will stay busy. + */ + iwl_mvm_report_scan_aborted(mvm); + + /* + * If we're restarting already, don't cycle restarts. + * If INIT fw asserted, it will likely fail again. + * If WoWLAN fw asserted, don't restart either, mac80211 + * can't recover this since we're already half suspended. + */ + if (!mvm->restart_fw && fw_error) { + iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, + NULL); + } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status)) { + struct iwl_mvm_reprobe *reprobe; + + IWL_ERR(mvm, + "Firmware error during reconfiguration - reprobe!\n"); + + /* + * get a module reference to avoid doing this while unloading + * anyway and to avoid scheduling a work with code that's + * being removed. 
+ */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(mvm, "Module is being unloaded - abort\n"); + return; + } + + reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC); + if (!reprobe) { + module_put(THIS_MODULE); + return; + } + reprobe->dev = mvm->trans->dev; + INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); + schedule_work(&reprobe->work); + } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) { + /* don't let the transport/FW power down */ + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + + if (fw_error && mvm->restart_fw > 0) + mvm->restart_fw--; + ieee80211_restart_hw(mvm->hw); + } +} + +static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + iwl_mvm_dump_nic_error_log(mvm); + + iwl_mvm_nic_restart(mvm, true); +} + +static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + WARN_ON(1); + iwl_mvm_nic_restart(mvm, true); +} + +struct iwl_d0i3_iter_data { + struct iwl_mvm *mvm; + u8 ap_sta_id; + u8 vif_count; + u8 offloading_tid; + bool disable_offloading; +}; + +static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_d0i3_iter_data *iter_data) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_sta *ap_sta; + struct iwl_mvm_sta *mvmsta; + u32 available_tids = 0; + u8 tid; + + if (WARN_ON(vif->type != NL80211_IFTYPE_STATION || + mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) + return false; + + ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); + if (IS_ERR_OR_NULL(ap_sta)) + return false; + + mvmsta = iwl_mvm_sta_from_mac80211(ap_sta); + spin_lock_bh(&mvmsta->lock); + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + + /* + * in case of pending tx packets, don't use this tid + * for offloading in order to prevent reuse of the same + * qos seq counters. + */ + if (iwl_mvm_tid_queued(tid_data)) + continue; + + if (tid_data->state != IWL_AGG_OFF) + continue; + + available_tids |= BIT(tid); + } + spin_unlock_bh(&mvmsta->lock); + + /* + * disallow protocol offloading if we have no available tid + * (with no pending frames and no active aggregation, + * as we don't handle "holes" properly - the scheduler needs the + * frame's seq number and TFD index to match) + */ + if (!available_tids) + return true; + + /* for simplicity, just use the first available tid */ + iter_data->offloading_tid = ffs(available_tids) - 1; + return false; +} + +static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_d0i3_iter_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE; + + IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr); + if (vif->type != NL80211_IFTYPE_STATION || + !vif->bss_conf.assoc) + return; + + /* + * in case of pending tx packets or active aggregations, + * avoid offloading features in order to prevent reuse of + * the same qos seq counters. 
+ */ + if (iwl_mvm_disallow_offloading(mvm, vif, data)) + data->disable_offloading = true; + + iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags); + iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags); + + /* + * on init/association, mvm already configures POWER_TABLE_CMD + * and REPLY_MCAST_FILTER_CMD, so currently don't + * reconfigure them (we might want to use different + * params later on, though). + */ + data->ap_sta_id = mvmvif->ap_sta_id; + data->vif_count++; +} + +static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, + struct iwl_wowlan_config_cmd *cmd, + struct iwl_d0i3_iter_data *iter_data) +{ + struct ieee80211_sta *ap_sta; + struct iwl_mvm_sta *mvm_ap_sta; + + if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT) + return; + + rcu_read_lock(); + + ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]); + if (IS_ERR_OR_NULL(ap_sta)) + goto out; + + mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); + cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; + cmd->offloading_tid = iter_data->offloading_tid; + + /* + * The d0i3 uCode takes care of the nonqos counters, + * so configure only the qos seq ones. + */ + iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd); +out: + rcu_read_unlock(); +} + +int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE; + int ret; + struct iwl_d0i3_iter_data d0i3_iter_data = { + .mvm = mvm, + }; + struct iwl_wowlan_config_cmd wowlan_config_cmd = { + .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | + IWL_WOWLAN_WAKEUP_BEACON_MISS | + IWL_WOWLAN_WAKEUP_LINK_CHANGE | + IWL_WOWLAN_WAKEUP_BCN_FILTERING), + }; + struct iwl_d3_manager_config d3_cfg_cmd = { + .min_sleep_time = cpu_to_le32(1000), + .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR), + }; + + IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); + + set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); + + /* + * iwl_mvm_ref_sync takes a reference before checking the flag. 
+ * so by checking there is no held reference we prevent a state + * in which iwl_mvm_ref_sync continues successfully while we + * configure the firmware to enter d0i3 + */ + if (iwl_mvm_ref_taken(mvm)) { + IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n"); + clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); + wake_up(&mvm->d0i3_exit_waitq); + return 1; + } + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_enter_d0i3_iterator, + &d0i3_iter_data); + if (d0i3_iter_data.vif_count == 1) { + mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id; + mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading; + } else { + WARN_ON_ONCE(d0i3_iter_data.vif_count > 1); + mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_offloading = false; + } + + /* make sure we have no running tx while configuring the seqno */ + synchronize_net(); + + /* configure wowlan configuration only if needed */ + if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { + iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, + &d0i3_iter_data); + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags, + sizeof(wowlan_config_cmd), + &wowlan_config_cmd); + if (ret) + return ret; + } + + return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, + flags | CMD_MAKE_TRANS_IDLE, + sizeof(d3_cfg_cmd), &d3_cfg_cmd); +} + +static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = _data; + u32 flags = CMD_ASYNC | CMD_HIGH_PRIO; + + IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr); + if (vif->type != NL80211_IFTYPE_STATION || + !vif->bss_conf.assoc) + return; + + iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags); +} + +struct iwl_mvm_wakeup_reason_iter_data { + struct iwl_mvm *mvm; + u32 wakeup_reasons; +}; + +static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_wakeup_reason_iter_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc && + data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) { + if (data->wakeup_reasons & + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) + iwl_mvm_connection_loss(data->mvm, vif, "D0i3"); + else + ieee80211_beacon_loss(vif); + } +} + +void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) +{ + struct ieee80211_sta *sta = NULL; + struct iwl_mvm_sta *mvm_ap_sta; + int i; + bool wake_queues = false; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->d0i3_tx_lock); + + if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT) + goto out; + + IWL_DEBUG_RPM(mvm, "re-enqueue packets\n"); + + /* get the sta in order to update seq numbers and re-enqueue skbs */ + sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id], + lockdep_is_held(&mvm->mutex)); + + if (IS_ERR_OR_NULL(sta)) { + sta = NULL; + goto out; + } + + if (mvm->d0i3_offloading && qos_seq) { + /* update qos seq numbers if offloading was enabled */ + mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta); + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + u16 seq = le16_to_cpu(qos_seq[i]); + /* firmware stores last-used one, we store next one */ + seq += 0x10; + mvm_ap_sta->tid_data[i].seq_number = seq; + } + } +out: + /* re-enqueue (or drop) all packets */ + while (!skb_queue_empty(&mvm->d0i3_tx)) { + struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx); + + if (!sta || iwl_mvm_tx_skb(mvm, skb, sta)) + ieee80211_free_txskb(mvm->hw, skb); + + /* if the skb_queue is not empty, we 
need to wake queues */ + wake_queues = true; + } + clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); + wake_up(&mvm->d0i3_exit_waitq); + mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + if (wake_queues) + ieee80211_wake_queues(mvm->hw); + + spin_unlock_bh(&mvm->d0i3_tx_lock); +} + +static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work); + struct iwl_host_cmd get_status_cmd = { + .id = WOWLAN_GET_STATUSES, + .flags = CMD_HIGH_PRIO | CMD_WANT_SKB, + }; + struct iwl_wowlan_status *status; + int ret; + u32 handled_reasons, wakeup_reasons = 0; + __le16 *qos_seq = NULL; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd(mvm, &get_status_cmd); + if (ret) + goto out; + + if (!get_status_cmd.resp_pkt) + goto out; + + status = (void *)get_status_cmd.resp_pkt->data; + wakeup_reasons = le32_to_cpu(status->wakeup_reasons); + qos_seq = status->qos_seq_ctr; + + IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons); + + handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; + if (wakeup_reasons & handled_reasons) { + struct iwl_mvm_wakeup_reason_iter_data data = { + .mvm = mvm, + .wakeup_reasons = wakeup_reasons, + }; + + ieee80211_iterate_active_interfaces( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d0i3_wakeup_reason_iter, &data); + } +out: + iwl_mvm_d0i3_enable_tx(mvm, qos_seq); + + IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n", + wakeup_reasons); + + /* qos_seq might point inside resp_pkt, so free it only now */ + if (get_status_cmd.resp_pkt) + iwl_free_resp(&get_status_cmd); + + /* the FW might have updated the regdomain */ + iwl_mvm_update_changed_regdom(mvm); + + iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK); + mutex_unlock(&mvm->mutex); +} + +int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm) +{ + u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | + CMD_WAKE_UP_TRANS; + int ret; + + IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); + + mutex_lock(&mvm->d0i3_suspend_mutex); + if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) { + IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n"); + __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags); + mutex_unlock(&mvm->d0i3_suspend_mutex); + return 0; + } + mutex_unlock(&mvm->d0i3_suspend_mutex); + + ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); + if (ret) + goto out; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_exit_d0i3_iterator, + mvm); +out: + schedule_work(&mvm->d0i3_exit_work); + return ret; +} + +int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK); + return _iwl_mvm_exit_d0i3(mvm); +} + +#define IWL_MVM_COMMON_OPS \ + /* these could be differentiated */ \ + .queue_full = iwl_mvm_stop_sw_queue, \ + .queue_not_full = iwl_mvm_wake_sw_queue, \ + .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \ + .free_skb = iwl_mvm_free_skb, \ + .nic_error = iwl_mvm_nic_error, \ + .cmd_queue_full = iwl_mvm_cmd_queue_full, \ + .nic_config = iwl_mvm_nic_config, \ + .enter_d0i3 = iwl_mvm_enter_d0i3, \ + .exit_d0i3 = iwl_mvm_exit_d0i3, \ + /* as we only register one, these MUST be common! 
*/ \ + .start = iwl_op_mode_mvm_start, \ + .stop = iwl_op_mode_mvm_stop + +static const struct iwl_op_mode_ops iwl_mvm_ops = { + IWL_MVM_COMMON_OPS, + .rx = iwl_mvm_rx, +}; + +static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, + unsigned int queue) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); +} + +static const struct iwl_op_mode_ops iwl_mvm_ops_mq = { + IWL_MVM_COMMON_OPS, + .rx = iwl_mvm_rx_mq, + .rx_rss = iwl_mvm_rx_mq_rss, +}; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c new file mode 100644 index 000000000000..e68a475e3071 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -0,0 +1,295 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+/* Maps the driver specific channel width definition to the fw values */
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+{
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+		return PHY_VHT_CHANNEL_MODE20;
+	case NL80211_CHAN_WIDTH_40:
+		return PHY_VHT_CHANNEL_MODE40;
+	case NL80211_CHAN_WIDTH_80:
+		return PHY_VHT_CHANNEL_MODE80;
+	case NL80211_CHAN_WIDTH_160:
+		return PHY_VHT_CHANNEL_MODE160;
+	default:
+		WARN(1, "Invalid channel width=%u", chandef->width);
+		return PHY_VHT_CHANNEL_MODE20;
+	}
+}
+
+/*
+ * Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+{
+	switch (chandef->chan->center_freq - chandef->center_freq1) {
+	case -70:
+		return PHY_VHT_CTRL_POS_4_BELOW;
+	case -50:
+		return PHY_VHT_CTRL_POS_3_BELOW;
+	case -30:
+		return PHY_VHT_CTRL_POS_2_BELOW;
+	case -10:
+		return PHY_VHT_CTRL_POS_1_BELOW;
+	case 10:
+		return PHY_VHT_CTRL_POS_1_ABOVE;
+	case 30:
+		return PHY_VHT_CTRL_POS_2_ABOVE;
+	case 50:
+		return PHY_VHT_CTRL_POS_3_ABOVE;
+	case 70:
+		return PHY_VHT_CTRL_POS_4_ABOVE;
+	default:
+		WARN(1, "Invalid channel definition");
+	case 0:
+		/*
+		 * The FW is expected to check the control channel position only
+		 * when in HT/VHT and the channel width is not 20MHz. Return
+		 * this value as the default one.
+		 */
+		return PHY_VHT_CTRL_POS_1_BELOW;
+	}
+}
+
+/*
+ * Construct the generic fields of the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
+				     struct iwl_phy_context_cmd *cmd,
+				     u32 action, u32 apply_time)
+{
+	memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
+
+	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
+							    ctxt->color));
+	cmd->action = cpu_to_le32(action);
+	cmd->apply_time = cpu_to_le32(apply_time);
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+				      struct iwl_phy_context_cmd *cmd,
+				      struct cfg80211_chan_def *chandef,
+				      u8 chains_static, u8 chains_dynamic)
+{
+	u8 active_cnt, idle_cnt;
+
+	/* Set the channel info data */
+	cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+			PHY_BAND_24 : PHY_BAND_5);
+
+	cmd->ci.channel = chandef->chan->hw_value;
+	cmd->ci.width = iwl_mvm_get_channel_width(chandef);
+	cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+
+	/* Set the rx chains */
+	idle_cnt = chains_static;
+	active_cnt = chains_dynamic;
+
+	/* In scenarios where we only ever use single-stream rates,
+	 * i.e.
legacy 11b/g/a associations, single-stream APs or even
+	 * static SMPS, enable both chains to get diversity, improving
+	 * the case where we're far enough from the AP that attenuation
+	 * between the two antennas is sufficiently different to impact
+	 * performance.
+	 */
+	if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+		idle_cnt = 2;
+		active_cnt = 2;
+	}
+
+	cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+					PHY_RX_CHAIN_VALID_POS);
+	cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+	cmd->rxchain_info |= cpu_to_le32(active_cnt <<
+					 PHY_RX_CHAIN_MIMO_CNT_POS);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (unlikely(mvm->dbgfs_rx_phyinfo))
+		cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+#endif
+
+	cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+}
+
+/*
+ * Send a command to apply the current phy configuration. The command is sent
+ * only if something in the configuration changed: either this is the first
+ * time the phy configuration is applied, or it has changed since the
+ * previous apply.
+ */
+static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+				  struct iwl_mvm_phy_ctxt *ctxt,
+				  struct cfg80211_chan_def *chandef,
+				  u8 chains_static, u8 chains_dynamic,
+				  u32 action, u32 apply_time)
+{
+	struct iwl_phy_context_cmd cmd;
+	int ret;
+
+	/* Set the command header fields */
+	iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+
+	/* Set the command data */
+	iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+				  chains_static, chains_dynamic);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
+				   sizeof(struct iwl_phy_context_cmd),
+				   &cmd);
+	if (ret)
+		IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
+	return ret;
+}
+
+/*
+ * Send a command to add a PHY context based on the current HW configuration.
+ */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+			 struct cfg80211_chan_def *chandef,
+			 u8 chains_static, u8 chains_dynamic)
+{
+	WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		ctxt->ref);
+	lockdep_assert_held(&mvm->mutex);
+
+	ctxt->channel = chandef->chan;
+
+	return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+				      chains_static, chains_dynamic,
+				      FW_CTXT_ACTION_ADD, 0);
+}
+
+/*
+ * Update the number of references to the given PHY context. This is valid only
+ * in case the PHY context was already created, i.e., its reference count > 0.
+ */
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+	lockdep_assert_held(&mvm->mutex);
+	ctxt->ref++;
+}
+
+/*
+ * Send a command to modify the PHY context based on the current HW
+ * configuration. Note that the function does not check that the configuration
+ * changed.
+ */ +int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic) +{ + lockdep_assert_held(&mvm->mutex); + + ctxt->channel = chandef->chan; + return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_MODIFY, 0); +} + +void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) +{ + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(!ctxt)) + return; + + ctxt->ref--; +} + +static void iwl_mvm_binding_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + unsigned long *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (!mvmvif->phy_ctxt) + return; + + if (vif->type == NL80211_IFTYPE_STATION || + vif->type == NL80211_IFTYPE_AP) + __set_bit(mvmvif->phy_ctxt->id, data); +} + +int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm) +{ + unsigned long phy_ctxt_counter = 0; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_binding_iterator, + &phy_ctxt_counter); + + return hweight8(phy_ctxt_counter); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c new file mode 100644 index 000000000000..bed9696ee410 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -0,0 +1,1040 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include +#include + +#include + +#include "iwl-debug.h" +#include "mvm.h" +#include "iwl-modparams.h" +#include "fw-api-power.h" + +#define POWER_KEEP_ALIVE_PERIOD_SEC 25 + +static +int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, + struct iwl_beacon_filter_cmd *cmd, + u32 flags) +{ + IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", + le32_to_cpu(cmd->ba_enable_beacon_abort)); + IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", + le32_to_cpu(cmd->ba_escape_timer)); + IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", + le32_to_cpu(cmd->bf_debug_flag)); + IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", + le32_to_cpu(cmd->bf_enable_beacon_filter)); + IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", + le32_to_cpu(cmd->bf_energy_delta)); + IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", + le32_to_cpu(cmd->bf_escape_timer)); + IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", + le32_to_cpu(cmd->bf_roaming_energy_delta)); + IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", + le32_to_cpu(cmd->bf_roaming_state)); + IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", + le32_to_cpu(cmd->bf_temp_threshold)); + IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", + le32_to_cpu(cmd->bf_temp_fast_filter)); + IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", + le32_to_cpu(cmd->bf_temp_slow_filter)); + + return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags, + sizeof(struct iwl_beacon_filter_cmd), cmd); +} + +static +void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd, + bool d0i3) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif->bss_conf.cqm_rssi_thold && !d0i3) { + cmd->bf_energy_delta = + cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); + /* fw uses an absolute value for this */ + cmd->bf_roaming_state = + cpu_to_le32(-vif->bss_conf.cqm_rssi_thold); + } + cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); +} + +static void iwl_mvm_power_log(struct iwl_mvm *mvm, + struct iwl_mac_power_cmd *cmd) +{ + IWL_DEBUG_POWER(mvm, + "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n", + cmd->id_and_color, iwlmvm_mod_params.power_scheme, + le16_to_cpu(cmd->flags)); + IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", + le16_to_cpu(cmd->keep_alive_seconds)); + + if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) { + 
IWL_DEBUG_POWER(mvm, "Disable power management\n"); + return; + } + + IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", + le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", + le32_to_cpu(cmd->tx_data_timeout)); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) + IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", + cmd->skip_dtim_periods); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) + IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", + cmd->lprx_rssi_threshold); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { + IWL_DEBUG_POWER(mvm, "uAPSD enabled\n"); + IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n", + le32_to_cpu(cmd->rx_data_timeout_uapsd)); + IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n", + le32_to_cpu(cmd->tx_data_timeout_uapsd)); + IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid); + IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags); + IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp); + } +} + +static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_power_cmd *cmd) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + enum ieee80211_ac_numbers ac; + bool tid_found = false; + + for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { + if (!mvmvif->queue_params[ac].uapsd) + continue; + + if (mvm->cur_ucode != IWL_UCODE_WOWLAN) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); + + cmd->uapsd_ac_flags |= BIT(ac); + + /* QNDP TID - the highest TID with no admission control */ + if (!tid_found && !mvmvif->queue_params[ac].acm) { + tid_found = true; + switch (ac) { + case IEEE80211_AC_VO: + cmd->qndp_tid = 6; + break; + case IEEE80211_AC_VI: + cmd->qndp_tid = 5; + break; + case IEEE80211_AC_BE: + cmd->qndp_tid = 0; + break; + case IEEE80211_AC_BK: + cmd->qndp_tid = 1; + break; + } + } + } + + if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* set advanced pm flag with no uapsd ACs to enable ps-poll */ + if (mvmvif->dbgfs_pm.use_ps_poll) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); +#endif + return; + } + + cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); + + if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | + BIT(IEEE80211_AC_VI) | + BIT(IEEE80211_AC_BE) | + BIT(IEEE80211_AC_BK))) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); + cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); + cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ? 
+ cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : + cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); + } + + cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; + + if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags & + cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { + cmd->rx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); + cmd->tx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); + } else { + cmd->rx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); + cmd->tx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); + } + + if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { + cmd->heavy_tx_thld_packets = + IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS; + cmd->heavy_rx_thld_packets = + IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS; + } else { + cmd->heavy_tx_thld_packets = + IWL_MVM_PS_HEAVY_TX_THLD_PACKETS; + cmd->heavy_rx_thld_packets = + IWL_MVM_PS_HEAVY_RX_THLD_PACKETS; + } + cmd->heavy_tx_thld_percentage = + IWL_MVM_PS_HEAVY_TX_THLD_PERCENT; + cmd->heavy_rx_thld_percentage = + IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; +} + +static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, + ETH_ALEN)) + return false; + + if (vif->p2p && + !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD)) + return false; + /* + * Avoid using uAPSD if P2P client is associated to GO that uses + * opportunistic power save. This is due to current FW limitation. + */ + if (vif->p2p && + (vif->bss_conf.p2p_noa_attr.oppps_ctwindow & + IEEE80211_P2P_OPPPS_ENABLE_BIT)) + return false; + + /* + * Avoid using uAPSD if client is in DCM - + * low latency issue in Miracast + */ + if (iwl_mvm_phy_ctx_count(mvm) >= 2) + return false; + + return true; +} + +static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *chan; + bool radar_detect = false; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + WARN_ON(!chanctx_conf); + if (chanctx_conf) { + chan = chanctx_conf->def.chan; + radar_detect = chan->flags & IEEE80211_CHAN_RADAR; + } + rcu_read_unlock(); + + return radar_detect; +} + +static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_power_cmd *cmd, + bool host_awake) +{ + int dtimper = vif->bss_conf.dtim_period ?: 1; + int skip; + + /* disable, in case we're supposed to override */ + cmd->skip_dtim_periods = 0; + cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + + if (iwl_mvm_power_is_radar(vif)) + return; + + if (dtimper >= 10) + return; + + /* TODO: check that multicast wake lock is off */ + + if (host_awake) { + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) + return; + skip = 2; + } else { + int dtimper_tu = dtimper * vif->bss_conf.beacon_int; + + if (WARN_ON(!dtimper_tu)) + return; + /* configure skip over dtim up to 306TU - 314 msec */ + skip = max_t(u8, 1, 306 / dtimper_tu); + } + + /* the firmware really expects "look at every X DTIMs", so add 1 */ + cmd->skip_dtim_periods = 1 + skip; + cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); +} + +static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_power_cmd *cmd, + bool host_awake) +{ + int dtimper, bi; + int keep_alive; + struct iwl_mvm_vif *mvmvif __maybe_unused = + iwl_mvm_vif_from_mac80211(vif); + + 
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + dtimper = vif->bss_conf.dtim_period; + bi = vif->bss_conf.beacon_int; + + /* + * Regardless of power management state the driver must set + * keep alive period. FW will use it for sending keep alive NDPs + * immediately after association. Check that keep alive period + * is at least 3 * DTIM + */ + keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi), + USEC_PER_SEC); + keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC); + cmd->keep_alive_seconds = cpu_to_le16(keep_alive); + + if (mvm->ps_disabled) + return; + + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); + + if (!vif->bss_conf.ps || !mvmvif->pm_enabled) + return; + + if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && + (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) || + !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE)) + return; + + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); + + if (vif->bss_conf.beacon_rate && + (vif->bss_conf.beacon_rate->bitrate == 10 || + vif->bss_conf.beacon_rate->bitrate == 60)) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); + cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; + } + + iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake); + + if (!host_awake) { + cmd->rx_data_timeout = + cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); + cmd->tx_data_timeout = + cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); + } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) { + cmd->tx_data_timeout = + cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT); + cmd->rx_data_timeout = + cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT); + } else { + cmd->rx_data_timeout = + cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT); + cmd->tx_data_timeout = + cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT); + } + + if (iwl_mvm_power_allow_uapsd(mvm, vif)) + iwl_mvm_power_configure_uapsd(mvm, vif, cmd); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) + cmd->keep_alive_seconds = + cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { + if (mvmvif->dbgfs_pm.skip_over_dtim) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + else + cmd->flags &= + cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK); + } + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) + cmd->rx_data_timeout = + cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) + cmd->tx_data_timeout = + cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) + cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods; + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) { + if (mvmvif->dbgfs_pm.lprx_ena) + cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); + else + cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK); + } + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) + cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold; + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) { + if (mvmvif->dbgfs_pm.snooze_ena) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); + else + cmd->flags &= + cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK); + } + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) { + u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK; + if 
(mvmvif->dbgfs_pm.uapsd_misbehaving) + cmd->flags |= cpu_to_le16(flag); + else + cmd->flags &= cpu_to_le16(flag); + } +#endif /* CONFIG_IWLWIFI_DEBUGFS */ +} + +static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mac_power_cmd cmd = {}; + + iwl_mvm_power_build_cmd(mvm, vif, &cmd, + mvm->cur_ucode != IWL_UCODE_WOWLAN); + iwl_mvm_power_log(mvm, &cmd); +#ifdef CONFIG_IWLWIFI_DEBUGFS + memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); +#endif + + return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0, + sizeof(cmd), &cmd); +} + +int iwl_mvm_power_update_device(struct iwl_mvm *mvm) +{ + struct iwl_device_power_cmd cmd = { + .flags = 0, + }; + + if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) + mvm->ps_disabled = true; + + if (!mvm->ps_disabled) + cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 : + mvm->disable_power_off) + cmd.flags &= + cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); +#endif + IWL_DEBUG_POWER(mvm, + "Sending device power command with flags = 0x%X\n", + cmd.flags); + + return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd), + &cmd); +} + +void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, + ETH_ALEN)) + eth_zero_addr(mvmvif->uapsd_misbehaving_bssid); +} + +static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + u8 *ap_sta_id = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* The ap_sta_id is not expected to change during current association + * so no explicit protection is needed + */ + if (mvmvif->ap_sta_id == *ap_sta_id) + memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, + ETH_ALEN); +} + +void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data; + u8 ap_sta_id = le32_to_cpu(notif->sta_id); + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id); +} + +struct iwl_power_vifs { + struct iwl_mvm *mvm; + struct ieee80211_vif *bf_vif; + struct ieee80211_vif *bss_vif; + struct ieee80211_vif *p2p_vif; + struct ieee80211_vif *ap_vif; + struct ieee80211_vif *monitor_vif; + bool p2p_active; + bool bss_active; + bool ap_active; + bool monitor_active; +}; + +static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->pm_enabled = false; +} + +static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool *disable_ps = _data; + + if (mvmvif->phy_ctxt) + if (mvmvif->phy_ctxt->id < MAX_PHYS) + *disable_ps |= mvmvif->ps_disabled; +} + +static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_power_vifs *power_iterator = _data; + + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_P2P_DEVICE: + break; + + case NL80211_IFTYPE_P2P_GO: + case 
NL80211_IFTYPE_AP: + /* only a single MAC of the same type */ + WARN_ON(power_iterator->ap_vif); + power_iterator->ap_vif = vif; + if (mvmvif->phy_ctxt) + if (mvmvif->phy_ctxt->id < MAX_PHYS) + power_iterator->ap_active = true; + break; + + case NL80211_IFTYPE_MONITOR: + /* only a single MAC of the same type */ + WARN_ON(power_iterator->monitor_vif); + power_iterator->monitor_vif = vif; + if (mvmvif->phy_ctxt) + if (mvmvif->phy_ctxt->id < MAX_PHYS) + power_iterator->monitor_active = true; + break; + + case NL80211_IFTYPE_P2P_CLIENT: + /* only a single MAC of the same type */ + WARN_ON(power_iterator->p2p_vif); + power_iterator->p2p_vif = vif; + if (mvmvif->phy_ctxt) + if (mvmvif->phy_ctxt->id < MAX_PHYS) + power_iterator->p2p_active = true; + break; + + case NL80211_IFTYPE_STATION: + /* only a single MAC of the same type */ + WARN_ON(power_iterator->bss_vif); + power_iterator->bss_vif = vif; + if (mvmvif->phy_ctxt) + if (mvmvif->phy_ctxt->id < MAX_PHYS) + power_iterator->bss_active = true; + + if (mvmvif->bf_data.bf_enabled && + !WARN_ON(power_iterator->bf_vif)) + power_iterator->bf_vif = vif; + + break; + + default: + break; + } +} + +static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, + struct iwl_power_vifs *vifs) +{ + struct iwl_mvm_vif *bss_mvmvif = NULL; + struct iwl_mvm_vif *p2p_mvmvif = NULL; + struct iwl_mvm_vif *ap_mvmvif = NULL; + bool client_same_channel = false; + bool ap_same_channel = false; + + lockdep_assert_held(&mvm->mutex); + + /* set pm_enable to false */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_disable_pm_iterator, + NULL); + + if (vifs->bss_vif) + bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif); + + if (vifs->p2p_vif) + p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif); + + if (vifs->ap_vif) + ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif); + + /* don't allow PM if any TDLS stations exist */ + if (iwl_mvm_tdls_sta_count(mvm, NULL)) + return; + + /* enable PM on bss if bss stand alone */ + if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) { + bss_mvmvif->pm_enabled = true; + return; + } + + /* enable PM on p2p if p2p stand alone */ + if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) { + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM) + p2p_mvmvif->pm_enabled = true; + return; + } + + if (vifs->bss_active && vifs->p2p_active) + client_same_channel = (bss_mvmvif->phy_ctxt->id == + p2p_mvmvif->phy_ctxt->id); + if (vifs->bss_active && vifs->ap_active) + ap_same_channel = (bss_mvmvif->phy_ctxt->id == + ap_mvmvif->phy_ctxt->id); + + /* clients are not stand alone: enable PM if DCM */ + if (!(client_same_channel || ap_same_channel) && + (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) { + if (vifs->bss_active) + bss_mvmvif->pm_enabled = true; + if (vifs->p2p_active && + (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)) + p2p_mvmvif->pm_enabled = true; + return; + } + + /* + * There is only one channel in the system and there are only + * bss and p2p clients that share it + */ + if (client_same_channel && !vifs->ap_active && + (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) { + /* share same channel*/ + bss_mvmvif->pm_enabled = true; + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM) + p2p_mvmvif->pm_enabled = true; + } +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, char *buf, + int bufsz) +{ + struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_power_cmd cmd = {}; + int pos = 0; + + mutex_lock(&mvm->mutex); + memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd)); + mutex_unlock(&mvm->mutex); + + pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", + iwlmvm_mod_params.power_scheme); + pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", + le16_to_cpu(cmd.flags)); + pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", + le16_to_cpu(cmd.keep_alive_seconds)); + + if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) + return pos; + + pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0); + pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", + cmd.skip_dtim_periods); + if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { + pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", + le32_to_cpu(cmd.rx_data_timeout)); + pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", + le32_to_cpu(cmd.tx_data_timeout)); + } + if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) + pos += scnprintf(buf+pos, bufsz-pos, + "lprx_rssi_threshold = %d\n", + cmd.lprx_rssi_threshold); + + if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) + return pos; + + pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n", + le32_to_cpu(cmd.rx_data_timeout_uapsd)); + pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n", + le32_to_cpu(cmd.tx_data_timeout_uapsd)); + pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid); + pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n", + cmd.uapsd_ac_flags); + pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n", + cmd.uapsd_max_sp); + pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n", + cmd.heavy_tx_thld_packets); + pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n", + cmd.heavy_rx_thld_packets); + pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n", + cmd.heavy_tx_thld_percentage); + pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n", + cmd.heavy_rx_thld_percentage); + pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ? 
+ 1 : 0); + + if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK))) + return pos; + + pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n", + cmd.snooze_interval); + pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n", + cmd.snooze_window); + + return pos; +} + +void +iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; + + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) + cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) + cmd->bf_roaming_energy_delta = + cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) + cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD) + cmd->bf_temp_threshold = + cpu_to_le32(dbgfs_bf->bf_temp_threshold); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER) + cmd->bf_temp_fast_filter = + cpu_to_le32(dbgfs_bf->bf_temp_fast_filter); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER) + cmd->bf_temp_slow_filter = + cpu_to_le32(dbgfs_bf->bf_temp_slow_filter); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) + cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) + cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer); + if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) + cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer); + if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) + cmd->ba_enable_beacon_abort = + cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort); +} +#endif + +static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd, + u32 cmd_flags, + bool d0i3) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period || + vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3); + if (!d0i3) + iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); + ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags); + + /* don't change bf_enabled in case of temporary d0i3 configuration */ + if (!ret && !d0i3) + mvmvif->bf_data.bf_enabled = true; + + return ret; +} + +int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags) +{ + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = cpu_to_le32(1), + }; + + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); +} + +static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool enable) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = cpu_to_le32(1), + }; + + if (!mvmvif->bf_data.bf_enabled) + return 0; + + if (mvm->cur_ucode == IWL_UCODE_WOWLAN) + cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); + + mvmvif->bf_data.ba_enabled = enable; + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); +} + +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags) +{ + struct iwl_beacon_filter_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 
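For readers tracing the coexistence rules in iwl_mvm_power_set_pm() above, the following standalone sketch restates the same decision ladder with plain booleans. The struct names, field names and the fw_* capability flags are the editor's simplified stand-ins for the driver's vif and ucode_capa state, not iwlwifi API; treat it as an illustration of the rules, not as driver code.

#include <stdbool.h>
#include <stdio.h>

struct pm_inputs {
	bool bss_active, p2p_active, ap_active;
	bool same_channel_client;	/* BSS and P2P client share a PHY */
	bool same_channel_ap;		/* BSS and AP/GO share a PHY */
	bool tdls_present;
	bool fw_p2p_pm, fw_dcm_ps, fw_scm_ps;
};

struct pm_result {
	bool bss_pm, p2p_pm;
};

static struct pm_result decide_pm(const struct pm_inputs *in)
{
	struct pm_result out = { false, false };

	if (in->tdls_present)		/* no PM while TDLS peers exist */
		return out;

	if (in->bss_active && !in->p2p_active && !in->ap_active) {
		out.bss_pm = true;	/* stand-alone BSS client */
		return out;
	}

	if (in->p2p_active && !in->bss_active && !in->ap_active) {
		out.p2p_pm = in->fw_p2p_pm;	/* stand-alone P2P client */
		return out;
	}

	/* clients on different channels (DCM): PM if the firmware allows it */
	if (!in->same_channel_client && !in->same_channel_ap && in->fw_dcm_ps) {
		out.bss_pm = in->bss_active;
		out.p2p_pm = in->p2p_active && in->fw_p2p_pm;
		return out;
	}

	/* one shared channel carrying only the BSS and P2P clients (SCM) */
	if (in->same_channel_client && !in->ap_active && in->fw_scm_ps) {
		out.bss_pm = true;
		out.p2p_pm = in->fw_p2p_pm;
	}

	return out;
}

int main(void)
{
	struct pm_inputs in = {
		.bss_active = true, .p2p_active = true,
		.fw_p2p_pm = true, .fw_dcm_ps = true,
	};
	struct pm_result r = decide_pm(&in);

	printf("bss_pm=%d p2p_pm=%d\n", r.bss_pm, r.p2p_pm);
	return 0;
}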
+ int ret; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); + + if (!ret) + mvmvif->bf_data.bf_enabled = false; + + return ret; +} + +static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) +{ + bool disable_ps; + int ret; + + /* disable PS if CAM */ + disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); + /* ...or if any of the vifs require PS to be off */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_ps_disabled_iterator, + &disable_ps); + + /* update device power state if it has changed */ + if (mvm->ps_disabled != disable_ps) { + bool old_ps_disabled = mvm->ps_disabled; + + mvm->ps_disabled = disable_ps; + ret = iwl_mvm_power_update_device(mvm); + if (ret) { + mvm->ps_disabled = old_ps_disabled; + return ret; + } + } + + return 0; +} + +static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, + struct iwl_power_vifs *vifs) +{ + struct iwl_mvm_vif *mvmvif; + bool ba_enable; + + if (!vifs->bf_vif) + return 0; + + mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif); + + ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || + !vifs->bf_vif->bss_conf.ps || + iwl_mvm_vif_low_latency(mvmvif)); + + return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable); +} + +int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) +{ + struct iwl_power_vifs vifs = { + .mvm = mvm, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* get vifs info */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_get_vifs_iterator, &vifs); + + ret = iwl_mvm_power_set_ps(mvm); + if (ret) + return ret; + + return iwl_mvm_power_set_ba(mvm, &vifs); +} + +int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) +{ + struct iwl_power_vifs vifs = { + .mvm = mvm, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* get vifs info */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_get_vifs_iterator, &vifs); + + iwl_mvm_power_set_pm(mvm, &vifs); + + ret = iwl_mvm_power_set_ps(mvm); + if (ret) + return ret; + + if (vifs.bss_vif) { + ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif); + if (ret) + return ret; + } + + if (vifs.p2p_vif) { + ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif); + if (ret) + return ret; + } + + return iwl_mvm_power_set_ba(mvm, &vifs); +} + +int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool enable, u32 flags) +{ + int ret; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_power_cmd cmd = {}; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + if (!vif->bss_conf.assoc) + return 0; + + iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable); + + iwl_mvm_power_log(mvm, &cmd); +#ifdef CONFIG_IWLWIFI_DEBUGFS + memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd)); +#endif + ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags, + sizeof(cmd), &cmd); + if (ret) + return ret; + + /* configure beacon filtering */ + if (mvmvif != mvm->bf_allowed_vif) + return 0; + + if (enable) { + struct iwl_beacon_filter_cmd cmd_bf = { + IWL_BF_CMD_CONFIG_D0I3, + .bf_enable_beacon_filter = cpu_to_le32(1), + }; + ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, + flags, true); + } else { + if (mvmvif->bf_data.bf_enabled) + ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags); + else + ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags); + } + + return ret; +} diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c new file mode 100644 index 000000000000..509a66d05245 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -0,0 +1,328 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include +#include "fw-api.h" +#include "mvm.h" + +#define QUOTA_100 IWL_MVM_MAX_QUOTA +#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100) + +struct iwl_mvm_quota_iterator_data { + int n_interfaces[MAX_BINDINGS]; + int colors[MAX_BINDINGS]; + int low_latency[MAX_BINDINGS]; + int n_low_latency_bindings; + struct ieee80211_vif *disabled_vif; +}; + +static void iwl_mvm_quota_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_quota_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 id; + + /* skip disabled interfaces here immediately */ + if (vif == data->disabled_vif) + return; + + if (!mvmvif->phy_ctxt) + return; + + /* currently, PHY ID == binding ID */ + id = mvmvif->phy_ctxt->id; + + /* need at least one binding per PHY */ + BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS); + + if (WARN_ON_ONCE(id >= MAX_BINDINGS)) + return; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (vif->bss_conf.assoc) + break; + return; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + if (mvmvif->ap_ibss_active) + break; + return; + case NL80211_IFTYPE_MONITOR: + if (mvmvif->monitor_active) + break; + return; + case NL80211_IFTYPE_P2P_DEVICE: + return; + default: + WARN_ON_ONCE(1); + return; + } + + if (data->colors[id] < 0) + data->colors[id] = mvmvif->phy_ctxt->color; + else + WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color); + + data->n_interfaces[id]++; + + if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) { + data->n_low_latency_bindings++; + data->low_latency[id] = true; + } +} + +static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm, + struct iwl_time_quota_cmd *cmd) +{ +#ifdef CONFIG_NL80211_TESTMODE + struct iwl_mvm_vif *mvmvif; + int i, phy_id = -1, beacon_int = 0; + + if (!mvm->noa_duration || !mvm->noa_vif) + return; + + mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif); + if (!mvmvif->ap_ibss_active) + return; + + phy_id = mvmvif->phy_ctxt->id; + beacon_int = mvm->noa_vif->bss_conf.beacon_int; + + for (i = 0; i < MAX_BINDINGS; i++) { + u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color); + u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS; + u32 quota = le32_to_cpu(cmd->quotas[i].quota); + + if (id != phy_id) + continue; + + quota *= (beacon_int - mvm->noa_duration); + quota /= beacon_int; + + IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", + le32_to_cpu(cmd->quotas[i].quota), quota); + + cmd->quotas[i].quota = cpu_to_le32(quota); + } +#endif +} + +int iwl_mvm_update_quotas(struct iwl_mvm *mvm, + bool force_update, + struct ieee80211_vif *disabled_vif) +{ + struct iwl_time_quota_cmd cmd = {}; + int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat; + struct iwl_mvm_quota_iterator_data data = { + .n_interfaces = {}, + .colors = { -1, -1, -1, -1 }, + .disabled_vif = disabled_vif, + }; + struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd; + bool send = false; + + lockdep_assert_held(&mvm->mutex); + + /* update all upon completion */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return 0; + + /* iterator data above must match */ + BUILD_BUG_ON(MAX_BINDINGS != 4); + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_quota_iterator, &data); + + /* + * The FW's scheduling session consists of + * IWL_MVM_MAX_QUOTA fragments. 
Divide these fragments + * equally between all the bindings that require quota + */ + num_active_macs = 0; + for (i = 0; i < MAX_BINDINGS; i++) { + cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); + num_active_macs += data.n_interfaces[i]; + } + + n_non_lowlat = num_active_macs; + + if (data.n_low_latency_bindings == 1) { + for (i = 0; i < MAX_BINDINGS; i++) { + if (data.low_latency[i]) { + n_non_lowlat -= data.n_interfaces[i]; + break; + } + } + } + + if (data.n_low_latency_bindings == 1 && n_non_lowlat) { + /* + * Reserve quota for the low latency binding in case that + * there are several data bindings but only a single + * low latency one. Split the rest of the quota equally + * between the other data interfaces. + */ + quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat; + quota_rem = QUOTA_100 - n_non_lowlat * quota - + QUOTA_LOWLAT_MIN; + IWL_DEBUG_QUOTA(mvm, + "quota: low-latency binding active, remaining quota per other binding: %d\n", + quota); + } else if (num_active_macs) { + /* + * There are 0 or more than 1 low latency bindings, or all the + * data interfaces belong to the single low latency binding. + * Split the quota equally between the data interfaces. + */ + quota = QUOTA_100 / num_active_macs; + quota_rem = QUOTA_100 % num_active_macs; + IWL_DEBUG_QUOTA(mvm, + "quota: splitting evenly per binding: %d\n", + quota); + } else { + /* values don't really matter - won't be used */ + quota = 0; + quota_rem = 0; + } + + for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { + if (data.colors[i] < 0) + continue; + + cmd.quotas[idx].id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i])); + + if (data.n_interfaces[i] <= 0) + cmd.quotas[idx].quota = cpu_to_le32(0); + else if (data.n_low_latency_bindings == 1 && n_non_lowlat && + data.low_latency[i]) + /* + * There is more than one binding, but only one of the + * bindings is in low latency. For this case, allocate + * the minimal required quota for the low latency + * binding. 
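To make the split described in the comments above concrete, here is a small self-contained example of the arithmetic: the scheduling session is QUOTA_TOTAL fragments, a single low-latency binding keeps a reserved floor, and the rest is divided evenly per interface. QUOTA_TOTAL and the 25% floor are assumed stand-ins for IWL_MVM_MAX_QUOTA and IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT, whose actual values are not shown in this patch; the sketch is illustrative only.

#include <stdio.h>

#define MAX_BINDINGS	4
#define QUOTA_TOTAL	128				/* stand-in for IWL_MVM_MAX_QUOTA */
#define LOWLAT_MIN	((QUOTA_TOTAL * 25) / 100)	/* assumed 25% floor */

int main(void)
{
	int n_interfaces[MAX_BINDINGS] = { 2, 1, 0, 0 };
	int low_latency[MAX_BINDINGS]  = { 0, 1, 0, 0 };
	int n_lowlat_bindings = 1;
	int i, num_active = 0, n_non_lowlat, quota, quota_rem;

	for (i = 0; i < MAX_BINDINGS; i++)
		num_active += n_interfaces[i];

	n_non_lowlat = num_active;
	if (n_lowlat_bindings == 1) {
		for (i = 0; i < MAX_BINDINGS; i++) {
			if (low_latency[i]) {
				n_non_lowlat -= n_interfaces[i];
				break;
			}
		}
	}

	if (n_lowlat_bindings == 1 && n_non_lowlat) {
		/* reserve the floor, split the rest per remaining interface */
		quota = (QUOTA_TOTAL - LOWLAT_MIN) / n_non_lowlat;
		quota_rem = QUOTA_TOTAL - n_non_lowlat * quota - LOWLAT_MIN;
	} else if (num_active) {
		quota = QUOTA_TOTAL / num_active;
		quota_rem = QUOTA_TOTAL % num_active;
	} else {
		quota = 0;
		quota_rem = 0;
	}

	for (i = 0; i < MAX_BINDINGS; i++) {
		int q;

		if (!n_interfaces[i])
			continue;
		q = (n_lowlat_bindings == 1 && n_non_lowlat && low_latency[i]) ?
			LOWLAT_MIN : quota * n_interfaces[i];
		printf("binding %d: %d interfaces, quota %d\n",
		       i, n_interfaces[i], q);
	}
	printf("remainder handed to the first active binding: %d\n", quota_rem);
	return 0;
}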
+ */ + cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN); + else + cmd.quotas[idx].quota = + cpu_to_le32(quota * data.n_interfaces[i]); + + WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100, + "Binding=%d, quota=%u > max=%u\n", + idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100); + + cmd.quotas[idx].max_duration = cpu_to_le32(0); + + idx++; + } + + /* Give the remainder of the session to the first data binding */ + for (i = 0; i < MAX_BINDINGS; i++) { + if (le32_to_cpu(cmd.quotas[i].quota) != 0) { + le32_add_cpu(&cmd.quotas[i].quota, quota_rem); + IWL_DEBUG_QUOTA(mvm, + "quota: giving remainder of %d to binding %d\n", + quota_rem, i); + break; + } + } + + iwl_mvm_adjust_quota_for_noa(mvm, &cmd); + + /* check that we have non-zero quota for all valid bindings */ + for (i = 0; i < MAX_BINDINGS; i++) { + if (cmd.quotas[i].id_and_color != last->quotas[i].id_and_color) + send = true; + if (cmd.quotas[i].max_duration != last->quotas[i].max_duration) + send = true; + if (abs((int)le32_to_cpu(cmd.quotas[i].quota) - + (int)le32_to_cpu(last->quotas[i].quota)) + > IWL_MVM_QUOTA_THRESHOLD) + send = true; + if (cmd.quotas[i].id_and_color == cpu_to_le32(FW_CTXT_INVALID)) + continue; + WARN_ONCE(cmd.quotas[i].quota == 0, + "zero quota on binding %d\n", i); + } + + if (!send && !force_update) { + /* don't send a practically unchanged command, the firmware has + * to re-initialize a lot of state and that can have an adverse + * impact on it + */ + return 0; + } + + err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd); + + if (err) + IWL_ERR(mvm, "Failed to send quota: %d\n", err); + else + mvm->last_quota_cmd = cmd; + return err; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c new file mode 100644 index 000000000000..d1ad10391b47 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -0,0 +1,3983 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include +#include + +#include +#include +#include + +#include +#include "rs.h" +#include "fw-api.h" +#include "sta.h" +#include "iwl-op-mode.h" +#include "mvm.h" +#include "debugfs.h" + +#define RS_NAME "iwl-mvm-rs" + +#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ + +/* Calculations of success ratio are done in fixed point where 12800 is 100%. 
+ * Use this macro when dealing with thresholds consts set as a percentage + */ +#define RS_PERCENT(x) (128 * x) + +static u8 rs_ht_to_legacy[] = { + [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX, + [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX, + [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX, + [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX, + [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX, + [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX, + [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX, + [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX, + [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX, + [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX, +}; + +static const u8 ant_toggle_lookup[] = { + [ANT_NONE] = ANT_NONE, + [ANT_A] = ANT_B, + [ANT_B] = ANT_C, + [ANT_AB] = ANT_BC, + [ANT_C] = ANT_A, + [ANT_AC] = ANT_AB, + [ANT_BC] = ANT_AC, + [ANT_ABC] = ANT_ABC, +}; + +#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_HT_SISO_MCS_##s##_PLCP, \ + IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \ + IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \ + IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX } + +#define IWL_DECLARE_MCS_RATE(s) \ + [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \ + IWL_RATE_HT_SISO_MCS_##s##_PLCP, \ + IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \ + IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \ + IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \ + IWL_RATE_INVM_INDEX, \ + IWL_RATE_INVM_INDEX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */ + IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */ + IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */ + IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */ + IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */ + IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */ + IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */ + IWL_DECLARE_MCS_RATE(7), /* MCS 7 */ + IWL_DECLARE_MCS_RATE(8), /* MCS 8 */ + IWL_DECLARE_MCS_RATE(9), /* MCS 9 */ +}; + +enum rs_action { + RS_ACTION_STAY = 0, + RS_ACTION_DOWNSCALE = -1, + RS_ACTION_UPSCALE = 1, +}; + +enum rs_column_mode { + RS_INVALID = 0, + RS_LEGACY, + RS_SISO, + RS_MIMO2, +}; + +#define MAX_NEXT_COLUMNS 7 +#define MAX_COLUMN_CHECKS 3 + +struct rs_tx_column; + +typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct rs_rate *rate, + const struct rs_tx_column *next_col); + +struct rs_tx_column { + enum rs_column_mode mode; + u8 ant; + bool sgi; + enum rs_column next_columns[MAX_NEXT_COLUMNS]; + allow_column_func_t checks[MAX_COLUMN_CHECKS]; +}; + +static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct rs_rate *rate, + const struct rs_tx_column *next_col) +{ + return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); +} + +static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct rs_rate *rate, + const struct rs_tx_column *next_col) +{ + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_vif *mvmvif; + + if (!sta->ht_cap.ht_supported) + return false; + + 
if (sta->smps_mode == IEEE80211_SMPS_STATIC) + return false; + + if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2) + return false; + + if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) + return false; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (mvm->nvm_data->sku_cap_mimo_disabled) + return false; + + return true; +} + +static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct rs_rate *rate, + const struct rs_tx_column *next_col) +{ + if (!sta->ht_cap.ht_supported) + return false; + + return true; +} + +static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct rs_rate *rate, + const struct rs_tx_column *next_col) +{ + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + + if (is_ht20(rate) && (ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + return true; + if (is_ht40(rate) && (ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + return true; + if (is_ht80(rate) && (vht_cap->cap & + IEEE80211_VHT_CAP_SHORT_GI_80)) + return true; + + return false; +} + +static const struct rs_tx_column rs_tx_columns[] = { + [RS_COLUMN_LEGACY_ANT_A] = { + .mode = RS_LEGACY, + .ant = ANT_A, + .next_columns = { + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_SISO_ANT_A, + RS_COLUMN_MIMO2, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + }, + .checks = { + rs_ant_allow, + }, + }, + [RS_COLUMN_LEGACY_ANT_B] = { + .mode = RS_LEGACY, + .ant = ANT_B, + .next_columns = { + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_SISO_ANT_B, + RS_COLUMN_MIMO2, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + }, + .checks = { + rs_ant_allow, + }, + }, + [RS_COLUMN_SISO_ANT_A] = { + .mode = RS_SISO, + .ant = ANT_A, + .next_columns = { + RS_COLUMN_SISO_ANT_B, + RS_COLUMN_MIMO2, + RS_COLUMN_SISO_ANT_A_SGI, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + }, + .checks = { + rs_siso_allow, + rs_ant_allow, + }, + }, + [RS_COLUMN_SISO_ANT_B] = { + .mode = RS_SISO, + .ant = ANT_B, + .next_columns = { + RS_COLUMN_SISO_ANT_A, + RS_COLUMN_MIMO2, + RS_COLUMN_SISO_ANT_B_SGI, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + }, + .checks = { + rs_siso_allow, + rs_ant_allow, + }, + }, + [RS_COLUMN_SISO_ANT_A_SGI] = { + .mode = RS_SISO, + .ant = ANT_A, + .sgi = true, + .next_columns = { + RS_COLUMN_SISO_ANT_B_SGI, + RS_COLUMN_MIMO2_SGI, + RS_COLUMN_SISO_ANT_A, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + }, + .checks = { + rs_siso_allow, + rs_ant_allow, + rs_sgi_allow, + }, + }, + [RS_COLUMN_SISO_ANT_B_SGI] = { + .mode = RS_SISO, + .ant = ANT_B, + .sgi = true, + .next_columns = { + RS_COLUMN_SISO_ANT_A_SGI, + RS_COLUMN_MIMO2_SGI, + RS_COLUMN_SISO_ANT_B, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + }, + .checks = { + rs_siso_allow, + rs_ant_allow, + rs_sgi_allow, + }, + }, + [RS_COLUMN_MIMO2] = { + .mode = RS_MIMO2, + .ant = ANT_AB, + .next_columns = { + RS_COLUMN_SISO_ANT_A, + RS_COLUMN_MIMO2_SGI, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + }, + .checks = { + rs_mimo_allow, + }, + }, + [RS_COLUMN_MIMO2_SGI] = { + .mode = RS_MIMO2, + .ant = ANT_AB, + .sgi = true, + .next_columns = { + RS_COLUMN_SISO_ANT_A_SGI, + RS_COLUMN_MIMO2, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, 
+ RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + }, + .checks = { + rs_mimo_allow, + rs_sgi_allow, + }, + }, +}; + +static inline u8 rs_extract_rate(u32 rate_n_flags) +{ + /* also works for HT because bits 7:6 are zero there */ + return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK); +} + +static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) +{ + int idx = 0; + + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK; + idx += IWL_RATE_MCS_0_INDEX; + + /* skip 9M not supported in HT*/ + if (idx >= IWL_RATE_9M_INDEX) + idx += 1; + if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE)) + return idx; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + idx += IWL_RATE_MCS_0_INDEX; + + /* skip 9M not supported in VHT*/ + if (idx >= IWL_RATE_9M_INDEX) + idx++; + if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE)) + return idx; + } else { + /* legacy rate format, search for match in table */ + + u8 legacy_rate = rs_extract_rate(rate_n_flags); + for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) + if (iwl_rates[idx].plcp == legacy_rate) + return idx; + } + + return IWL_RATE_INVALID; +} + +static void rs_rate_scale_perform(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + int tid); +static void rs_fill_lq_cmd(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + const struct rs_rate *initial_rate); +static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); + +/** + * The following tables contain the expected throughput metrics for all rates + * + * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits + * + * where invalid entries are zeros. + * + * CCK rates are only valid in legacy table and will only be used in G + * (2.4 GHz) band. + */ + +static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0 +}; + +/* Expected TpT tables. 
4 indexes: + * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI + */ +static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0}, + {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0}, + {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0}, + {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0}, +}; + +static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275}, + {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280}, + {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173}, + {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284}, +}; + +static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308}, + {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312}, + {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466}, + {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691}, +}; + +static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0}, + {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0}, + {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0}, + {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0}, +}; + +static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300}, + {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303}, + {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053}, + {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221}, +}; + +static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319}, + {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320}, + {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219}, + {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545}, +}; + +/* mbps, mcs */ +static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { + { "1", "BPSK DSSS"}, + { "2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + { "11", "QPSK CCK"}, + { "6", "BPSK 1/2"}, + { "9", "BPSK 1/2"}, + { "12", "QPSK 1/2"}, + { "18", "QPSK 3/4"}, + { "24", "16QAM 1/2"}, + { "36", "16QAM 3/4"}, + { "48", "64QAM 2/3"}, + { "54", "64QAM 3/4"}, + { "60", "64QAM 5/6"}, +}; + +#define MCS_INDEX_PER_STREAM (8) + +static const char *rs_pretty_ant(u8 ant) +{ + static const char * const ant_name[] = { + [ANT_NONE] = "None", + [ANT_A] = "A", + [ANT_B] = "B", + [ANT_AB] = "AB", + [ANT_C] = "C", + [ANT_AC] = "AC", + [ANT_BC] = "BC", + [ANT_ABC] = "ABC", + }; + + if (ant > ANT_ABC) + return "UNKNOWN"; + + return ant_name[ant]; +} + +static const char *rs_pretty_lq_type(enum iwl_table_type type) +{ + static const char * const lq_types[] = { + [LQ_NONE] = "NONE", + [LQ_LEGACY_A] = "LEGACY_A", + [LQ_LEGACY_G] = "LEGACY_G", + [LQ_HT_SISO] = "HT SISO", + [LQ_HT_MIMO2] = "HT MIMO", + [LQ_VHT_SISO] = "VHT SISO", + [LQ_VHT_MIMO2] = "VHT MIMO", + }; + + if (type < LQ_NONE || type >= LQ_MAX) + return "UNKNOWN"; + + return lq_types[type]; +} + +static char *rs_pretty_rate(const struct rs_rate *rate) +{ + static char buf[40]; + static const char * const legacy_rates[] = { + [IWL_RATE_1M_INDEX] = "1M", + [IWL_RATE_2M_INDEX] = "2M", + [IWL_RATE_5M_INDEX] = "5.5M", + 
[IWL_RATE_11M_INDEX] = "11M", + [IWL_RATE_6M_INDEX] = "6M", + [IWL_RATE_9M_INDEX] = "9M", + [IWL_RATE_12M_INDEX] = "12M", + [IWL_RATE_18M_INDEX] = "18M", + [IWL_RATE_24M_INDEX] = "24M", + [IWL_RATE_36M_INDEX] = "36M", + [IWL_RATE_48M_INDEX] = "48M", + [IWL_RATE_54M_INDEX] = "54M", + }; + static const char *const ht_vht_rates[] = { + [IWL_RATE_MCS_0_INDEX] = "MCS0", + [IWL_RATE_MCS_1_INDEX] = "MCS1", + [IWL_RATE_MCS_2_INDEX] = "MCS2", + [IWL_RATE_MCS_3_INDEX] = "MCS3", + [IWL_RATE_MCS_4_INDEX] = "MCS4", + [IWL_RATE_MCS_5_INDEX] = "MCS5", + [IWL_RATE_MCS_6_INDEX] = "MCS6", + [IWL_RATE_MCS_7_INDEX] = "MCS7", + [IWL_RATE_MCS_8_INDEX] = "MCS8", + [IWL_RATE_MCS_9_INDEX] = "MCS9", + }; + const char *rate_str; + + if (is_type_legacy(rate->type)) + rate_str = legacy_rates[rate->index]; + else if (is_type_ht(rate->type) || is_type_vht(rate->type)) + rate_str = ht_vht_rates[rate->index]; + else + rate_str = "BAD_RATE"; + + sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type), + rs_pretty_ant(rate->ant), rate_str); + return buf; +} + +static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate, + const char *prefix) +{ + IWL_DEBUG_RATE(mvm, + "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n", + prefix, rs_pretty_rate(rate), rate->bw, + rate->sgi, rate->ldpc, rate->stbc); +} + +static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; +} + +static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm, + struct iwl_scale_tbl_info *tbl) +{ + int i; + + IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&tbl->win[i]); + + for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++) + rs_rate_scale_clear_window(&tbl->tpc_win[i]); +} + +static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) +{ + return (ant_type & valid_antenna) == ant_type; +} + +static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_data, u8 tid, + struct ieee80211_sta *sta) +{ + int ret = -EAGAIN; + + IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", + sta->addr, tid); + ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + if (ret == -EAGAIN) { + /* + * driver and mac80211 is out of sync + * this might be cause by reloading firmware + * stop the tx ba session here + */ + IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n", + tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + return ret; +} + +static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid, + struct iwl_lq_sta *lq_data, + struct ieee80211_sta *sta) +{ + if (tid < IWL_MAX_TID_COUNT) + rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta); + else + IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n", + tid, IWL_MAX_TID_COUNT); +} + +static inline int get_num_of_ant_from_rate(u32 rate_n_flags) +{ + return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); +} + +/* + * Static function to get the expected throughput from an iwl_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_index]; + return 0; +} + +/** + * rs_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 62 packets transmitted + * at this rate. 
window->data contains the bitmask of successful + * packets. + */ +static int _rs_collect_tx_data(struct iwl_mvm *mvm, + struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes, + struct iwl_rate_scale_data *window) +{ + static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); + s32 fail_count, tpt; + + /* Get expected throughput */ + tpt = get_expected_tpt(tbl, scale_index); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. + * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + */ + while (attempts > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & mask) { + window->data &= ~mask; + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame to throw away oldest history */ + window->data <<= 1; + + /* Mark the most recent #successes attempts as successful */ + if (successes > 0) { + window->success_counter++; + window->data |= 0x1; + successes--; + } + + attempts--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + return 0; +} + +static int rs_collect_tx_data(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes, + u8 reduced_txp) +{ + struct iwl_rate_scale_data *window = NULL; + int ret; + + if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) + return -EINVAL; + + if (tbl->column != RS_COLUMN_INVALID) { + struct lq_sta_pers *pers = &lq_sta->pers; + + pers->tx_stats[tbl->column][scale_index].total += attempts; + pers->tx_stats[tbl->column][scale_index].success += successes; + } + + /* Select window for current tx bit rate */ + window = &(tbl->win[scale_index]); + + ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, + window); + if (ret) + return ret; + + if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION)) + return -EINVAL; + + window = &tbl->tpc_win[reduced_txp]; + return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, + window); +} + +/* Convert rs_rate object into ucode rate bitmask */ +static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, + struct rs_rate *rate) +{ + u32 ucode_rate = 0; + int index = rate->index; + + ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) & + RATE_MCS_ANT_ABC_MSK); + + if (is_legacy(rate)) { + ucode_rate |= iwl_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + ucode_rate |= RATE_MCS_CCK_MSK; + return ucode_rate; + } + + if (is_ht(rate)) { + if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) { + IWL_ERR(mvm, "Invalid HT rate index %d\n", index); + index = IWL_LAST_HT_RATE; + } + ucode_rate |= 
RATE_MCS_HT_MSK; + + if (is_ht_siso(rate)) + ucode_rate |= iwl_rates[index].plcp_ht_siso; + else if (is_ht_mimo2(rate)) + ucode_rate |= iwl_rates[index].plcp_ht_mimo2; + else + WARN_ON_ONCE(1); + } else if (is_vht(rate)) { + if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) { + IWL_ERR(mvm, "Invalid VHT rate index %d\n", index); + index = IWL_LAST_VHT_RATE; + } + ucode_rate |= RATE_MCS_VHT_MSK; + if (is_vht_siso(rate)) + ucode_rate |= iwl_rates[index].plcp_vht_siso; + else if (is_vht_mimo2(rate)) + ucode_rate |= iwl_rates[index].plcp_vht_mimo2; + else + WARN_ON_ONCE(1); + + } else { + IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type); + } + + if (is_siso(rate) && rate->stbc) { + /* To enable STBC we need to set both a flag and ANT_AB */ + ucode_rate |= RATE_MCS_ANT_AB_MSK; + ucode_rate |= RATE_MCS_VHT_STBC_MSK; + } + + ucode_rate |= rate->bw; + if (rate->sgi) + ucode_rate |= RATE_MCS_SGI_MSK; + if (rate->ldpc) + ucode_rate |= RATE_MCS_LDPC_MSK; + + return ucode_rate; +} + +/* Convert a ucode rate into an rs_rate object */ +static int rs_rate_from_ucode_rate(const u32 ucode_rate, + enum ieee80211_band band, + struct rs_rate *rate) +{ + u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK; + u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate); + u8 nss; + + memset(rate, 0, sizeof(*rate)); + rate->index = iwl_hwrate_to_plcp_idx(ucode_rate); + + if (rate->index == IWL_RATE_INVALID) + return -EINVAL; + + rate->ant = (ant_msk >> RATE_MCS_ANT_POS); + + /* Legacy */ + if (!(ucode_rate & RATE_MCS_HT_MSK) && + !(ucode_rate & RATE_MCS_VHT_MSK)) { + if (num_of_ant == 1) { + if (band == IEEE80211_BAND_5GHZ) + rate->type = LQ_LEGACY_A; + else + rate->type = LQ_LEGACY_G; + } + + return 0; + } + + /* HT or VHT */ + if (ucode_rate & RATE_MCS_SGI_MSK) + rate->sgi = true; + if (ucode_rate & RATE_MCS_LDPC_MSK) + rate->ldpc = true; + if (ucode_rate & RATE_MCS_VHT_STBC_MSK) + rate->stbc = true; + if (ucode_rate & RATE_MCS_BF_MSK) + rate->bfer = true; + + rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK; + + if (ucode_rate & RATE_MCS_HT_MSK) { + nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >> + RATE_HT_MCS_NSS_POS) + 1; + + if (nss == 1) { + rate->type = LQ_HT_SISO; + WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, + "stbc %d bfer %d", + rate->stbc, rate->bfer); + } else if (nss == 2) { + rate->type = LQ_HT_MIMO2; + WARN_ON_ONCE(num_of_ant != 2); + } else { + WARN_ON_ONCE(1); + } + } else if (ucode_rate & RATE_MCS_VHT_MSK) { + nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + + if (nss == 1) { + rate->type = LQ_VHT_SISO; + WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, + "stbc %d bfer %d", + rate->stbc, rate->bfer); + } else if (nss == 2) { + rate->type = LQ_VHT_MIMO2; + WARN_ON_ONCE(num_of_ant != 2); + } else { + WARN_ON_ONCE(1); + } + } + + WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160); + WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 && + !is_vht(rate)); + + return 0; +} + +/* switch to another antenna/antennas and return 1 */ +/* if no other valid antenna found, return 0 */ +static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate) +{ + u8 new_ant_type; + + if (!rate->ant || rate->ant > ANT_ABC) + return 0; + + if (!rs_is_valid_ant(valid_ant, rate->ant)) + return 0; + + new_ant_type = ant_toggle_lookup[rate->ant]; + + while ((new_ant_type != rate->ant) && + !rs_is_valid_ant(valid_ant, new_ant_type)) + new_ant_type = ant_toggle_lookup[new_ant_type]; + + if (new_ant_type == rate->ant) + return 0; + + rate->ant = new_ant_type; + + return 1; +} + 
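The per-rate statistics that drive the scaling decisions below are maintained by _rs_collect_tx_data() above; the short sketch that follows isolates that sliding-window bookkeeping. struct tx_window is a simplified stand-in for iwl_rate_scale_data, while the 62-frame window width and the 128-units-per-percent fixed point follow the code and comments above.

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE	62	/* matches IWL_RATE_MAX_WINDOW above */

struct tx_window {
	uint64_t data;		/* bitmask of successful frames */
	int counter;		/* frames accounted for in the window */
	int success_counter;	/* successful frames in the window */
};

static void collect(struct tx_window *w, int attempts, int successes)
{
	const uint64_t oldest = 1ULL << (WINDOW_SIZE - 1);

	while (attempts-- > 0) {
		if (w->counter >= WINDOW_SIZE) {
			/* window full: forget the oldest attempt */
			w->counter = WINDOW_SIZE - 1;
			if (w->data & oldest) {
				w->data &= ~oldest;
				w->success_counter--;
			}
		}
		w->counter++;
		w->data <<= 1;		/* age the history by one frame */
		if (successes > 0) {
			w->success_counter++;
			w->data |= 1;
			successes--;
		}
	}
}

static int success_ratio(const struct tx_window *w)
{
	/* fixed point: 128 units per percent, so 12800 means 100% */
	return w->counter ? 128 * (100 * w->success_counter) / w->counter : -1;
}

int main(void)
{
	struct tx_window w = { 0 };

	collect(&w, 10, 8);	/* 10 attempts, 8 of them acknowledged */
	printf("success ratio = %d (12800 == 100%%)\n", success_ratio(&w));
	return 0;
}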
+static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta, + struct rs_rate *rate) +{ + if (is_legacy(rate)) + return lq_sta->active_legacy_rate; + else if (is_siso(rate)) + return lq_sta->active_siso_rate; + else if (is_mimo2(rate)) + return lq_sta->active_mimo2_rate; + + WARN_ON_ONCE(1); + return 0; +} + +static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, + int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjacent rate in + * the rate table */ + if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + } + + return (high << 8) | low; +} + +static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta, + struct rs_rate *rate) +{ + return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate); +} + +/* Get the next supported lower rate in the current column. + * Return true if bottom rate in the current column was reached + */ +static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta, + struct rs_rate *rate) +{ + u8 low; + u16 high_low; + u16 rate_mask; + struct iwl_mvm *mvm = lq_sta->pers.drv; + + rate_mask = rs_get_supported_rates(lq_sta, rate); + high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask, + rate->type); + low = high_low & 0xff; + + /* Bottom rate of column reached */ + if (low == IWL_RATE_INVALID) + return true; + + rate->index = low; + return false; +} + +/* Get the next rate to use following a column downgrade */ +static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, + struct rs_rate *rate) +{ + struct iwl_mvm *mvm = lq_sta->pers.drv; + + if (is_legacy(rate)) { + /* No column to downgrade from Legacy */ + return; + } else if (is_siso(rate)) { + /* Downgrade to Legacy if we were in SISO */ + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate->type = LQ_LEGACY_A; + else + rate->type = LQ_LEGACY_G; + + rate->bw = RATE_MCS_CHAN_WIDTH_20; + + WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX || + rate->index > IWL_RATE_MCS_9_INDEX); + + rate->index = rs_ht_to_legacy[rate->index]; + rate->ldpc = false; + } else { + /* Downgrade to SISO with same MCS if in MIMO */ + rate->type = is_vht_mimo2(rate) ? + LQ_VHT_SISO : LQ_HT_SISO; + } + + if (num_of_ant(rate->ant) > 1) + rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm)); + + /* Relevant in both switching to SISO or Legacy */ + rate->sgi = false; + + if (!rs_rate_supported(lq_sta, rate)) + rs_get_lower_rate_in_column(lq_sta, rate); +} + +/* Check if both rates are identical + * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B + * with a rate indicating STBC/BFER and ANT_AB. 
+ */ +static inline bool rs_rate_equal(struct rs_rate *a, + struct rs_rate *b, + bool allow_ant_mismatch) + +{ + bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) && + (a->bfer == b->bfer); + + if (allow_ant_mismatch) { + if (a->stbc || a->bfer) { + WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d", + a->stbc, a->bfer, a->ant); + ant_match |= (b->ant == ANT_A || b->ant == ANT_B); + } else if (b->stbc || b->bfer) { + WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d", + b->stbc, b->bfer, b->ant); + ant_match |= (a->ant == ANT_A || a->ant == ANT_B); + } + } + + return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) && + (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match; +} + +/* Check if both rates share the same column */ +static inline bool rs_rate_column_match(struct rs_rate *a, + struct rs_rate *b) +{ + bool ant_match; + + if (a->stbc || a->bfer) + ant_match = (b->ant == ANT_A || b->ant == ANT_B); + else + ant_match = (a->ant == b->ant); + + return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) + && ant_match; +} + +static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate) +{ + if (is_legacy(rate)) { + if (rate->ant == ANT_A) + return RS_COLUMN_LEGACY_ANT_A; + + if (rate->ant == ANT_B) + return RS_COLUMN_LEGACY_ANT_B; + + goto err; + } + + if (is_siso(rate)) { + if (rate->ant == ANT_A || rate->stbc || rate->bfer) + return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI : + RS_COLUMN_SISO_ANT_A; + + if (rate->ant == ANT_B) + return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI : + RS_COLUMN_SISO_ANT_B; + + goto err; + } + + if (is_mimo(rate)) + return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2; + +err: + return RS_COLUMN_INVALID; +} + +static u8 rs_get_tid(struct ieee80211_hdr *hdr) +{ + u8 tid = IWL_MAX_TID_COUNT; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } + + if (unlikely(tid > IWL_MAX_TID_COUNT)) + tid = IWL_MAX_TID_COUNT; + + return tid; +} + +void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, struct ieee80211_tx_info *info) +{ + int legacy_success; + int retries; + int i; + struct iwl_lq_cmd *table; + u32 lq_hwrate; + struct rs_rate lq_rate, tx_resp_rate; + struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; + u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; + u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; + bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_LQ_SS_PARAMS); + + /* Treat uninitialized rate scaling data same as non-existing. 
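When a column stops performing, rs_get_lower_rate_down_column() above walks a fixed downgrade ladder: MIMO2 drops to SISO keeping the MCS, SISO drops to legacy through the rs_ht_to_legacy mapping (losing SGI and LDPC), and legacy has nowhere lower to go. The sketch below models only that ladder; the enum and the downgrade() helper are the editor's simplified names, not driver types.

#include <stdio.h>

enum mode { MODE_LEGACY, MODE_SISO, MODE_MIMO2 };

static const char * const mode_name[] = { "legacy", "SISO", "MIMO2" };

static enum mode downgrade(enum mode m)
{
	switch (m) {
	case MODE_MIMO2:
		return MODE_SISO;	/* keep the same MCS index */
	case MODE_SISO:
		return MODE_LEGACY;	/* MCS mapped via rs_ht_to_legacy,
					 * SGI and LDPC are dropped */
	default:
		return MODE_LEGACY;	/* legacy is the floor */
	}
}

int main(void)
{
	enum mode m = MODE_MIMO2;

	while (m != MODE_LEGACY) {
		printf("%s -> ", mode_name[m]);
		m = downgrade(m);
	}
	printf("%s\n", mode_name[m]);
	return 0;
}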
*/ + if (!lq_sta) { + IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n"); + return; + } else if (!lq_sta->pers.drv) { + IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); + return; + } + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); + +#ifdef CONFIG_MAC80211_DEBUGFS + /* Disable last tx check if we are debugging with fixed rate but + * update tx stats */ + if (lq_sta->pers.dbg_fixed_rate) { + int index = tx_resp_rate.index; + enum rs_column column; + int attempts, success; + + column = rs_get_column_from_rate(&tx_resp_rate); + if (WARN_ONCE(column == RS_COLUMN_INVALID, + "Can't map rate 0x%x to column", + tx_resp_hwrate)) + return; + + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + attempts = info->status.ampdu_len; + success = info->status.ampdu_ack_len; + } else { + attempts = info->status.rates[0].count; + success = !!(info->flags & IEEE80211_TX_STAT_ACK); + } + + lq_sta->pers.tx_stats[column][index].total += attempts; + lq_sta->pers.tx_stats[column][index].success += success; + + IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n", + tx_resp_hwrate, success, attempts); + return; + } +#endif + + if (time_after(jiffies, + (unsigned long)(lq_sta->last_tx + + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { + int t; + + IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); + for (t = 0; t < IWL_MAX_TID_COUNT; t++) + ieee80211_stop_tx_ba_session(sta, t); + + iwl_mvm_rs_rate_init(mvm, sta, info->band, false); + return; + } + lq_sta->last_tx = jiffies; + + /* Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). + */ + table = &lq_sta->lq; + lq_hwrate = le32_to_cpu(table->rs_table[0]); + rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); + + /* Here we actually compare this rate to the latest LQ command */ + if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { + IWL_DEBUG_RATE(mvm, + "initial tx resp rate 0x%x does not match 0x%x\n", + tx_resp_hwrate, lq_hwrate); + + /* + * Since rates mis-match, the last LQ command may have failed. + * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with + * ... driver. + */ + lq_sta->missed_rate_counter++; + if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) { + lq_sta->missed_rate_counter = 0; + IWL_DEBUG_RATE(mvm, + "Too many rates mismatch. Send sync LQ. 
rs_state %d\n", + lq_sta->rs_state); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); + } + /* Regardless, ignore this status info for outdated rate */ + return; + } else + /* Rate did match, so reset the missed_rate_counter */ + lq_sta->missed_rate_counter = 0; + + if (!lq_sta->search_better_tbl) { + curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + } else { + curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + } + + if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) { + IWL_DEBUG_RATE(mvm, + "Neither active nor search matches tx rate\n"); + tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE"); + tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH"); + rs_dump_rate(mvm, &lq_rate, "ACTUAL"); + + /* + * no matching table found, let's by-pass the data collection + * and continue to perform rate scale to find the rate table + */ + rs_stay_in_table(lq_sta, true); + goto done; + } + + /* + * Updating the frame history depends on whether packets were + * aggregated. + * + * For aggregation, all packets were transmitted at the same rate, the + * first index into rate scale table. + */ + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + /* ampdu_ack_len = 0 marks no BA was received. In this case + * treat it as a single frame loss as we don't want the success + * ratio to dip too quickly because a BA wasn't received + */ + if (info->status.ampdu_ack_len == 0) + info->status.ampdu_len = 1; + + rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index, + info->status.ampdu_len, + info->status.ampdu_ack_len, + reduced_txp); + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { + lq_sta->total_success += info->status.ampdu_ack_len; + lq_sta->total_failed += (info->status.ampdu_len - + info->status.ampdu_ack_len); + } + } else { + /* For legacy, update frame history with for each Tx retry. */ + retries = info->status.rates[0].count - 1; + /* HW doesn't send more than 15 retries */ + retries = min(retries, 15); + + /* The last transmission may have been successful */ + legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); + /* Collect data for each rate used during failed TX attempts */ + for (i = 0; i <= retries; ++i) { + lq_hwrate = le32_to_cpu(table->rs_table[i]); + rs_rate_from_ucode_rate(lq_hwrate, info->band, + &lq_rate); + /* + * Only collect stats if retried rate is in the same RS + * table as active/search. + */ + if (rs_rate_column_match(&lq_rate, &curr_tbl->rate)) + tmp_tbl = curr_tbl; + else if (rs_rate_column_match(&lq_rate, + &other_tbl->rate)) + tmp_tbl = other_tbl; + else + continue; + + rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index, + 1, i < retries ? 0 : legacy_success, + reduced_txp); + } + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { + lq_sta->total_success += legacy_success; + lq_sta->total_failed += retries + (1 - legacy_success); + } + } + /* The last TX rate is cached in lq_sta; it's set in if/else above */ + lq_sta->last_rate_n_flags = lq_hwrate; + IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); +done: + /* See if there's a better rate or modulation mode to try. 
*/ + if (sta->supp_rates[info->band]) + rs_rate_scale_perform(mvm, sta, lq_sta, tid); +} + +/* + * mac80211 sends us Tx status + */ +static void rs_mac80211_tx_status(void *mvm_r, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r; + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (!iwl_mvm_sta_from_mac80211(sta)->vif) + return; + + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info); +} + +/* + * Begin a period of staying with a selected modulation mode. + * Set "stay_in_tbl" flag to prevent any mode switches. + * Set frame tx success limits according to legacy vs. high-throughput, + * and reset overall (spanning all rates) tx success history statistics. + * These control how long we stay using same modulation mode before + * searching for a new mode. + */ +static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy, + struct iwl_lq_sta *lq_sta) +{ + IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n"); + lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN; + if (is_legacy) { + lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT; + } else { + lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT; + } + lq_sta->table_count = 0; + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = jiffies; + lq_sta->visited_columns = 0; +} + +static inline int rs_get_max_rate_from_mask(unsigned long rate_mask) +{ + if (rate_mask) + return find_last_bit(&rate_mask, BITS_PER_LONG); + return IWL_RATE_INVALID; +} + +static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta, + const struct rs_tx_column *column) +{ + switch (column->mode) { + case RS_LEGACY: + return lq_sta->max_legacy_rate_idx; + case RS_SISO: + return lq_sta->max_siso_rate_idx; + case RS_MIMO2: + return lq_sta->max_mimo2_rate_idx; + default: + WARN_ON_ONCE(1); + } + + return lq_sta->max_legacy_rate_idx; +} + +static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, + const struct rs_tx_column *column, + u32 bw) +{ + /* Used to choose among HT tables */ + const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT]; + + if (WARN_ON_ONCE(column->mode != RS_LEGACY && + column->mode != RS_SISO && + column->mode != RS_MIMO2)) + return expected_tpt_legacy; + + /* Legacy rates have only one table */ + if (column->mode == RS_LEGACY) + return expected_tpt_legacy; + + ht_tbl_pointer = expected_tpt_mimo2_20MHz; + /* Choose among many HT tables depending on number of streams + * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation + * status */ + if (column->mode == RS_SISO) { + switch (bw) { + case RATE_MCS_CHAN_WIDTH_20: + ht_tbl_pointer = expected_tpt_siso_20MHz; + break; + case RATE_MCS_CHAN_WIDTH_40: + ht_tbl_pointer = expected_tpt_siso_40MHz; + break; + case RATE_MCS_CHAN_WIDTH_80: + ht_tbl_pointer = expected_tpt_siso_80MHz; + break; + default: + WARN_ON_ONCE(1); + } + } else if (column->mode == RS_MIMO2) { + switch (bw) { + case RATE_MCS_CHAN_WIDTH_20: + ht_tbl_pointer = 
expected_tpt_mimo2_20MHz; + break; + case RATE_MCS_CHAN_WIDTH_40: + ht_tbl_pointer = expected_tpt_mimo2_40MHz; + break; + case RATE_MCS_CHAN_WIDTH_80: + ht_tbl_pointer = expected_tpt_mimo2_80MHz; + break; + default: + WARN_ON_ONCE(1); + } + } else { + WARN_ON_ONCE(1); + } + + if (!column->sgi && !lq_sta->is_agg) /* Normal */ + return ht_tbl_pointer[0]; + else if (column->sgi && !lq_sta->is_agg) /* SGI */ + return ht_tbl_pointer[1]; + else if (!column->sgi && lq_sta->is_agg) /* AGG */ + return ht_tbl_pointer[2]; + else /* AGG+SGI */ + return ht_tbl_pointer[3]; +} + +static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + struct rs_rate *rate = &tbl->rate; + const struct rs_tx_column *column = &rs_tx_columns[tbl->column]; + + tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw); +} + +static s32 rs_get_best_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, /* "search" */ + unsigned long rate_mask, s8 index) +{ + struct iwl_scale_tbl_info *active_tbl = + &(lq_sta->lq_info[lq_sta->active_tbl]); + s32 success_ratio = active_tbl->win[index].success_ratio; + u16 expected_current_tpt = active_tbl->expected_tpt[index]; + const u16 *tpt_tbl = tbl->expected_tpt; + u16 high_low; + u32 target_tpt; + int rate_idx; + + if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) { + target_tpt = 100 * expected_current_tpt; + IWL_DEBUG_RATE(mvm, + "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n", + success_ratio, target_tpt); + } else { + target_tpt = lq_sta->last_tpt; + IWL_DEBUG_RATE(mvm, + "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n", + success_ratio, target_tpt); + } + + rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG); + + while (rate_idx != IWL_RATE_INVALID) { + if (target_tpt < (100 * tpt_tbl[rate_idx])) + break; + + high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask, + tbl->rate.type); + + rate_idx = (high_low >> 8) & 0xff; + } + + IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n", + rate_idx, target_tpt, + rate_idx != IWL_RATE_INVALID ? + 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE); + + return rate_idx; +} + +static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) +{ + if (sta->bandwidth >= IEEE80211_STA_RX_BW_80) + return RATE_MCS_CHAN_WIDTH_80; + else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) + return RATE_MCS_CHAN_WIDTH_40; + + return RATE_MCS_CHAN_WIDTH_20; +} + +/* + * Check whether we should continue using same modulation mode, or + * begin search for a new mode, based on: + * 1) # tx successes or failures while using this mode + * 2) # times calling this function + * 3) elapsed time in this mode (not used, for now) + */ +static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) +{ + struct iwl_scale_tbl_info *tbl; + int active_tbl; + int flush_interval_passed = 0; + struct iwl_mvm *mvm; + + mvm = lq_sta->pers.drv; + active_tbl = lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + /* If we've been disallowing search, see if we should now allow it */ + if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { + /* Elapsed time using current modulation mode */ + if (lq_sta->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_sta->flush_timer + + (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ))); + + /* + * Check if we should allow search for new modulation mode. 
+ * If many frames have failed or succeeded, or we've used + * this same modulation for a long time, allow search, and + * reset history stats that keep track of whether we should + * allow a new search. Also (below) reset all bitmaps and + * stats in active history. + */ + if (force_search || + (lq_sta->total_failed > lq_sta->max_failure_limit) || + (lq_sta->total_success > lq_sta->max_success_limit) || + ((!lq_sta->search_better_tbl) && + (lq_sta->flush_timer) && (flush_interval_passed))) { + IWL_DEBUG_RATE(mvm, + "LQ: stay is expired %d %d %d\n", + lq_sta->total_failed, + lq_sta->total_success, + flush_interval_passed); + + /* Allow search for new mode */ + lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED; + IWL_DEBUG_RATE(mvm, + "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n"); + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = 0; + /* mark the current column as visited */ + lq_sta->visited_columns = BIT(tbl->column); + /* + * Else if we've used this modulation mode enough repetitions + * (regardless of elapsed time or success/failure), reset + * history bitmaps and rate-specific stats for all rates in + * active table. + */ + } else { + lq_sta->table_count++; + if (lq_sta->table_count >= + lq_sta->table_count_limit) { + lq_sta->table_count = 0; + + IWL_DEBUG_RATE(mvm, + "LQ: stay in table clear win\n"); + rs_rate_scale_clear_tbl_windows(mvm, tbl); + } + } + + /* If transitioning to allow "search", reset all history + * bitmaps and stats in active table (this will become the new + * "search" table). */ + if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) { + rs_rate_scale_clear_tbl_windows(mvm, tbl); + } + } +} + +/* + * setup rate table in uCode + */ +static void rs_update_rate_tbl(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); +} + +static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + enum rs_action scale_action) +{ + if (sta->bandwidth != IEEE80211_STA_RX_BW_80) + return false; + + if (!is_vht_siso(&tbl->rate)) + return false; + + if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) && + (tbl->rate.index == IWL_RATE_MCS_0_INDEX) && + (scale_action == RS_ACTION_DOWNSCALE)) { + tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20; + tbl->rate.index = IWL_RATE_MCS_4_INDEX; + IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n"); + goto tweaked; + } + + /* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is + * sustainable, i.e. we're past the test window. We can't go back + * if MCS5 is just tested as this will happen always after switching + * to 20Mhz MCS4 because the rate stats are cleared. 
+ */ + if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) && + (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) && + (scale_action == RS_ACTION_STAY)) || + ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) && + (scale_action == RS_ACTION_UPSCALE)))) { + tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80; + tbl->rate.index = IWL_RATE_MCS_1_INDEX; + IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n"); + goto tweaked; + } + + return false; + +tweaked: + rs_set_expected_tpt_table(lq_sta, tbl); + rs_rate_scale_clear_tbl_windows(mvm, tbl); + return true; +} + +static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl) +{ + int i, j, max_rate; + enum rs_column next_col_id; + const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column]; + const struct rs_tx_column *next_col; + allow_column_func_t allow_func; + u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm); + const u16 *expected_tpt_tbl; + u16 tpt, max_expected_tpt; + + for (i = 0; i < MAX_NEXT_COLUMNS; i++) { + next_col_id = curr_col->next_columns[i]; + + if (next_col_id == RS_COLUMN_INVALID) + continue; + + if (lq_sta->visited_columns & BIT(next_col_id)) { + IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n", + next_col_id); + continue; + } + + next_col = &rs_tx_columns[next_col_id]; + + if (!rs_is_valid_ant(valid_ants, next_col->ant)) { + IWL_DEBUG_RATE(mvm, + "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n", + next_col_id, valid_ants, next_col->ant); + continue; + } + + for (j = 0; j < MAX_COLUMN_CHECKS; j++) { + allow_func = next_col->checks[j]; + if (allow_func && !allow_func(mvm, sta, &tbl->rate, + next_col)) + break; + } + + if (j != MAX_COLUMN_CHECKS) { + IWL_DEBUG_RATE(mvm, + "Skip column %d: not allowed (check %d failed)\n", + next_col_id, j); + + continue; + } + + tpt = lq_sta->last_tpt / 100; + expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col, + rs_bw_from_sta_bw(sta)); + if (WARN_ON_ONCE(!expected_tpt_tbl)) + continue; + + max_rate = rs_get_max_allowed_rate(lq_sta, next_col); + if (max_rate == IWL_RATE_INVALID) { + IWL_DEBUG_RATE(mvm, + "Skip column %d: no rate is allowed in this column\n", + next_col_id); + continue; + } + + max_expected_tpt = expected_tpt_tbl[max_rate]; + if (tpt >= max_expected_tpt) { + IWL_DEBUG_RATE(mvm, + "Skip column %d: can't beat current TPT. Max expected %d current %d\n", + next_col_id, max_expected_tpt, tpt); + continue; + } + + IWL_DEBUG_RATE(mvm, + "Found potential column %d. 
Max expected %d current %d\n", + next_col_id, max_expected_tpt, tpt); + break; + } + + if (i == MAX_NEXT_COLUMNS) + return RS_COLUMN_INVALID; + + return next_col_id; +} + +static int rs_switch_to_column(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta *sta, + enum rs_column col_id) +{ + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct rs_rate *rate = &search_tbl->rate; + const struct rs_tx_column *column = &rs_tx_columns[col_id]; + const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + unsigned long rate_mask = 0; + u32 rate_idx = 0; + + memcpy(search_tbl, tbl, sz); + + rate->sgi = column->sgi; + rate->ant = column->ant; + + if (column->mode == RS_LEGACY) { + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate->type = LQ_LEGACY_A; + else + rate->type = LQ_LEGACY_G; + + rate->bw = RATE_MCS_CHAN_WIDTH_20; + rate->ldpc = false; + rate_mask = lq_sta->active_legacy_rate; + } else if (column->mode == RS_SISO) { + rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO; + rate_mask = lq_sta->active_siso_rate; + } else if (column->mode == RS_MIMO2) { + rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; + rate_mask = lq_sta->active_mimo2_rate; + } else { + WARN_ON_ONCE("Bad column mode"); + } + + if (column->mode != RS_LEGACY) { + rate->bw = rs_bw_from_sta_bw(sta); + rate->ldpc = lq_sta->ldpc; + } + + search_tbl->column = col_id; + rs_set_expected_tpt_table(lq_sta, search_tbl); + + lq_sta->visited_columns |= BIT(col_id); + + /* Get the best matching rate if we're changing modes. e.g. + * SISO->MIMO, LEGACY->SISO, MIMO->SISO + */ + if (curr_column->mode != column->mode) { + rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl, + rate_mask, rate->index); + + if ((rate_idx == IWL_RATE_INVALID) || + !(BIT(rate_idx) & rate_mask)) { + IWL_DEBUG_RATE(mvm, + "can not switch with index %d" + " rate mask %lx\n", + rate_idx, rate_mask); + + goto err; + } + + rate->index = rate_idx; + } + + IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n", + col_id, rate->index); + + return 0; + +err: + rate->type = LQ_NONE; + return -1; +} + +static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm, + struct iwl_scale_tbl_info *tbl, + s32 sr, int low, int high, + int current_tpt, + int low_tpt, int high_tpt) +{ + enum rs_action action = RS_ACTION_STAY; + + if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) || + (current_tpt == 0)) { + IWL_DEBUG_RATE(mvm, + "Decrease rate because of low SR\n"); + return RS_ACTION_DOWNSCALE; + } + + if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE) && + (high != IWL_RATE_INVALID)) { + IWL_DEBUG_RATE(mvm, + "No data about high/low rates. Increase rate\n"); + return RS_ACTION_UPSCALE; + } + + if ((high_tpt == IWL_INVALID_VALUE) && + (high != IWL_RATE_INVALID) && + (low_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt)) { + IWL_DEBUG_RATE(mvm, + "No data about high rate and low rate is worse. Increase rate\n"); + return RS_ACTION_UPSCALE; + } + + if ((high_tpt != IWL_INVALID_VALUE) && + (high_tpt > current_tpt)) { + IWL_DEBUG_RATE(mvm, + "Higher rate is better. 
Increate rate\n"); + return RS_ACTION_UPSCALE; + } + + if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) { + IWL_DEBUG_RATE(mvm, + "Both high and low are worse. Maintain rate\n"); + return RS_ACTION_STAY; + } + + if ((low_tpt != IWL_INVALID_VALUE) && + (low_tpt > current_tpt)) { + IWL_DEBUG_RATE(mvm, + "Lower rate is better\n"); + action = RS_ACTION_DOWNSCALE; + goto out; + } + + if ((low_tpt == IWL_INVALID_VALUE) && + (low != IWL_RATE_INVALID)) { + IWL_DEBUG_RATE(mvm, + "No data about lower rate\n"); + action = RS_ACTION_DOWNSCALE; + goto out; + } + + IWL_DEBUG_RATE(mvm, "Maintain rate\n"); + +out: + if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) { + if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) { + IWL_DEBUG_RATE(mvm, + "SR is above NO DECREASE. Avoid downscale\n"); + action = RS_ACTION_STAY; + } else if (current_tpt > (100 * tbl->expected_tpt[low])) { + IWL_DEBUG_RATE(mvm, + "Current TPT is higher than max expected in low rate. Avoid downscale\n"); + action = RS_ACTION_STAY; + } else { + IWL_DEBUG_RATE(mvm, "Decrease rate\n"); + } + } + + return action; +} + +static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + /* Our chip supports Tx STBC and the peer is an HT/VHT STA which + * supports STBC of at least 1*SS + */ + if (!lq_sta->stbc_capable) + return false; + + if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) + return false; + + return true; +} + +static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index, + int *weaker, int *stronger) +{ + *weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP; + if (*weaker > TPC_MAX_REDUCTION) + *weaker = TPC_INVALID; + + *stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP; + if (*stronger < 0) + *stronger = TPC_INVALID; +} + +static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct rs_rate *rate, enum ieee80211_band band) +{ + int index = rate->index; + bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); + bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION && + !vif->bss_conf.ps); + + IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n", + cam, sta_ps_disabled); + /* + * allow tpc only if power management is enabled, or bt coex + * activity grade allows it and we are on 2.4Ghz. + */ + if ((cam || sta_ps_disabled) && + !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band)) + return false; + + IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type); + if (is_legacy(rate)) + return index == IWL_RATE_54M_INDEX; + if (is_ht(rate)) + return index == IWL_RATE_MCS_7_INDEX; + if (is_vht(rate)) + return index == IWL_RATE_MCS_7_INDEX || + index == IWL_RATE_MCS_8_INDEX || + index == IWL_RATE_MCS_9_INDEX; + + WARN_ON_ONCE(1); + return false; +} + +enum tpc_action { + TPC_ACTION_STAY, + TPC_ACTION_DECREASE, + TPC_ACTION_INCREASE, + TPC_ACTION_NO_RESTIRCTION, +}; + +static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm, + s32 sr, int weak, int strong, + int current_tpt, + int weak_tpt, int strong_tpt) +{ + /* stay until we have valid tpt */ + if (current_tpt == IWL_INVALID_VALUE) { + IWL_DEBUG_RATE(mvm, "no current tpt. 
stay.\n"); + return TPC_ACTION_STAY; + } + + /* Too many failures, increase txp */ + if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) || + current_tpt == 0) { + IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n"); + return TPC_ACTION_NO_RESTIRCTION; + } + + /* try decreasing first if applicable */ + if (weak != TPC_INVALID) { + if (weak_tpt == IWL_INVALID_VALUE && + (strong_tpt == IWL_INVALID_VALUE || + current_tpt >= strong_tpt)) { + IWL_DEBUG_RATE(mvm, + "no weak txp measurement. decrease txp\n"); + return TPC_ACTION_DECREASE; + } + + if (weak_tpt > current_tpt) { + IWL_DEBUG_RATE(mvm, + "lower txp has better tpt. decrease txp\n"); + return TPC_ACTION_DECREASE; + } + } + + /* next, increase if needed */ + if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) && + strong != TPC_INVALID) { + if (weak_tpt == IWL_INVALID_VALUE && + strong_tpt != IWL_INVALID_VALUE && + current_tpt < strong_tpt) { + IWL_DEBUG_RATE(mvm, + "higher txp has better tpt. increase txp\n"); + return TPC_ACTION_INCREASE; + } + + if (weak_tpt < current_tpt && + (strong_tpt == IWL_INVALID_VALUE || + strong_tpt > current_tpt)) { + IWL_DEBUG_RATE(mvm, + "lower txp has worse tpt. increase txp\n"); + return TPC_ACTION_INCREASE; + } + } + + IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n"); + return TPC_ACTION_STAY; +} + +static bool rs_tpc_perform(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_vif *vif = mvm_sta->vif; + struct ieee80211_chanctx_conf *chanctx_conf; + enum ieee80211_band band; + struct iwl_rate_scale_data *window; + struct rs_rate *rate = &tbl->rate; + enum tpc_action action; + s32 sr; + u8 cur = lq_sta->lq.reduced_tpc; + int current_tpt; + int weak, strong; + int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE; + +#ifdef CONFIG_MAC80211_DEBUGFS + if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) { + IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n", + lq_sta->pers.dbg_fixed_txp_reduction); + lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction; + return cur != lq_sta->pers.dbg_fixed_txp_reduction; + } +#endif + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + if (WARN_ON(!chanctx_conf)) + band = IEEE80211_NUM_BANDS; + else + band = chanctx_conf->def.chan->band; + rcu_read_unlock(); + + if (!rs_tpc_allowed(mvm, vif, rate, band)) { + IWL_DEBUG_RATE(mvm, + "tpc is not allowed. 
remove txp restrictions\n"); + lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION; + return cur != TPC_NO_REDUCTION; + } + + rs_get_adjacent_txp(mvm, cur, &weak, &strong); + + /* Collect measured throughputs for current and adjacent rates */ + window = tbl->tpc_win; + sr = window[cur].success_ratio; + current_tpt = window[cur].average_tpt; + if (weak != TPC_INVALID) + weak_tpt = window[weak].average_tpt; + if (strong != TPC_INVALID) + strong_tpt = window[strong].average_tpt; + + IWL_DEBUG_RATE(mvm, + "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n", + cur, current_tpt, sr, weak, strong, + weak_tpt, strong_tpt); + + action = rs_get_tpc_action(mvm, sr, weak, strong, + current_tpt, weak_tpt, strong_tpt); + + /* override actions if we are on the edge */ + if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) { + IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n"); + action = TPC_ACTION_STAY; + } else if (strong == TPC_INVALID && + (action == TPC_ACTION_INCREASE || + action == TPC_ACTION_NO_RESTIRCTION)) { + IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n"); + action = TPC_ACTION_STAY; + } + + switch (action) { + case TPC_ACTION_DECREASE: + lq_sta->lq.reduced_tpc = weak; + return true; + case TPC_ACTION_INCREASE: + lq_sta->lq.reduced_tpc = strong; + return true; + case TPC_ACTION_NO_RESTIRCTION: + lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION; + return true; + case TPC_ACTION_STAY: + /* do nothing */ + break; + } + return false; +} + +/* + * Do rate scaling and search for new modulation mode. + */ +static void rs_rate_scale_perform(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + int tid) +{ + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + enum rs_action scale_action = RS_ACTION_STAY; + u16 rate_mask; + u8 update_lq = 0; + struct iwl_scale_tbl_info *tbl, *tbl1; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + s32 sr; + u8 prev_agg = lq_sta->is_agg; + struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data; + struct rs_rate *rate; + + lq_sta->is_agg = !!sta_priv->agg_tids; + + /* + * Select rate-scale / modulation-mode table to work with in + * the rest of this function: "search" if searching for better + * modulation mode, or "active" if doing rate scaling within a mode. + */ + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + rate = &tbl->rate; + + if (prev_agg != lq_sta->is_agg) { + IWL_DEBUG_RATE(mvm, + "Aggregation changed: prev %d current %d. 
Update expected TPT table\n", + prev_agg, lq_sta->is_agg); + rs_set_expected_tpt_table(lq_sta, tbl); + rs_rate_scale_clear_tbl_windows(mvm, tbl); + } + + /* current tx rate */ + index = rate->index; + + /* rates available for this association, and for modulation mode */ + rate_mask = rs_get_supported_rates(lq_sta, rate); + + if (!(BIT(index) & rate_mask)) { + IWL_ERR(mvm, "Current Rate is not valid\n"); + if (lq_sta->search_better_tbl) { + /* revert to active table if search table is not valid*/ + rate->type = LQ_NONE; + lq_sta->search_better_tbl = 0; + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + rs_update_rate_tbl(mvm, sta, lq_sta, tbl); + } + return; + } + + /* Get expected throughput table and history window for current rate */ + if (!tbl->expected_tpt) { + IWL_ERR(mvm, "tbl->expected_tpt is NULL\n"); + return; + } + + /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ + window = &(tbl->win[index]); + + /* + * If there is not enough history to calculate actual average + * throughput, keep analyzing results of more tx frames, without + * changing rate or mode (bypass most of the rest of this function). + * Set up new rate table in uCode only if old rate is not supported + * in current association (use new rate found above). + */ + fail_count = window->counter - window->success_counter; + if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) { + IWL_DEBUG_RATE(mvm, + "%s: Test Window: succ %d total %d\n", + rs_pretty_rate(rate), + window->success_counter, window->counter); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + + /* Should we stay with this modulation mode, + * or search for a new one? */ + rs_stay_in_table(lq_sta, false); + + return; + } + + /* If we are searching for better modulation mode, check success. */ + if (lq_sta->search_better_tbl) { + /* If good success, continue using the "search" mode; + * no need to send new link quality command, since we're + * continuing to use the setup that we've been trying. */ + if (window->average_tpt > lq_sta->last_tpt) { + IWL_DEBUG_RATE(mvm, + "SWITCHING TO NEW TABLE SR: %d " + "cur-tpt %d old-tpt %d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + /* Swap tables; "search" becomes "active" */ + lq_sta->active_tbl = active_tbl; + current_tpt = window->average_tpt; + /* Else poor success; go back to mode in "active" table */ + } else { + IWL_DEBUG_RATE(mvm, + "GOING BACK TO THE OLD TABLE: SR %d " + "cur-tpt %d old-tpt %d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + /* Nullify "search" table */ + rate->type = LQ_NONE; + + /* Revert to "active" table */ + active_tbl = lq_sta->active_tbl; + tbl = &(lq_sta->lq_info[active_tbl]); + + /* Revert to "active" rate and throughput info */ + index = tbl->rate.index; + current_tpt = lq_sta->last_tpt; + + /* Need to set up a new rate table in uCode */ + update_lq = 1; + } + + /* Either way, we've made a decision; modulation mode + * search is done, allow rate adjustment next time. */ + lq_sta->search_better_tbl = 0; + done_search = 1; /* Don't switch modes below! */ + goto lq_update; + } + + /* (Else) not in search of better modulation mode, try for better + * starting rate, while staying in this mode. 
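* The measured throughput of the current rate and of its adjacent lower and
* higher rates is compared below (rs_get_rate_action()) to decide whether to
* step the starting rate down, step it up, or stay.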
*/ + high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ + + sr = window->success_ratio; + + /* Collect measured throughputs for current and adjacent rates */ + current_tpt = window->average_tpt; + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + IWL_DEBUG_RATE(mvm, + "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n", + rs_pretty_rate(rate), current_tpt, sr, + low, high, low_tpt, high_tpt); + + scale_action = rs_get_rate_action(mvm, tbl, sr, low, high, + current_tpt, low_tpt, high_tpt); + + /* Force a search in case BT doesn't like us being in MIMO */ + if (is_mimo(rate) && + !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) { + IWL_DEBUG_RATE(mvm, + "BT Coex forbids MIMO. Search for new config\n"); + rs_stay_in_table(lq_sta, true); + goto lq_update; + } + + switch (scale_action) { + case RS_ACTION_DOWNSCALE: + /* Decrease starting rate, update uCode's rate table */ + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } else { + IWL_DEBUG_RATE(mvm, + "At the bottom rate. Can't decrease\n"); + } + + break; + case RS_ACTION_UPSCALE: + /* Increase starting rate, update uCode's rate table */ + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } else { + IWL_DEBUG_RATE(mvm, + "At the top rate. Can't increase\n"); + } + + break; + case RS_ACTION_STAY: + /* No change */ + if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) + update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl); + break; + default: + break; + } + +lq_update: + /* Replace uCode's rate table for the destination station. */ + if (update_lq) { + tbl->rate.index = index; + if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK) + rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action); + rs_update_rate_tbl(mvm, sta, lq_sta, tbl); + } + + rs_stay_in_table(lq_sta, false); + + /* + * Search for new modulation mode if we're: + * 1) Not changing rates right now + * 2) Not just finishing up a search + * 3) Allowing a new search + */ + if (!update_lq && !done_search && + lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED + && window->counter) { + enum rs_column next_column; + + /* Save current throughput to compare with "search" throughput*/ + lq_sta->last_tpt = current_tpt; + + IWL_DEBUG_RATE(mvm, + "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n", + update_lq, done_search, lq_sta->rs_state, + window->counter); + + next_column = rs_get_next_column(mvm, lq_sta, sta, tbl); + if (next_column != RS_COLUMN_INVALID) { + int ret = rs_switch_to_column(mvm, lq_sta, sta, + next_column); + if (!ret) + lq_sta->search_better_tbl = 1; + } else { + IWL_DEBUG_RATE(mvm, + "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n"); + lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED; + } + + /* If new "search" mode was selected, set up in uCode table */ + if (lq_sta->search_better_tbl) { + /* Access the "search" table, clear its history. 
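* Clearing the windows makes sure the new column is judged on fresh
* statistics; its average throughput is later compared against the saved
* last_tpt of the active column to decide whether to adopt it.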
*/ + tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + rs_rate_scale_clear_tbl_windows(mvm, tbl); + + /* Use new "search" start rate */ + index = tbl->rate.index; + + rs_dump_rate(mvm, &tbl->rate, + "Switch to SEARCH TABLE:"); + rs_update_rate_tbl(mvm, sta, lq_sta, tbl); + } else { + done_search = 1; + } + } + + if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) { + /* If the "active" (non-search) mode was legacy, + * and we've tried switching antennas, + * but we haven't been able to try HT modes (not available), + * stay with best antenna legacy modulation for a while + * before next round of mode comparisons. */ + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + if (is_legacy(&tbl1->rate)) { + IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); + + if (tid != IWL_MAX_TID_COUNT) { + tid_data = &sta_priv->tid_data[tid]; + if (tid_data->state != IWL_AGG_OFF) { + IWL_DEBUG_RATE(mvm, + "Stop aggregation on tid %d\n", + tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + } + rs_set_stay_in_table(mvm, 1, lq_sta); + } else { + /* If we're in an HT mode, and all 3 mode switch actions + * have been tried and compared, stay in this best modulation + * mode for a while before next round of mode comparisons. */ + if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && + (lq_sta->tx_agg_tid_en & (1 << tid)) && + (tid != IWL_MAX_TID_COUNT)) { + tid_data = &sta_priv->tid_data[tid]; + if (tid_data->state == IWL_AGG_OFF) { + IWL_DEBUG_RATE(mvm, + "try to aggregate tid %d\n", + tid); + rs_tl_turn_on_agg(mvm, tid, + lq_sta, sta); + } + } + rs_set_stay_in_table(mvm, 0, lq_sta); + } + } +} + +struct rs_init_rate_info { + s8 rssi; + u8 rate_idx; +}; + +static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = { + { -60, IWL_RATE_54M_INDEX }, + { -64, IWL_RATE_48M_INDEX }, + { -68, IWL_RATE_36M_INDEX }, + { -80, IWL_RATE_24M_INDEX }, + { -84, IWL_RATE_18M_INDEX }, + { -85, IWL_RATE_12M_INDEX }, + { -86, IWL_RATE_11M_INDEX }, + { -88, IWL_RATE_5M_INDEX }, + { -90, IWL_RATE_2M_INDEX }, + { S8_MIN, IWL_RATE_1M_INDEX }, +}; + +static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = { + { -60, IWL_RATE_54M_INDEX }, + { -64, IWL_RATE_48M_INDEX }, + { -72, IWL_RATE_36M_INDEX }, + { -80, IWL_RATE_24M_INDEX }, + { -84, IWL_RATE_18M_INDEX }, + { -85, IWL_RATE_12M_INDEX }, + { -87, IWL_RATE_9M_INDEX }, + { S8_MIN, IWL_RATE_6M_INDEX }, +}; + +static const struct rs_init_rate_info rs_optimal_rates_ht[] = { + { -60, IWL_RATE_MCS_7_INDEX }, + { -64, IWL_RATE_MCS_6_INDEX }, + { -68, IWL_RATE_MCS_5_INDEX }, + { -72, IWL_RATE_MCS_4_INDEX }, + { -80, IWL_RATE_MCS_3_INDEX }, + { -84, IWL_RATE_MCS_2_INDEX }, + { -85, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX}, +}; + +static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = { + { -60, IWL_RATE_MCS_8_INDEX }, + { -64, IWL_RATE_MCS_7_INDEX }, + { -68, IWL_RATE_MCS_6_INDEX }, + { -72, IWL_RATE_MCS_5_INDEX }, + { -80, IWL_RATE_MCS_4_INDEX }, + { -84, IWL_RATE_MCS_3_INDEX }, + { -85, IWL_RATE_MCS_2_INDEX }, + { -87, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX}, +}; + +static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = { + { -60, IWL_RATE_MCS_9_INDEX }, + { -64, IWL_RATE_MCS_8_INDEX }, + { -68, IWL_RATE_MCS_7_INDEX }, + { -72, IWL_RATE_MCS_6_INDEX }, + { -80, IWL_RATE_MCS_5_INDEX }, + { -84, IWL_RATE_MCS_4_INDEX }, + { -85, IWL_RATE_MCS_3_INDEX }, + { -87, IWL_RATE_MCS_2_INDEX }, + { -88, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX }, +}; + +/* Init the optimal rate based 
on STA caps + * This combined with rssi is used to report the last tx rate + * to userspace when we haven't transmitted enough frames. + */ +static void rs_init_optimal_rate(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct rs_rate *rate = &lq_sta->optimal_rate; + + if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID) + rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; + else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID) + rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO; + else if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate->type = LQ_LEGACY_A; + else + rate->type = LQ_LEGACY_G; + + rate->bw = rs_bw_from_sta_bw(sta); + rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL); + + /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */ + + if (is_mimo(rate)) { + lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate; + } else if (is_siso(rate)) { + lq_sta->optimal_rate_mask = lq_sta->active_siso_rate; + } else { + lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate; + + if (lq_sta->band == IEEE80211_BAND_5GHZ) { + lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); + } else { + lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); + } + } + + if (is_vht(rate)) { + if (rate->bw == RATE_MCS_CHAN_WIDTH_20) { + lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_vht_20mhz); + } else { + lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz); + } + } else if (is_ht(rate)) { + lq_sta->optimal_rates = rs_optimal_rates_ht; + lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht); + } +} + +/* Compute the optimal rate index based on RSSI */ +static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta) +{ + struct rs_rate *rate = &lq_sta->optimal_rate; + int i; + + rate->index = find_first_bit(&lq_sta->optimal_rate_mask, + BITS_PER_LONG); + + for (i = 0; i < lq_sta->optimal_nentries; i++) { + int rate_idx = lq_sta->optimal_rates[i].rate_idx; + + if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) && + (BIT(rate_idx) & lq_sta->optimal_rate_mask)) { + rate->index = rate_idx; + break; + } + } + + return rate; +} + +/* Choose an initial legacy rate and antenna to use based on the RSSI + * of last Rx + */ +static void rs_get_initial_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + enum ieee80211_band band, + struct rs_rate *rate) +{ + int i, nentries; + s8 best_rssi = S8_MIN; + u8 best_ant = ANT_NONE; + u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); + const struct rs_init_rate_info *initial_rates; + + for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { + if (!(lq_sta->pers.chains & BIT(i))) + continue; + + if (lq_sta->pers.chain_signal[i] > best_rssi) { + best_rssi = lq_sta->pers.chain_signal[i]; + best_ant = BIT(i); + } + } + + IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n", + rs_pretty_ant(best_ant), best_rssi); + + if (best_ant != ANT_A && best_ant != ANT_B) + rate->ant = first_antenna(valid_tx_ant); + else + rate->ant = best_ant; + + rate->sgi = false; + rate->ldpc = false; + rate->bw = RATE_MCS_CHAN_WIDTH_20; + + rate->index = find_first_bit(&lq_sta->active_legacy_rate, + BITS_PER_LONG); + + if (band == IEEE80211_BAND_5GHZ) { + rate->type = LQ_LEGACY_A; + initial_rates = 
rs_optimal_rates_5ghz_legacy; + nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); + } else { + rate->type = LQ_LEGACY_G; + initial_rates = rs_optimal_rates_24ghz_legacy; + nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); + } + + if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) { + for (i = 0; i < nentries; i++) { + int rate_idx = initial_rates[i].rate_idx; + if ((best_rssi >= initial_rates[i].rssi) && + (BIT(rate_idx) & lq_sta->active_legacy_rate)) { + rate->index = rate_idx; + break; + } + } + } + + IWL_DEBUG_RATE(mvm, "rate_idx %d ANT %s\n", rate->index, + rs_pretty_ant(rate->ant)); +} + +/* Save info about RSSI of last Rx */ +void rs_update_last_rssi(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_rx_status *rx_status) +{ + int i; + + lq_sta->pers.chains = rx_status->chains; + lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0]; + lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1]; + lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2]; + lq_sta->pers.last_rssi = S8_MIN; + + for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { + if (!(lq_sta->pers.chains & BIT(i))) + continue; + + if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi) + lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i]; + } +} + +/** + * rs_initialize_lq - Initialize a station's hardware rate table + * + * The uCode's station table contains a table of fallback rates + * for automatic fallback during transmission. + * + * NOTE: This sets up a default set of values. These will be replaced later + * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of + * rc80211_simple. + * + * NOTE: Run REPLY_ADD_STA command to set up station table entry, before + * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). + */ +static void rs_initialize_lq(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + enum ieee80211_band band, + bool init) +{ + struct iwl_scale_tbl_info *tbl; + struct rs_rate *rate; + u8 active_tbl = 0; + + if (!sta || !lq_sta) + return; + + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + rate = &tbl->rate; + + rs_get_initial_rate(mvm, lq_sta, band, rate); + rs_init_optimal_rate(mvm, sta, lq_sta); + + WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B); + if (rate->ant == ANT_A) + tbl->column = RS_COLUMN_LEGACY_ANT_A; + else + tbl->column = RS_COLUMN_LEGACY_ANT_B; + + rs_set_expected_tpt_table(lq_sta, tbl); + rs_fill_lq_cmd(mvm, sta, lq_sta, rate); + /* TODO restore station should remember the lq cmd */ + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init); +} + +static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct sk_buff *skb = txrc->skb; + struct iwl_op_mode *op_mode __maybe_unused = + (struct iwl_op_mode *)mvm_r; + struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_lq_sta *lq_sta = mvm_sta; + struct rs_rate *optimal_rate; + u32 last_ucode_rate; + + if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) { + /* if vif isn't initialized mvm doesn't know about + * this station, so don't do anything with the it + */ + sta = NULL; + mvm_sta = NULL; + } + + /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ + + /* Treat uninitialized rate scaling data same as non-existing. 
*/ + if (lq_sta && !lq_sta->pers.drv) { + IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); + mvm_sta = NULL; + } + + /* Send management frames and NO_ACK data using lowest rate. */ + if (rate_control_send_low(sta, mvm_sta, txrc)) + return; + + iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags, + info->band, &info->control.rates[0]); + info->control.rates[0].count = 1; + + /* Report the optimal rate based on rssi and STA caps if we haven't + * converged yet (too little traffic) or exploring other modulations + */ + if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) { + optimal_rate = rs_get_optimal_rate(mvm, lq_sta); + last_ucode_rate = ucode_rate_from_rs_rate(mvm, + optimal_rate); + iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band, + &txrc->reported_rate); + } +} + +static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, + gfp_t gfp) +{ + struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate; + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; + + IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); + + lq_sta->pers.drv = mvm; +#ifdef CONFIG_MAC80211_DEBUGFS + lq_sta->pers.dbg_fixed_rate = 0; + lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID; + lq_sta->pers.ss_force = RS_SS_FORCE_NONE; +#endif + lq_sta->pers.chains = 0; + memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); + lq_sta->pers.last_rssi = S8_MIN; + + return &sta_priv->lq_sta; +} + +static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, + int nss) +{ + u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & + (0x3 << (2 * (nss - 1))); + rx_mcs >>= (2 * (nss - 1)); + + if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7) + return IWL_RATE_MCS_7_INDEX; + else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8) + return IWL_RATE_MCS_8_INDEX; + else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9) + return IWL_RATE_MCS_9_INDEX; + + WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED); + return -1; +} + +static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta, + struct ieee80211_sta_vht_cap *vht_cap, + struct iwl_lq_sta *lq_sta) +{ + int i; + int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1); + + if (highest_mcs >= IWL_RATE_MCS_0_INDEX) { + for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) { + if (i == IWL_RATE_9M_INDEX) + continue; + + /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ + if (i == IWL_RATE_MCS_9_INDEX && + sta->bandwidth == IEEE80211_STA_RX_BW_20) + continue; + + lq_sta->active_siso_rate |= BIT(i); + } + } + + if (sta->rx_nss < 2) + return; + + highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2); + if (highest_mcs >= IWL_RATE_MCS_0_INDEX) { + for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) { + if (i == IWL_RATE_9M_INDEX) + continue; + + /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ + if (i == IWL_RATE_MCS_9_INDEX && + sta->bandwidth == IEEE80211_STA_RX_BW_20) + continue; + + lq_sta->active_mimo2_rate |= BIT(i); + } + } +} + +static void rs_ht_init(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta_ht_cap *ht_cap) +{ + /* active_siso_rate mask includes 9 MBits (bit 5), + * and CCK (bits 0-3), supp_rates[] does not; + * shift to convert format, force 9 MBits off. 
+ */ + lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; + lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; + lq_sta->active_siso_rate &= ~((u16)0x2); + lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; + + lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; + lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; + lq_sta->active_mimo2_rate &= ~((u16)0x2); + lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; + + if (mvm->cfg->ht_params->ldpc && + (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) + lq_sta->ldpc = true; + + if (mvm->cfg->ht_params->stbc && + (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && + (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) + lq_sta->stbc_capable = true; + + lq_sta->is_vht = false; +} + +static void rs_vht_init(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + struct ieee80211_sta_vht_cap *vht_cap) +{ + rs_vht_set_enabled_rates(sta, vht_cap, lq_sta); + + if (mvm->cfg->ht_params->ldpc && + (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)) + lq_sta->ldpc = true; + + if (mvm->cfg->ht_params->stbc && + (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && + (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)) + lq_sta->stbc_capable = true; + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && + (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && + (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) + lq_sta->bfer_capable = true; + + lq_sta->is_vht = true; +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) +{ + spin_lock_bh(&mvm->drv_stats_lock); + memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats)); + spin_unlock_bh(&mvm->drv_stats_lock); +} + +void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) +{ + u8 nss = 0, mcs = 0; + + spin_lock(&mvm->drv_stats_lock); + + if (agg) + mvm->drv_rx_stats.agg_frames++; + + mvm->drv_rx_stats.success_frames++; + + switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + mvm->drv_rx_stats.bw_20_frames++; + break; + case RATE_MCS_CHAN_WIDTH_40: + mvm->drv_rx_stats.bw_40_frames++; + break; + case RATE_MCS_CHAN_WIDTH_80: + mvm->drv_rx_stats.bw_80_frames++; + break; + default: + WARN_ONCE(1, "bad BW. 
rate 0x%x", rate); + } + + if (rate & RATE_MCS_HT_MSK) { + mvm->drv_rx_stats.ht_frames++; + mcs = rate & RATE_HT_MCS_RATE_CODE_MSK; + nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; + } else if (rate & RATE_MCS_VHT_MSK) { + mvm->drv_rx_stats.vht_frames++; + mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; + nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + } else { + mvm->drv_rx_stats.legacy_frames++; + } + + if (nss == 1) + mvm->drv_rx_stats.siso_frames++; + else if (nss == 2) + mvm->drv_rx_stats.mimo2_frames++; + + if (rate & RATE_MCS_SGI_MSK) + mvm->drv_rx_stats.sgi_frames++; + else + mvm->drv_rx_stats.ngi_frames++; + + mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate; + mvm->drv_rx_stats.last_frame_idx = + (mvm->drv_rx_stats.last_frame_idx + 1) % + ARRAY_SIZE(mvm->drv_rx_stats.last_rates); + + spin_unlock(&mvm->drv_stats_lock); +} +#endif + +/* + * Called after adding a new station to initialize rate scaling + */ +void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + enum ieee80211_band band, bool init) +{ + int i, j; + struct ieee80211_hw *hw = mvm->hw; + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; + struct ieee80211_supported_band *sband; + unsigned long supp; /* must be unsigned long for for_each_set_bit */ + + /* clear all non-persistent lq data */ + memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); + + sband = hw->wiphy->bands[band]; + + lq_sta->lq.sta_id = sta_priv->sta_id; + + for (j = 0; j < LQ_SIZE; j++) + rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); + + lq_sta->flush_timer = 0; + lq_sta->last_tx = jiffies; + + IWL_DEBUG_RATE(mvm, + "LQ: *** rate scale station global init for station %d ***\n", + sta_priv->sta_id); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
*/ + + lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX; + lq_sta->band = sband->band; + /* + * active legacy rates as per supported rates bitmap + */ + supp = sta->supp_rates[sband->band]; + lq_sta->active_legacy_rate = 0; + for_each_set_bit(i, &supp, BITS_PER_LONG) + lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); + + /* TODO: should probably account for rx_highest for both HT/VHT */ + if (!vht_cap || !vht_cap->vht_supported) + rs_ht_init(mvm, sta, lq_sta, ht_cap); + else + rs_vht_init(mvm, sta, lq_sta, vht_cap); + + lq_sta->max_legacy_rate_idx = + rs_get_max_rate_from_mask(lq_sta->active_legacy_rate); + lq_sta->max_siso_rate_idx = + rs_get_max_rate_from_mask(lq_sta->active_siso_rate); + lq_sta->max_mimo2_rate_idx = + rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate); + + IWL_DEBUG_RATE(mvm, + "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n", + lq_sta->active_legacy_rate, + lq_sta->active_siso_rate, + lq_sta->active_mimo2_rate, + lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable, + lq_sta->bfer_capable); + IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n", + lq_sta->max_legacy_rate_idx, + lq_sta->max_siso_rate_idx, + lq_sta->max_mimo2_rate_idx); + + /* These values will be overridden later */ + lq_sta->lq.single_stream_ant_msk = + first_antenna(iwl_mvm_get_valid_tx_ant(mvm)); + lq_sta->lq.dual_stream_ant_msk = ANT_AB; + + /* as default allow aggregation for all tids */ + lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; + lq_sta->is_agg = 0; +#ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_mvm_reset_frame_stats(mvm); +#endif + rs_initialize_lq(mvm, sta, lq_sta, band, init); +} + +static void rs_rate_update(void *mvm_r, + struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta, + u32 changed) +{ + u8 tid; + struct iwl_op_mode *op_mode = + (struct iwl_op_mode *)mvm_r; + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + if (!iwl_mvm_sta_from_mac80211(sta)->vif) + return; + + /* Stop any ongoing aggregations as rs starts off assuming no agg */ + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) + ieee80211_stop_tx_ba_session(sta, tid); + + iwl_mvm_rs_rate_init(mvm, sta, sband->band, false); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, + struct iwl_lq_cmd *lq_cmd, + enum ieee80211_band band, + u32 ucode_rate) +{ + struct rs_rate rate; + int i; + int num_rates = ARRAY_SIZE(lq_cmd->rs_table); + __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate); + u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; + + for (i = 0; i < num_rates; i++) + lq_cmd->rs_table[i] = ucode_rate_le32; + + rs_rate_from_ucode_rate(ucode_rate, band, &rate); + + if (is_mimo(&rate)) + lq_cmd->mimo_delim = num_rates - 1; + else + lq_cmd->mimo_delim = 0; + + lq_cmd->reduced_tpc = 0; + + if (num_of_ant(ant) == 1) + lq_cmd->single_stream_ant_msk = ant; + + lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; +} +#endif /* CONFIG_MAC80211_DEBUGFS */ + +static void rs_fill_rates_for_column(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct rs_rate *rate, + __le32 *rs_table, int *rs_table_index, + int num_rates, int num_retries, + u8 valid_tx_ant, bool toggle_ant) +{ + int i, j; + __le32 ucode_rate; + bool bottom_reached = false; + int prev_rate_idx = rate->index; + int end = LINK_QUAL_MAX_RETRY_NUM; + int index = *rs_table_index; + + for (i = 0; i < num_rates && index < end; i++) { + for (j = 0; j < num_retries && index < end; j++, index++) { + 
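/* Fill one rs_table slot per retry with the current rate; when toggle_ant
 * is set, the antenna selection is toggled after each slot
 * (rs_toggle_antenna()), so successive entries can use different chains. */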
ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, + rate)); + rs_table[index] = ucode_rate; + if (toggle_ant) + rs_toggle_antenna(valid_tx_ant, rate); + } + + prev_rate_idx = rate->index; + bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate); + if (bottom_reached && !is_legacy(rate)) + break; + } + + if (!bottom_reached && !is_legacy(rate)) + rate->index = prev_rate_idx; + + *rs_table_index = index; +} + +/* Building the rate table is non trivial. When we're in MIMO2/VHT/80Mhz/SGI + * column the rate table should look like this: + * + * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI + * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI + * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI + * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI + * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI + * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI + * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI + * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI + * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI + * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps + * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps + * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps + * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps + * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps + * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps + * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps + */ +static void rs_build_rates_table(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + const struct rs_rate *initial_rate) +{ + struct rs_rate rate; + int num_rates, num_retries, index = 0; + u8 valid_tx_ant = 0; + struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; + bool toggle_ant = false; + + memcpy(&rate, initial_rate, sizeof(rate)); + + valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); + + /* TODO: remove old API when min FW API hits 14 */ + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) && + rs_stbc_allow(mvm, sta, lq_sta)) + rate.stbc = true; + + if (is_siso(&rate)) { + num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES; + num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE; + } else if (is_mimo(&rate)) { + num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES; + num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE; + } else { + num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES; + num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES; + toggle_ant = true; + } + + rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, + num_rates, num_retries, valid_tx_ant, + toggle_ant); + + rs_get_lower_rate_down_column(lq_sta, &rate); + + if (is_siso(&rate)) { + num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES; + num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES; + lq_cmd->mimo_delim = index; + } else if (is_legacy(&rate)) { + num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES; + num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES; + } else { + WARN_ON_ONCE(1); + } + + toggle_ant = true; + + rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, + num_rates, num_retries, valid_tx_ant, + toggle_ant); + + rs_get_lower_rate_down_column(lq_sta, &rate); + + num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES; + num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES; + + rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, + num_rates, num_retries, valid_tx_ant, + toggle_ant); + +} + +struct rs_bfer_active_iter_data { + struct ieee80211_sta *exclude_sta; + struct iwl_mvm_sta *bfer_mvmsta; +}; + +static void 
rs_bfer_active_iter(void *_data, + struct ieee80211_sta *sta) +{ + struct rs_bfer_active_iter_data *data = _data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq; + u32 ss_params = le32_to_cpu(lq_cmd->ss_params); + + if (sta == data->exclude_sta) + return; + + /* The current sta has BFER allowed */ + if (ss_params & LQ_SS_BFER_ALLOWED) { + WARN_ON_ONCE(data->bfer_mvmsta != NULL); + + data->bfer_mvmsta = mvmsta; + } +} + +static int rs_bfer_priority(struct iwl_mvm_sta *sta) +{ + int prio = -1; + enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif); + + switch (viftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + prio = 3; + break; + case NL80211_IFTYPE_P2P_CLIENT: + prio = 2; + break; + case NL80211_IFTYPE_STATION: + prio = 1; + break; + default: + WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id); + prio = -1; + } + + return prio; +} + +/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */ +static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1, + struct iwl_mvm_sta *sta2) +{ + int prio1 = rs_bfer_priority(sta1); + int prio2 = rs_bfer_priority(sta2); + + if (prio1 > prio2) + return 1; + if (prio1 < prio2) + return -1; + return 0; +} + +static void rs_set_lq_ss_params(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + const struct rs_rate *initial_rate) +{ + struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct rs_bfer_active_iter_data data = { + .exclude_sta = sta, + .bfer_mvmsta = NULL, + }; + struct iwl_mvm_sta *bfer_mvmsta = NULL; + u32 ss_params = LQ_SS_PARAMS_VALID; + + if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) + goto out; + +#ifdef CONFIG_MAC80211_DEBUGFS + /* Check if forcing the decision is configured. + * Note that SISO is forced by not allowing STBC or BFER + */ + if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC) + ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE); + else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER) + ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE); + + if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) { + IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n", + lq_sta->pers.ss_force); + goto out; + } +#endif + + if (lq_sta->stbc_capable) + ss_params |= LQ_SS_STBC_1SS_ALLOWED; + + if (!lq_sta->bfer_capable) + goto out; + + ieee80211_iterate_stations_atomic(mvm->hw, + rs_bfer_active_iter, + &data); + bfer_mvmsta = data.bfer_mvmsta; + + /* This code is safe as it doesn't run concurrently for different + * stations. This is guaranteed by the fact that calls to + * ieee80211_tx_status wouldn't run concurrently for a single HW. + */ + if (!bfer_mvmsta) { + IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n"); + + ss_params |= LQ_SS_BFER_ALLOWED; + goto out; + } + + IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n", + bfer_mvmsta->sta_id); + + /* Disallow BFER on another STA if active and we're a higher priority */ + if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) { + struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq; + u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params); + + bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED; + bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params); + iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false); + + ss_params |= LQ_SS_BFER_ALLOWED; + IWL_DEBUG_RATE(mvm, + "Lower priority BFER sta found (%d). 
Switch BFER\n", + bfer_mvmsta->sta_id); + } +out: + lq_cmd->ss_params = cpu_to_le32(ss_params); +} + +static void rs_fill_lq_cmd(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + const struct rs_rate *initial_rate) +{ + struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_vif *mvmvif; + + lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START; + lq_cmd->agg_time_limit = + cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT); + +#ifdef CONFIG_MAC80211_DEBUGFS + if (lq_sta->pers.dbg_fixed_rate) { + rs_build_rates_table_from_fixed(mvm, lq_cmd, + lq_sta->band, + lq_sta->pers.dbg_fixed_rate); + return; + } +#endif + if (WARN_ON_ONCE(!sta || !initial_rate)) + return; + + rs_build_rates_table(mvm, sta, lq_sta, initial_rate); + + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) + rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate); + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (num_of_ant(initial_rate->ant) == 1) + lq_cmd->single_stream_ant_msk = initial_rate->ant; + + lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + + /* + * In case of low latency, tell the firmware to leave a frame in the + * Tx Fifo so that it can start a transaction in the same TxOP. This + * basically allows the firmware to send bursts. + */ + if (iwl_mvm_vif_low_latency(mvmvif)) { + lq_cmd->agg_frame_cnt_limit--; + + if (mvm->low_latency_agg_frame_limit) + lq_cmd->agg_frame_cnt_limit = + min(lq_cmd->agg_frame_cnt_limit, + mvm->low_latency_agg_frame_limit); + } + + if (mvmsta->vif->p2p) + lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK; + + lq_cmd->agg_time_limit = + cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta)); +} + +static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} +/* rate scale requires free function to be implemented */ +static void rs_free(void *mvm_rate) +{ + return; +} + +static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, + void *mvm_sta) +{ + struct iwl_op_mode *op_mode __maybe_unused = mvm_r; + struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); + + IWL_DEBUG_RATE(mvm, "enter\n"); + IWL_DEBUG_RATE(mvm, "leave\n"); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +int rs_pretty_print_rate(char *buf, const u32 rate) +{ + + char *type, *bw; + u8 mcs = 0, nss = 0; + u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; + + if (!(rate & RATE_MCS_HT_MSK) && + !(rate & RATE_MCS_VHT_MSK)) { + int index = iwl_hwrate_to_plcp_idx(rate); + + return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n", + rs_pretty_ant(ant), + index == IWL_RATE_INVALID ? "BAD" : + iwl_rate_mcs[index].mbps); + } + + if (rate & RATE_MCS_VHT_MSK) { + type = "VHT"; + mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; + nss = ((rate & RATE_VHT_MCS_NSS_MSK) + >> RATE_VHT_MCS_NSS_POS) + 1; + } else if (rate & RATE_MCS_HT_MSK) { + type = "HT"; + mcs = rate & RATE_HT_MCS_INDEX_MSK; + } else { + type = "Unknown"; /* shouldn't happen */ + } + + switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + bw = "20Mhz"; + break; + case RATE_MCS_CHAN_WIDTH_40: + bw = "40Mhz"; + break; + case RATE_MCS_CHAN_WIDTH_80: + bw = "80Mhz"; + break; + case RATE_MCS_CHAN_WIDTH_160: + bw = "160Mhz"; + break; + default: + bw = "BAD BW"; + } + + return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n", + type, rs_pretty_ant(ant), bw, mcs, nss, + (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", + (rate & RATE_MCS_HT_STBC_MSK) ? 
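
Only one station may have BFER allowed at a time, and rs_set_lq_ss_params() above arbitrates by interface type: an AP or P2P GO outranks a P2P client, which outranks a plain station. A compact sketch of that ranking follows; the enum names and return values here are stand-ins chosen for illustration.

	#include <stdio.h>

	enum iftype { IF_STATION, IF_P2P_CLIENT, IF_P2P_GO, IF_AP };

	/* Interface-type ranking used when only one station can be the
	 * beamformee: AP/GO outrank a P2P client, which outranks a station. */
	static int bfer_priority(enum iftype type)
	{
		switch (type) {
		case IF_AP:
		case IF_P2P_GO:
			return 3;
		case IF_P2P_CLIENT:
			return 2;
		case IF_STATION:
			return 1;
		default:
			return -1;
		}
	}

	/* >0 if a should take BFER over b, <0 for the opposite, 0 if equal. */
	static int bfer_priority_cmp(enum iftype a, enum iftype b)
	{
		return (bfer_priority(a) > bfer_priority(b)) -
		       (bfer_priority(a) < bfer_priority(b));
	}

	int main(void)
	{
		printf("%d\n", bfer_priority_cmp(IF_AP, IF_STATION));	/*  1 */
		printf("%d\n", bfer_priority_cmp(IF_STATION, IF_P2P_GO)); /* -1 */
		return 0;
	}
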
"STBC " : "", + (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", + (rate & RATE_MCS_BF_MSK) ? "BF " : "", + (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : ""); +} + +/** + * Program the device to use fixed rate for frame transmit + * This is for debugging/testing only + * once the device start use fixed rate, we need to reload the module + * to being back the normal operation. + */ +static void rs_program_fix_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta) +{ + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n", + lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate); + + if (lq_sta->pers.dbg_fixed_rate) { + rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL); + iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false); + } +} + +static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_mvm *mvm; + char buf[64]; + size_t buf_size; + u32 parsed_rate; + + mvm = lq_sta->pers.drv; + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + lq_sta->pers.dbg_fixed_rate = parsed_rate; + else + lq_sta->pers.dbg_fixed_rate = 0; + + rs_program_fix_rate(mvm, lq_sta); + + return count; +} + +static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_mvm *mvm; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct rs_rate *rate = &tbl->rate; + u32 ss_params; + mvm = lq_sta->pers.drv; + buff = kmalloc(2048, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n", + lq_sta->total_failed, lq_sta->total_success, + lq_sta->active_legacy_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + lq_sta->pers.dbg_fixed_rate); + desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "", + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : ""); + desc += sprintf(buff+desc, "lq type %s\n", + (is_legacy(rate)) ? "legacy" : + is_vht(rate) ? "VHT" : "HT"); + if (!is_legacy(rate)) { + desc += sprintf(buff + desc, " %s", + (is_siso(rate)) ? "SISO" : "MIMO2"); + desc += sprintf(buff + desc, " %s", + (is_ht20(rate)) ? "20MHz" : + (is_ht40(rate)) ? "40MHz" : + (is_ht80(rate)) ? "80Mhz" : "BAD BW"); + desc += sprintf(buff + desc, " %s %s %s\n", + (rate->sgi) ? "SGI" : "NGI", + (rate->ldpc) ? "LDPC" : "BCC", + (lq_sta->is_agg) ? 
"AGG on" : ""); + } + desc += sprintf(buff+desc, "last tx rate=0x%X\n", + lq_sta->last_rate_n_flags); + desc += sprintf(buff+desc, + "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n", + lq_sta->lq.flags, + lq_sta->lq.mimo_delim, + lq_sta->lq.single_stream_ant_msk, + lq_sta->lq.dual_stream_ant_msk); + + desc += sprintf(buff+desc, + "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(lq_sta->lq.agg_time_limit), + lq_sta->lq.agg_disable_start_th, + lq_sta->lq.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc); + ss_params = le32_to_cpu(lq_sta->lq.ss_params); + desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n", + (ss_params & LQ_SS_PARAMS_VALID) ? + "VALID" : "INVALID", + (ss_params & LQ_SS_BFER_ALLOWED) ? + ", BFER" : "", + (ss_params & LQ_SS_STBC_1SS_ALLOWED) ? + ", STBC" : "", + (ss_params & LQ_SS_FORCE) ? + ", FORCE" : ""); + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + lq_sta->lq.initial_rate_index[0], + lq_sta->lq.initial_rate_index[1], + lq_sta->lq.initial_rate_index[2], + lq_sta->lq.initial_rate_index[3]); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]); + + desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r); + desc += rs_pretty_print_rate(buff+desc, r); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = rs_sta_dbgfs_scale_table_write, + .read = rs_sta_dbgfs_scale_table_read, + .open = simple_open, + .llseek = default_llseek, +}; +static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i, j; + ssize_t ret; + struct iwl_scale_tbl_info *tbl; + struct rs_rate *rate; + struct iwl_lq_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + for (i = 0; i < LQ_SIZE; i++) { + tbl = &(lq_sta->lq_info[i]); + rate = &tbl->rate; + desc += sprintf(buff+desc, + "%s type=%d SGI=%d BW=%s DUP=0\n" + "index=%d\n", + lq_sta->active_tbl == i ? "*" : "x", + rate->type, + rate->sgi, + is_ht20(rate) ? "20Mhz" : + is_ht40(rate) ? "40Mhz" : + is_ht80(rate) ? 
"80Mhz" : "ERR", + rate->index); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + tbl->win[j].counter, + tbl->win[j].success_counter, + tbl->win[j].success_ratio); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = rs_sta_dbgfs_stats_table_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + static const char * const column_name[] = { + [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A", + [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B", + [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A", + [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B", + [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI", + [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI", + [RS_COLUMN_MIMO2] = "MIMO2", + [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI", + }; + + static const char * const rate_name[] = { + [IWL_RATE_1M_INDEX] = "1M", + [IWL_RATE_2M_INDEX] = "2M", + [IWL_RATE_5M_INDEX] = "5.5M", + [IWL_RATE_11M_INDEX] = "11M", + [IWL_RATE_6M_INDEX] = "6M|MCS0", + [IWL_RATE_9M_INDEX] = "9M", + [IWL_RATE_12M_INDEX] = "12M|MCS1", + [IWL_RATE_18M_INDEX] = "18M|MCS2", + [IWL_RATE_24M_INDEX] = "24M|MCS3", + [IWL_RATE_36M_INDEX] = "36M|MCS4", + [IWL_RATE_48M_INDEX] = "48M|MCS5", + [IWL_RATE_54M_INDEX] = "54M|MCS6", + [IWL_RATE_MCS_7_INDEX] = "MCS7", + [IWL_RATE_MCS_8_INDEX] = "MCS8", + [IWL_RATE_MCS_9_INDEX] = "MCS9", + }; + + char *buff, *pos, *endpos; + int col, rate; + ssize_t ret; + struct iwl_lq_sta *lq_sta = file->private_data; + struct rs_rate_stats *stats; + static const size_t bufsz = 1024; + + buff = kmalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + pos = buff; + endpos = pos + bufsz; + + pos += scnprintf(pos, endpos - pos, "COLUMN,"); + for (rate = 0; rate < IWL_RATE_COUNT; rate++) + pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]); + pos += scnprintf(pos, endpos - pos, "\n"); + + for (col = 0; col < RS_COLUMN_COUNT; col++) { + pos += scnprintf(pos, endpos - pos, + "%s,", column_name[col]); + + for (rate = 0; rate < IWL_RATE_COUNT; rate++) { + stats = &(lq_sta->pers.tx_stats[col][rate]); + pos += scnprintf(pos, endpos - pos, + "%llu/%llu,", + stats->success, + stats->total); + } + pos += scnprintf(pos, endpos - pos, "\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + return ret; +} + +static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats)); + + return count; +} + +static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = { + .read = rs_sta_dbgfs_drv_tx_stats_read, + .write = rs_sta_dbgfs_drv_tx_stats_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t iwl_dbgfs_ss_force_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + char buf[12]; + int bufsz = sizeof(buf); + int pos = 0; + static const char * const ss_force_name[] = { + [RS_SS_FORCE_NONE] = "none", + [RS_SS_FORCE_STBC] = "stbc", + [RS_SS_FORCE_BFER] = "bfer", + [RS_SS_FORCE_SISO] = "siso", + }; + + pos += scnprintf(buf+pos, bufsz-pos, "%s\n", + ss_force_name[lq_sta->pers.ss_force]); + return 
simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = lq_sta->pers.drv; + int ret = 0; + + if (!strncmp("none", buf, 4)) { + lq_sta->pers.ss_force = RS_SS_FORCE_NONE; + } else if (!strncmp("siso", buf, 4)) { + lq_sta->pers.ss_force = RS_SS_FORCE_SISO; + } else if (!strncmp("stbc", buf, 4)) { + if (lq_sta->stbc_capable) { + lq_sta->pers.ss_force = RS_SS_FORCE_STBC; + } else { + IWL_ERR(mvm, + "can't force STBC. peer doesn't support\n"); + ret = -EINVAL; + } + } else if (!strncmp("bfer", buf, 4)) { + if (lq_sta->bfer_capable) { + lq_sta->pers.ss_force = RS_SS_FORCE_BFER; + } else { + IWL_ERR(mvm, + "can't force BFER. peer doesn't support\n"); + ret = -EINVAL; + } + } else { + IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n"); + ret = -EINVAL; + } + return ret ?: count; +} + +#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta) +#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, lq_sta, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ + } while (0) + +MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); + +static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + struct iwl_mvm_sta *mvmsta; + + mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta); + + if (!mvmsta->vif) + return; + + debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); + debugfs_create_file("rate_stats_table", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops); + debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + &lq_sta->tx_agg_tid_en); + debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir, + &lq_sta->pers.dbg_fixed_txp_reduction); + + MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, S_IRUSR | S_IWUSR); + return; +err: + IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n"); +} + +static void rs_remove_debugfs(void *mvm, void *mvm_sta) +{ +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. + */ +static void rs_rate_init_stub(void *mvm_r, + struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *mvm_sta) +{ +} + +static const struct rate_control_ops rs_mvm_ops = { + .name = RS_NAME, + .tx_status = rs_mac80211_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init_stub, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, + .rate_update = rs_rate_update, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = rs_add_debugfs, + .remove_sta_debugfs = rs_remove_debugfs, +#endif +}; + +int iwl_mvm_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_mvm_ops); +} + +void iwl_mvm_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_mvm_ops); +} + +/** + * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable + * Tx protection, according to this request and previous requests, + * and send the LQ command. + * @mvmsta: The station + * @enable: Enable Tx protection? 
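
As described above, Tx protection is reference counted per station: RTS is switched on at the 0 to 1 transition, switched off again when the count returns to 0, and the LQ command is sent to the firmware on every call. A minimal sketch of that pattern; the flag bit and structure below are stand-ins, not the real LQ command fields.

	#include <stdbool.h>
	#include <stdio.h>

	#define USE_RTS_FLAG 0x1

	struct sta_ctx {
		int tx_protection;	/* how many callers asked for protection */
		unsigned int lq_flags;
	};

	/* Returns true when the LQ command should be (re)sent to the firmware;
	 * the driver sends it on every call, protected or not. */
	static bool set_tx_protection(struct sta_ctx *sta, bool enable)
	{
		if (enable) {
			if (sta->tx_protection == 0)
				sta->lq_flags |= USE_RTS_FLAG;
			sta->tx_protection++;
		} else {
			sta->tx_protection--;
			if (sta->tx_protection == 0)
				sta->lq_flags &= ~USE_RTS_FLAG;
		}
		return true;
	}

	int main(void)
	{
		struct sta_ctx sta = { 0, 0 };

		set_tx_protection(&sta, true);
		set_tx_protection(&sta, true);
		set_tx_protection(&sta, false);
		printf("count=%d rts=%s\n", sta.tx_protection,
		       (sta.lq_flags & USE_RTS_FLAG) ? "on" : "off");
		return 0;
	}
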
+ */ +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool enable) +{ + struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq; + + lockdep_assert_held(&mvm->mutex); + + if (enable) { + if (mvmsta->tx_protection == 0) + lq->flags |= LQ_FLAG_USE_RTS_MSK; + mvmsta->tx_protection++; + } else { + mvmsta->tx_protection--; + if (mvmsta->tx_protection == 0) + lq->flags &= ~LQ_FLAG_USE_RTS_MSK; + } + + return iwl_mvm_send_lq_cmd(mvm, lq, false); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h new file mode 100644 index 000000000000..81314ad9ebe0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -0,0 +1,392 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __rs_h__ +#define __rs_h__ + +#include + +#include "iwl-config.h" + +#include "fw-api.h" +#include "iwl-trans.h" + +struct iwl_rs_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 plcp_vht_siso; + u8 plcp_vht_mimo2; + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ +}; + +#define IWL_RATE_60M_PLCP 3 + +enum { + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_COUNT, +}; + +#define LINK_QUAL_MAX_RETRY_NUM 16 + +enum { + IWL_RATE_6M_INDEX_TABLE = 0, + IWL_RATE_9M_INDEX_TABLE, + IWL_RATE_12M_INDEX_TABLE, + IWL_RATE_18M_INDEX_TABLE, + IWL_RATE_24M_INDEX_TABLE, + IWL_RATE_36M_INDEX_TABLE, + IWL_RATE_48M_INDEX_TABLE, + IWL_RATE_54M_INDEX_TABLE, + IWL_RATE_1M_INDEX_TABLE, + IWL_RATE_2M_INDEX_TABLE, + IWL_RATE_5M_INDEX_TABLE, + IWL_RATE_11M_INDEX_TABLE, + IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, +}; + +/* #define vs. 
enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) +#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) +#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) + + +/* uCode API values for HT/VHT bit rates */ +enum { + IWL_RATE_HT_SISO_MCS_0_PLCP = 0, + IWL_RATE_HT_SISO_MCS_1_PLCP = 1, + IWL_RATE_HT_SISO_MCS_2_PLCP = 2, + IWL_RATE_HT_SISO_MCS_3_PLCP = 3, + IWL_RATE_HT_SISO_MCS_4_PLCP = 4, + IWL_RATE_HT_SISO_MCS_5_PLCP = 5, + IWL_RATE_HT_SISO_MCS_6_PLCP = 6, + IWL_RATE_HT_SISO_MCS_7_PLCP = 7, + IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8, + IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9, + IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA, + IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB, + IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC, + IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD, + IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE, + IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF, + IWL_RATE_VHT_SISO_MCS_0_PLCP = 0, + IWL_RATE_VHT_SISO_MCS_1_PLCP = 1, + IWL_RATE_VHT_SISO_MCS_2_PLCP = 2, + IWL_RATE_VHT_SISO_MCS_3_PLCP = 3, + IWL_RATE_VHT_SISO_MCS_4_PLCP = 4, + IWL_RATE_VHT_SISO_MCS_5_PLCP = 5, + IWL_RATE_VHT_SISO_MCS_6_PLCP = 6, + IWL_RATE_VHT_SISO_MCS_7_PLCP = 7, + IWL_RATE_VHT_SISO_MCS_8_PLCP = 8, + IWL_RATE_VHT_SISO_MCS_9_PLCP = 9, + IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10, + IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11, + IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12, + IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13, + IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14, + IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15, + IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16, + IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17, + IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18, + IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19, + IWL_RATE_HT_SISO_MCS_INV_PLCP, + IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, + IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, + IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, + IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, + IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, + IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, + IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, +}; + +#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) + +#define IWL_INVALID_VALUE -1 + +#define TPC_MAX_REDUCTION 15 +#define TPC_NO_REDUCTION 0 +#define TPC_INVALID 0xff + +#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) + +#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ + +/* load per tid defines for A-MPDU activation */ +#define IWL_AGG_TPT_THREHOLD 0 +#define IWL_AGG_ALL_TID 0xff + +enum iwl_table_type { + LQ_NONE, + LQ_LEGACY_G, /* legacy types */ + LQ_LEGACY_A, + LQ_HT_SISO, /* HT types */ + LQ_HT_MIMO2, + LQ_VHT_SISO, /* VHT types */ + LQ_VHT_MIMO2, + LQ_MAX, +}; + +struct rs_rate { + int index; + enum iwl_table_type type; + u8 ant; + u32 bw; + bool sgi; + bool ldpc; + bool stbc; + bool bfer; +}; + + +#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \ + ((type) == LQ_LEGACY_A)) +#define is_type_ht_siso(type) 
((type) == LQ_HT_SISO) +#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2) +#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO) +#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2) +#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type)) +#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type)) +#define is_type_mimo(type) (is_type_mimo2(type)) +#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type)) +#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type)) +#define is_type_a_band(type) ((type) == LQ_LEGACY_A) +#define is_type_g_band(type) ((type) == LQ_LEGACY_G) + +#define is_legacy(rate) is_type_legacy((rate)->type) +#define is_ht_siso(rate) is_type_ht_siso((rate)->type) +#define is_ht_mimo2(rate) is_type_ht_mimo2((rate)->type) +#define is_vht_siso(rate) is_type_vht_siso((rate)->type) +#define is_vht_mimo2(rate) is_type_vht_mimo2((rate)->type) +#define is_siso(rate) is_type_siso((rate)->type) +#define is_mimo2(rate) is_type_mimo2((rate)->type) +#define is_mimo(rate) is_type_mimo((rate)->type) +#define is_ht(rate) is_type_ht((rate)->type) +#define is_vht(rate) is_type_vht((rate)->type) +#define is_a_band(rate) is_type_a_band((rate)->type) +#define is_g_band(rate) is_type_g_band((rate)->type) + +#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20) +#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40) +#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80) + +#define IWL_MAX_MCS_DISPLAY_SIZE 12 + +struct iwl_rate_mcs_info { + char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; +}; + +/** + * struct iwl_rate_scale_data -- tx success history for one rate + */ +struct iwl_rate_scale_data { + u64 data; /* bitmap of successful frames */ + s32 success_counter; /* number of frames successful */ + s32 success_ratio; /* per-cent * 128 */ + s32 counter; /* number of frames attempted */ + s32 average_tpt; /* success ratio * expected throughput */ +}; + +/* Possible Tx columns + * Tx Column = a combo of legacy/siso/mimo x antenna x SGI + */ +enum rs_column { + RS_COLUMN_LEGACY_ANT_A = 0, + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_SISO_ANT_A, + RS_COLUMN_SISO_ANT_B, + RS_COLUMN_SISO_ANT_A_SGI, + RS_COLUMN_SISO_ANT_B_SGI, + RS_COLUMN_MIMO2, + RS_COLUMN_MIMO2_SGI, + + RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI, + RS_COLUMN_COUNT = RS_COLUMN_LAST + 1, + RS_COLUMN_INVALID, +}; + +enum rs_ss_force_opt { + RS_SS_FORCE_NONE = 0, + RS_SS_FORCE_STBC, + RS_SS_FORCE_BFER, + RS_SS_FORCE_SISO, +}; + +/* Packet stats per rate */ +struct rs_rate_stats { + u64 success; + u64 total; +}; + +/** + * struct iwl_scale_tbl_info -- tx params and success history for all rates + * + * There are two of these in struct iwl_lq_sta, + * one for "active", and one for "search". + */ +struct iwl_scale_tbl_info { + struct rs_rate rate; + enum rs_column column; + const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ + /* per txpower-reduction history */ + struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1]; +}; + +enum { + RS_STATE_SEARCH_CYCLE_STARTED, + RS_STATE_SEARCH_CYCLE_ENDED, + RS_STATE_STAY_IN_COLUMN, +}; + +/** + * struct iwl_lq_sta -- driver's rate scaling private structure + * + * Pointer to this gets passed back and forth between driver and mac80211. 
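
The success window documented above keeps a bitmap of recent transmission results and a success ratio scaled by 128 ("per-cent * 128") so that integer math retains some fractional precision. The sketch below shows only that bookkeeping; the exact window update and aging in the driver is more involved, and the formula used here is just the obvious reading of the field comments, not a claim about the driver's exact expression.

	#include <stdio.h>

	struct rate_window {
		unsigned long long data;	/* bitmap: bit 0 = most recent frame */
		int counter;			/* frames attempted */
		int success_counter;		/* frames that succeeded */
		int success_ratio;		/* percent * 128 */
	};

	/* Record one transmission result and recompute the scaled ratio.
	 * Aging out of old results once the window is full is omitted. */
	static void window_add(struct rate_window *w, int success)
	{
		w->data = (w->data << 1) | (success ? 1ULL : 0ULL);
		w->counter++;
		if (success)
			w->success_counter++;
		w->success_ratio = 128 * 100 * w->success_counter / w->counter;
	}

	int main(void)
	{
		struct rate_window w = { 0, 0, 0, 0 };
		int i;

		for (i = 0; i < 10; i++)
			window_add(&w, i % 3 != 0);	/* 6 of 10 succeed */

		printf("success_ratio = %d (~%d%%)\n",
		       w.success_ratio, w.success_ratio / 128);
		return 0;
	}
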
+ */ +struct iwl_lq_sta { + u8 active_tbl; /* index of active table, range 0-1 */ + u8 rs_state; /* RS_STATE_* */ + u8 search_better_tbl; /* 1: currently trying alternate mode */ + s32 last_tpt; + + /* The following determine when to search for a new mode */ + u32 table_count_limit; + u32 max_failure_limit; /* # failed frames before new search */ + u32 max_success_limit; /* # successful frames before new search */ + u32 table_count; + u32 total_failed; /* total failed frames, any/all rates */ + u32 total_success; /* total successful frames, any/all rates */ + u64 flush_timer; /* time staying in mode before new search */ + + u32 visited_columns; /* Bitmask marking which Tx columns were + * explored during a search cycle + */ + u64 last_tx; + bool is_vht; + bool ldpc; /* LDPC Rx is supported by the STA */ + bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */ + bool bfer_capable; /* Remote supports beamformee and we BFer */ + + enum ieee80211_band band; + + /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ + unsigned long active_legacy_rate; + unsigned long active_siso_rate; + unsigned long active_mimo2_rate; + + /* Highest rate per Tx mode */ + u8 max_legacy_rate_idx; + u8 max_siso_rate_idx; + u8 max_mimo2_rate_idx; + + /* Optimal rate based on RSSI and STA caps. + * Used only to reflect link speed to userspace. + */ + struct rs_rate optimal_rate; + unsigned long optimal_rate_mask; + const struct rs_init_rate_info *optimal_rates; + int optimal_nentries; + + u8 missed_rate_counter; + + struct iwl_lq_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ + u8 tx_agg_tid_en; + + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + /* packets destined for this STA are aggregated */ + u8 is_agg; + + /* tx power reduce for this sta */ + int tpc_reduce; + + /* persistent fields - initialized only once - keep last! */ + struct lq_sta_pers { +#ifdef CONFIG_MAC80211_DEBUGFS + u32 dbg_fixed_rate; + u8 dbg_fixed_txp_reduction; + + /* force STBC/BFER/SISO for testing */ + enum rs_ss_force_opt ss_force; +#endif + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; + s8 last_rssi; + struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; + struct iwl_mvm *drv; + } pers; +}; + +/* Initialize station's rate scaling information after adding station */ +void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + enum ieee80211_band band, bool init); + +/* Notify RS about Tx status */ +void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, struct ieee80211_tx_info *info); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand alone module. The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw + * + */ +int iwl_mvm_rate_control_register(void); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. 
+ */ +void iwl_mvm_rate_control_unregister(void); + +struct iwl_mvm_sta; + +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool enable); + +#endif /* __rs__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c new file mode 100644 index 000000000000..5b58f5320e8d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -0,0 +1,612 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ +#include +#include "iwl-trans.h" +#include "mvm.h" +#include "fw-api.h" + +/* + * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler + * + * Copies the phy information in mvm->last_phy_info, it will be used when the + * actual data will come from the fw in the next packet. + */ +void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info)); + mvm->ampdu_ref++; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { + spin_lock(&mvm->drv_stats_lock); + mvm->drv_rx_stats.ampdu_count++; + spin_unlock(&mvm->drv_stats_lock); + } +#endif +} + +/* + * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211 + * + * Adds the rxb to a new skb and give it to mac80211 + */ +static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + struct napi_struct *napi, + struct sk_buff *skb, + struct ieee80211_hdr *hdr, u16 len, + u32 ampdu_status, u8 crypt_len, + struct iwl_rx_cmd_buffer *rxb) +{ + unsigned int hdrlen, fraglen; + + /* If frame is small enough to fit in skb->head, pull it completely. + * If not, only pull ieee80211_hdr (including crypto if present, and + * an additional 8 bytes for SNAP/ethertype, see below) so that + * splice() or TCP coalesce are more efficient. + * + * Since, in addition, ieee80211_data_to_8023() always pull in at + * least 8 bytes (possibly more for mesh) we can do the same here + * to save the cost of doing it later. That still doesn't pull in + * the actual IP header since the typical case has a SNAP header. + * If the latter changes (there are efforts in the standards group + * to do so) we should revisit this and ieee80211_data_to_8023(). + */ + hdrlen = (len <= skb_tailroom(skb)) ? len : + sizeof(*hdr) + crypt_len + 8; + + memcpy(skb_put(skb, hdrlen), hdr, hdrlen); + fraglen = len - hdrlen; + + if (fraglen) { + int offset = (void *)hdr + hdrlen - + rxb_addr(rxb) + rxb_offset(rxb); + + skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, + fraglen, rxb->truesize); + } + + ieee80211_rx_napi(mvm->hw, skb, napi); +} + +/* + * iwl_mvm_get_signal_strength - use new rx PHY INFO API + * values are reported by the fw as positive values - need to negate + * to obtain their dBM. Account for missing antennas by replacing 0 + * values by -256dBm: practically 0 power and a non-feasible 8 bit value. + */ +static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, + struct iwl_rx_phy_info *phy_info, + struct ieee80211_rx_status *rx_status) +{ + int energy_a, energy_b, energy_c, max_energy; + u32 val; + + val = + le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); + energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> + IWL_RX_INFO_ENERGY_ANT_A_POS; + energy_a = energy_a ? -energy_a : S8_MIN; + energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> + IWL_RX_INFO_ENERGY_ANT_B_POS; + energy_b = energy_b ? -energy_b : S8_MIN; + energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> + IWL_RX_INFO_ENERGY_ANT_C_POS; + energy_c = energy_c ? 
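
The per-chain energies in the PHY info are reported by the firmware as positive numbers, with 0 meaning an unpopulated chain; the driver negates them to get dBm, substitutes -128 dBm for missing chains so they never win, and takes the maximum across chains as the signal. A stand-alone version of that conversion, with the mask/shift extraction of the three fields omitted:

	#include <stdio.h>

	#define S8_MIN (-128)

	/* energy_a/b/c stand for the already-extracted per-chain fields. */
	static int signal_from_energy(int energy_a, int energy_b, int energy_c)
	{
		int a = energy_a ? -energy_a : S8_MIN;
		int b = energy_b ? -energy_b : S8_MIN;
		int c = energy_c ? -energy_c : S8_MIN;
		int max = a > b ? a : b;

		return max > c ? max : c;
	}

	int main(void)
	{
		/* chains A/B populated, C missing: expect -47 dBm */
		printf("signal = %d dBm\n", signal_from_energy(47, 52, 0));
		return 0;
	}
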
-energy_c : S8_MIN; + max_energy = max(energy_a, energy_b); + max_energy = max(max_energy, energy_c); + + IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", + energy_a, energy_b, energy_c, max_energy); + + rx_status->signal = max_energy; + rx_status->chains = (le16_to_cpu(phy_info->phy_flags) & + RX_RES_PHY_FLAGS_ANTENNA) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + rx_status->chain_signal[0] = energy_a; + rx_status->chain_signal[1] = energy_b; + rx_status->chain_signal[2] = energy_c; +} + +/* + * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format + * @mvm: the mvm object + * @hdr: 80211 header + * @stats: status in mac80211's format + * @rx_pkt_status: status coming from fw + * + * returns non 0 value if the packet should be dropped + */ +static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, + struct ieee80211_hdr *hdr, + struct ieee80211_rx_status *stats, + u32 rx_pkt_status, + u8 *crypt_len) +{ + if (!ieee80211_has_protected(hdr->frame_control) || + (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == + RX_MPDU_RES_STATUS_SEC_NO_ENC) + return 0; + + /* packet was encrypted with unknown alg */ + if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == + RX_MPDU_RES_STATUS_SEC_ENC_ERR) + return 0; + + switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) { + case RX_MPDU_RES_STATUS_SEC_CCM_ENC: + /* alg is CCM: check MIC only */ + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) + return -1; + + stats->flag |= RX_FLAG_DECRYPTED; + *crypt_len = IEEE80211_CCMP_HDR_LEN; + return 0; + + case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: + /* Don't drop the frame and decrypt it in SW */ + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) + return 0; + *crypt_len = IEEE80211_TKIP_IV_LEN; + /* fall through if TTAK OK */ + + case RX_MPDU_RES_STATUS_SEC_WEP_ENC: + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) + return -1; + + stats->flag |= RX_FLAG_DECRYPTED; + if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == + RX_MPDU_RES_STATUS_SEC_WEP_ENC) + *crypt_len = IEEE80211_WEP_IV_LEN; + return 0; + + case RX_MPDU_RES_STATUS_SEC_EXT_ENC: + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) + return -1; + stats->flag |= RX_FLAG_DECRYPTED; + return 0; + + default: + IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); + } + + return 0; +} + +static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, + struct sk_buff *skb, + u32 status) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (mvmvif->features & NETIF_F_RXCSUM && + status & RX_MPDU_RES_STATUS_CSUM_DONE && + status & RX_MPDU_RES_STATUS_CSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +/* + * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler + * + * Handles the actual data of the Rx packet from the fw + */ +void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) +{ + struct ieee80211_hdr *hdr; + struct ieee80211_rx_status *rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_phy_info *phy_info; + struct iwl_rx_mpdu_res_start *rx_res; + struct ieee80211_sta *sta; + struct sk_buff *skb; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + u32 rx_pkt_status; + u8 crypt_len = 0; + + phy_info = &mvm->last_phy_info; + rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; + hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); + len = le16_to_cpu(rx_res->byte_count); + rx_pkt_status = le32_to_cpup((__le32 *) + (pkt->data + sizeof(*rx_res) + len)); + + /* Dont use 
dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. + */ + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mvm, "alloc_skb failed\n"); + return; + } + + rx_status = IEEE80211_SKB_RXCB(skb); + + /* + * drop the packet if it has failed being decrypted by HW + */ + if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, + &crypt_len)) { + IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", + rx_pkt_status); + kfree_skb(skb); + return; + } + + /* + * Keep packets with CRC errors (and with overrun) for monitor mode + * (otherwise the firmware discards them) but mark them as bad. + */ + if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) || + !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) { + IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_info->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status->mactime = le64_to_cpu(phy_info->timestamp); + rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp); + rx_status->band = + (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status->freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), + rx_status->band); + /* + * TSF as indicated by the fw is at INA time, but mac80211 expects the + * TSF at the beginning of the MPDU. + */ + /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/ + + iwl_mvm_get_signal_strength(mvm, phy_info, rx_status); + + IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal, + (unsigned long long)rx_status->mactime); + + rcu_read_lock(); + /* + * We have tx blocked stations (with CS bit). If we heard frames from + * a blocked station on a new channel we can TX to it again. 
+ */ + if (unlikely(mvm->csa_tx_block_bcn_timeout)) { + sta = ieee80211_find_sta( + rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2); + if (sta) + iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); + } + + /* This is fine since we don't support multiple AP interfaces */ + sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); + if (sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); + + if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && + ieee80211_is_beacon(hdr->frame_control)) { + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; + bool trig_check; + s32 rssi; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, + FW_DBG_TRIGGER_RSSI); + rssi_trig = (void *)trig->data; + rssi = le32_to_cpu(rssi_trig->rssi); + + trig_check = + iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + trig); + if (trig_check && rx_status->signal < rssi) + iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + } + } + + if (sta && ieee80211_is_data(hdr->frame_control)) + iwl_mvm_rx_csum(sta, skb, rx_pkt_status); + + rcu_read_unlock(); + + /* set the preamble flag if appropriate */ + if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) + rx_status->flag |= RX_FLAG_SHORTPRE; + + if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { + /* + * We know which subframes of an A-MPDU belong + * together since we get a single PHY response + * from the firmware for all of them + */ + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->ampdu_reference = mvm->ampdu_ref; + } + + /* Set up the HT phy flags */ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + rx_status->flag |= RX_FLAG_40MHZ; + break; + case RATE_MCS_CHAN_WIDTH_80: + rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + break; + case RATE_MCS_CHAN_WIDTH_160: + rx_status->vht_flag |= RX_VHT_FLAG_160MHZ; + break; + } + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status->flag |= RX_FLAG_SHORT_GI; + if (rate_n_flags & RATE_HT_MCS_GF_MSK) + rx_status->flag |= RX_FLAG_HT_GF; + if (rate_n_flags & RATE_MCS_LDPC_MSK) + rx_status->flag |= RX_FLAG_LDPC; + if (rate_n_flags & RATE_MCS_HT_MSK) { + u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> + RATE_MCS_STBC_POS; + rx_status->flag |= RX_FLAG_HT; + rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> + RATE_MCS_STBC_POS; + rx_status->vht_nss = + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status->flag |= RX_FLAG_VHT; + rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + if (rate_n_flags & RATE_MCS_BF_MSK) + rx_status->vht_flag |= RX_VHT_FLAG_BF; + } else { + rx_status->rate_idx = + iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + rx_status->band); + } + +#ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_mvm_update_frame_stats(mvm, rate_n_flags, + rx_status->flag & RX_FLAG_AMPDU_DETAILS); +#endif + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, + crypt_len, rxb); +} + +static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, + struct mvm_statistics_rx *rx_stats) +{ + lockdep_assert_held(&mvm->mutex); + + mvm->rx_stats = *rx_stats; +} + +struct iwl_mvm_stat_data { + struct iwl_mvm *mvm; + __le32 mac_id; + u8 beacon_filter_average_energy; + struct 
mvm_statistics_general_v8 *general; +}; + +static void iwl_mvm_stat_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_stat_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + int sig = -data->beacon_filter_average_energy; + int last_event; + int thold = vif->bss_conf.cqm_rssi_thold; + int hyst = vif->bss_conf.cqm_rssi_hyst; + u16 id = le32_to_cpu(data->mac_id); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* This doesn't need the MAC ID check since it's not taking the + * data copied into the "data" struct, but rather the data from + * the notification directly. + */ + if (data->general) { + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(data->general->beacon_counter[mvmvif->id]); + mvmvif->beacon_stats.avg_signal = + -data->general->beacon_average_energy[mvmvif->id]; + } + + if (mvmvif->id != id) + return; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (sig == 0) { + IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); + return; + } + + mvmvif->bf_data.ave_beacon_signal = sig; + + /* BT Coex */ + if (mvmvif->bf_data.bt_coex_min_thold != + mvmvif->bf_data.bt_coex_max_thold) { + last_event = mvmvif->bf_data.last_bt_coex_event; + if (sig > mvmvif->bf_data.bt_coex_max_thold && + (last_event <= mvmvif->bf_data.bt_coex_min_thold || + last_event == 0)) { + mvmvif->bf_data.last_bt_coex_event = sig; + IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n", + sig); + iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH); + } else if (sig < mvmvif->bf_data.bt_coex_min_thold && + (last_event >= mvmvif->bf_data.bt_coex_max_thold || + last_event == 0)) { + mvmvif->bf_data.last_bt_coex_event = sig; + IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n", + sig); + iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW); + } + } + + if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) + return; + + /* CQM Notification */ + last_event = mvmvif->bf_data.last_cqm_event; + if (thold && sig < thold && (last_event == 0 || + sig < last_event - hyst)) { + mvmvif->bf_data.last_cqm_event = sig; + IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", + sig); + ieee80211_cqm_rssi_notify( + vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + GFP_KERNEL); + } else if (sig > thold && + (last_event == 0 || sig > last_event + hyst)) { + mvmvif->bf_data.last_cqm_event = sig; + IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", + sig); + ieee80211_cqm_rssi_notify( + vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + GFP_KERNEL); + } +} + +static inline void +iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_stats *trig_stats; + u32 trig_offset, trig_thold; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS); + trig_stats = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + trig_offset = le32_to_cpu(trig_stats->stop_offset); + trig_thold = le32_to_cpu(trig_stats->stop_threshold); + + if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt))) + return; + + if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); +} + +void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data; + struct iwl_mvm_stat_data data = { + .mvm = mvm, + }; + u32 temperature; + + if 
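
The CQM decision above combines the configured threshold with hysteresis around the last reported level, so small fluctuations near the threshold do not flood mac80211 with events. The same decision in isolation, with illustrative numbers:

	#include <stdio.h>

	enum cqm_event { CQM_NONE, CQM_LOW, CQM_HIGH };

	/* A low event fires when the signal is under the threshold and has
	 * dropped at least 'hyst' below the last reported level (or nothing
	 * was reported yet); the high event is symmetric. */
	static enum cqm_event cqm_check(int sig, int thold, int hyst,
					int *last_event)
	{
		if (thold && sig < thold &&
		    (*last_event == 0 || sig < *last_event - hyst)) {
			*last_event = sig;
			return CQM_LOW;
		}
		if (sig > thold &&
		    (*last_event == 0 || sig > *last_event + hyst)) {
			*last_event = sig;
			return CQM_HIGH;
		}
		return CQM_NONE;
	}

	int main(void)
	{
		int last = 0;

		printf("%d\n", cqm_check(-75, -70, 3, &last)); /* LOW  (1) */
		printf("%d\n", cqm_check(-76, -70, 3, &last)); /* NONE (0): inside hysteresis */
		printf("%d\n", cqm_check(-60, -70, 3, &last)); /* HIGH (2) */
		return 0;
	}
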
(iwl_rx_packet_payload_len(pkt) != sizeof(*stats)) + goto invalid; + + temperature = le32_to_cpu(stats->general.radio_temperature); + data.mac_id = stats->rx.general.mac_id; + data.beacon_filter_average_energy = + stats->general.beacon_filter_average_energy; + + iwl_mvm_update_rx_statistics(mvm, &stats->rx); + + mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time); + mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time); + mvm->radio_stats.on_time_rf = + le64_to_cpu(stats->general.on_time_rf); + mvm->radio_stats.on_time_scan = + le64_to_cpu(stats->general.on_time_scan); + + data.general = &stats->general; + + iwl_mvm_rx_stats_check_trigger(mvm, pkt); + + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_stat_iterator, + &data); + return; + invalid: + IWL_ERR(mvm, "received invalid statistics size (%d)!\n", + iwl_rx_packet_payload_len(pkt)); +} + +void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c new file mode 100644 index 000000000000..d6e0c1b5c20c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -0,0 +1,1552 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include + +#include "mvm.h" +#include "fw-api-scan.h" + +#define IWL_DENSE_EBS_SCAN_RATIO 5 +#define IWL_SPARSE_EBS_SCAN_RATIO 1 + +enum iwl_mvm_scan_type { + IWL_SCAN_TYPE_UNASSOC, + IWL_SCAN_TYPE_WILD, + IWL_SCAN_TYPE_MILD, + IWL_SCAN_TYPE_FRAGMENTED, +}; + +enum iwl_mvm_traffic_load { + IWL_MVM_TRAFFIC_LOW, + IWL_MVM_TRAFFIC_MEDIUM, + IWL_MVM_TRAFFIC_HIGH, +}; + +struct iwl_mvm_scan_timing_params { + u32 dwell_active; + u32 dwell_passive; + u32 dwell_fragmented; + u32 suspend_time; + u32 max_out_time; +}; + +static struct iwl_mvm_scan_timing_params scan_timing[] = { + [IWL_SCAN_TYPE_UNASSOC] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 0, + .max_out_time = 0, + }, + [IWL_SCAN_TYPE_WILD] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 30, + .max_out_time = 120, + }, + [IWL_SCAN_TYPE_MILD] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 120, + .max_out_time = 120, + }, + [IWL_SCAN_TYPE_FRAGMENTED] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 95, + .max_out_time = 44, + }, +}; + +struct iwl_mvm_scan_params { + enum iwl_mvm_scan_type type; + u32 n_channels; + u16 delay; + int n_ssids; + struct cfg80211_ssid *ssids; + struct ieee80211_channel **channels; + u32 flags; + u8 *mac_addr; + u8 *mac_addr_mask; + bool no_cck; + bool pass_all; + int n_match_sets; + struct iwl_scan_probe_req preq; + struct cfg80211_match_set *match_sets; + int n_scan_plans; + struct cfg80211_sched_scan_plan *scan_plans; +}; + +static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) +{ + if (mvm->scan_rx_ant != ANT_NONE) + return mvm->scan_rx_ant; + return iwl_mvm_get_valid_rx_ant(mvm); +} + +static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) +{ + u16 rx_chain; + u8 rx_ant; + + rx_ant = iwl_mvm_scan_rx_ant(mvm); + rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; + rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS; + return cpu_to_le16(rx_chain); +} + +static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band) +{ + if (band == IEEE80211_BAND_2GHZ) + return cpu_to_le32(PHY_BAND_24); + else + return cpu_to_le32(PHY_BAND_5); +} + +static inline __le32 +iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, + bool no_cck) +{ + u32 tx_ant; + + mvm->scan_last_antenna_idx = + iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), + mvm->scan_last_antenna_idx); + tx_ant = BIT(mvm->scan_last_antenna_idx) << 
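
Scan probe requests go out at the most robust legacy rate for the band: 1 Mbps CCK on 2.4 GHz unless CCK is disallowed, otherwise 6 Mbps OFDM, while the Tx antenna is rotated from scan to scan. A trivial sketch of just the rate choice; the real function additionally packs the chosen antenna and rate flags into the firmware's rate_n_flags encoding, which is not reproduced here.

	#include <stdio.h>
	#include <stdbool.h>

	enum band { BAND_2GHZ, BAND_5GHZ };

	/* Pick the legacy rate used for scan probes on the given band. */
	static const char *scan_tx_rate(enum band band, bool no_cck)
	{
		if (band == BAND_2GHZ && !no_cck)
			return "1 Mbps CCK";
		return "6 Mbps OFDM";
	}

	int main(void)
	{
		printf("2.4 GHz:         %s\n", scan_tx_rate(BAND_2GHZ, false));
		printf("2.4 GHz, no CCK: %s\n", scan_tx_rate(BAND_2GHZ, true));
		printf("5 GHz:           %s\n", scan_tx_rate(BAND_5GHZ, false));
		return 0;
	}
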
RATE_MCS_ANT_POS; + + if (band == IEEE80211_BAND_2GHZ && !no_cck) + return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | + tx_ant); + else + return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); +} + +static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int *global_cnt = data; + + if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && + mvmvif->phy_ctxt->id < MAX_PHYS) + *global_cnt += 1; +} + +static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) +{ + return IWL_MVM_TRAFFIC_LOW; +} + +static enum +iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params) +{ + int global_cnt = 0; + enum iwl_mvm_traffic_load load; + bool low_latency; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_scan_condition_iterator, + &global_cnt); + if (!global_cnt) + return IWL_SCAN_TYPE_UNASSOC; + + load = iwl_mvm_get_traffic_load(mvm); + low_latency = iwl_mvm_low_latency(mvm); + + if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && + vif->type != NL80211_IFTYPE_P2P_DEVICE && + fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) + return IWL_SCAN_TYPE_FRAGMENTED; + + if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) + return IWL_SCAN_TYPE_MILD; + + return IWL_SCAN_TYPE_WILD; +} + +static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) +{ + /* require rrm scan whenever the fw supports it */ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT); +} + +static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) +{ + int max_probe_len; + + max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; + + /* we create the 802.11 header and SSID element */ + max_probe_len -= 24 + 2; + + /* DS parameter set element is added on 2.4GHZ band if required */ + if (iwl_mvm_rrm_scan_needed(mvm)) + max_probe_len -= 3; + + return max_probe_len; +} + +int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) +{ + int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm); + + /* TODO: [BUG] This function should return the maximum allowed size of + * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs + * in the same command. So the correct implementation of this function + * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan + * command has only 512 bytes and it would leave us with about 240 + * bytes for scan IEs, which is clearly not enough. So meanwhile + * we will report an incorrect value. This may result in a failure to + * issue a scan in unified_scan_lmac and unified_sched_scan_lmac + * functions with -ENOBUFS, if a large enough probe will be provided. 
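
The IE-room computation above starts from the probe request buffer, subtracts the 802.11 management header plus an empty SSID element, and three more bytes when an RRM DS Parameter Set element has to be added. A sketch of that arithmetic; the 512-byte figure mirrors the TODO above and is an assumption here, the real SCAN_OFFLOAD_PROBE_REQ_SIZE comes from the firmware API headers. As the TODO notes, for LMAC the honest answer would be roughly half of this, since both bands share the same buffer.

	#include <stdio.h>
	#include <stdbool.h>

	#define PROBE_REQ_SIZE		512	/* assumed, see note above */
	#define MGMT_HDR_LEN		24	/* 802.11 management header */
	#define SSID_IE_OVERHEAD	2	/* empty SSID element: id + len */
	#define DS_PARAM_IE_LEN		3	/* DS Parameter Set, when RRM needs it */

	static int max_scan_ie_room(bool rrm_needed)
	{
		int room = PROBE_REQ_SIZE - MGMT_HDR_LEN - SSID_IE_OVERHEAD;

		if (rrm_needed)
			room -= DS_PARAM_IE_LEN;
		return room;
	}

	int main(void)
	{
		printf("without RRM: %d bytes\n", max_scan_ie_room(false));
		printf("with RRM:    %d bytes\n", max_scan_ie_room(true));
		return 0;
	}
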
+ */ + return max_ie_len; +} + +static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, + int num_res, u8 *buf, size_t buf_size) +{ + int i; + u8 *pos = buf, *end = buf + buf_size; + + for (i = 0; pos < end && i < num_res; i++) + pos += snprintf(pos, end - pos, " %u", res[i].channel); + + /* terminate the string in case the buffer was too short */ + *(buf + buf_size - 1) = '\0'; + + return buf; +} + +void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; + u8 buf[256]; + + IWL_DEBUG_SCAN(mvm, + "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n", + notif->status, notif->scanned_channels, + iwl_mvm_dump_channel_list(notif->results, + notif->scanned_channels, buf, + sizeof(buf))); +} + +void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); + ieee80211_sched_scan_results(mvm->hw); +} + +static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) +{ + switch (status) { + case IWL_SCAN_EBS_SUCCESS: + return "successful"; + case IWL_SCAN_EBS_INACTIVE: + return "inactive"; + case IWL_SCAN_EBS_FAILED: + case IWL_SCAN_EBS_CHAN_NOT_FOUND: + default: + return "failed"; + } +} + +void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; + bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); + + /* scan status must be locked for proper checking */ + lockdep_assert_held(&mvm->mutex); + + /* We first check if we were stopping a scan, in which case we + * just clear the stopping flag. Then we check if it was a + * firmware initiated stop, in which case we need to inform + * mac80211. + * Note that we can have a stopping and a running scan + * simultaneously, but we can't have two different types of + * scans stopping or running at the same time (since LMAC + * doesn't support it). + */ + + if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) { + WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR); + + IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", + aborted ? "aborted" : "completed", + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); + IWL_DEBUG_SCAN(mvm, + "Last line %d, Last iteration %d, Time after last iteration %d\n", + scan_notif->last_schedule_line, + scan_notif->last_schedule_iteration, + __le32_to_cpu(scan_notif->time_after_last_iter)); + + mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; + } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { + IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n", + aborted ? "aborted" : "completed", + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); + + mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR; + } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { + WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR); + + IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", + aborted ? 
"aborted" : "completed", + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); + IWL_DEBUG_SCAN(mvm, + "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n", + scan_notif->last_schedule_line, + scan_notif->last_schedule_iteration, + __le32_to_cpu(scan_notif->time_after_last_iter)); + + mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; + ieee80211_sched_scan_stopped(mvm->hw); + } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { + IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", + aborted ? "aborted" : "completed", + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); + + mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; + ieee80211_scan_completed(mvm->hw, + scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + } + + mvm->last_ebs_successful = + scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || + scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; +} + +static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) +{ + int i; + + for (i = 0; i < PROBE_OPTION_MAX; i++) { + if (!ssid_list[i].len) + break; + if (ssid_list[i].len == ssid_len && + !memcmp(ssid_list->ssid, ssid, ssid_len)) + return i; + } + return -1; +} + +/* We insert the SSIDs in an inverted order, because the FW will + * invert it back. + */ +static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params, + struct iwl_ssid_ie *ssids, + u32 *ssid_bitmap) +{ + int i, j; + int index; + + /* + * copy SSIDs from match list. + * iwl_config_sched_scan_profiles() uses the order of these ssids to + * config match list. + */ + for (i = 0, j = params->n_match_sets - 1; + j >= 0 && i < PROBE_OPTION_MAX; + i++, j--) { + /* skip empty SSID matchsets */ + if (!params->match_sets[j].ssid.ssid_len) + continue; + ssids[i].id = WLAN_EID_SSID; + ssids[i].len = params->match_sets[j].ssid.ssid_len; + memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid, + ssids[i].len); + } + + /* add SSIDs from scan SSID list */ + *ssid_bitmap = 0; + for (j = params->n_ssids - 1; + j >= 0 && i < PROBE_OPTION_MAX; + i++, j--) { + index = iwl_ssid_exist(params->ssids[j].ssid, + params->ssids[j].ssid_len, + ssids); + if (index < 0) { + ssids[i].id = WLAN_EID_SSID; + ssids[i].len = params->ssids[j].ssid_len; + memcpy(ssids[i].ssid, params->ssids[j].ssid, + ssids[i].len); + *ssid_bitmap |= BIT(i); + } else { + *ssid_bitmap |= BIT(index); + } + } +} + +static int +iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, + struct cfg80211_sched_scan_request *req) +{ + struct iwl_scan_offload_profile *profile; + struct iwl_scan_offload_profile_cfg *profile_cfg; + struct iwl_scan_offload_blacklist *blacklist; + struct iwl_host_cmd cmd = { + .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD, + .len[1] = sizeof(*profile_cfg), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .dataflags[1] = IWL_HCMD_DFL_NOCOPY, + }; + int blacklist_len; + int i; + int ret; + + if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES)) + return -EIO; + + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL) + blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN; + else + blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN; + + blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL); + if (!blacklist) + return -ENOMEM; + + profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL); + if (!profile_cfg) { + ret = -ENOMEM; + goto free_blacklist; + } + + cmd.data[0] = blacklist; + cmd.len[0] = sizeof(*blacklist) * blacklist_len; + cmd.data[1] = profile_cfg; + + /* No blacklist configuration */ + + profile_cfg->num_profiles = req->n_match_sets; + 
profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN; + profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN; + profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN; + if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len) + profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN; + + for (i = 0; i < req->n_match_sets; i++) { + profile = &profile_cfg->profiles[i]; + profile->ssid_index = i; + /* Support any cipher and auth algorithm */ + profile->unicast_cipher = 0xff; + profile->auth_alg = 0xff; + profile->network_type = IWL_NETWORK_TYPE_ANY; + profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY; + profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN; + } + + IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n"); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + kfree(profile_cfg); +free_blacklist: + kfree(blacklist); + + return ret; +} + +static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, + struct cfg80211_sched_scan_request *req) +{ + if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { + IWL_DEBUG_SCAN(mvm, + "Sending scheduled scan with filtering, n_match_sets %d\n", + req->n_match_sets); + return false; + } + + IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); + return true; +} + +static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) +{ + int ret; + struct iwl_host_cmd cmd = { + .id = SCAN_OFFLOAD_ABORT_CMD, + }; + u32 status; + + ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); + if (ret) + return ret; + + if (status != CAN_ABORT_STATUS) { + /* + * The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before the + * microcode has notified us that a scan is completed. + */ + IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status); + ret = -ENOENT; + } + + return ret; +} + +static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, + struct iwl_scan_req_tx_cmd *tx_cmd, + bool no_cck) +{ + tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | + TX_CMD_FLG_BT_DIS); + tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, + IEEE80211_BAND_2GHZ, + no_cck); + tx_cmd[0].sta_id = mvm->aux_sta.sta_id; + + tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | + TX_CMD_FLG_BT_DIS); + tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, + IEEE80211_BAND_5GHZ, + no_cck); + tx_cmd[1].sta_id = mvm->aux_sta.sta_id; +} + +static void +iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm, + struct ieee80211_channel **channels, + int n_channels, u32 ssid_bitmap, + struct iwl_scan_req_lmac *cmd) +{ + struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data; + int i; + + for (i = 0; i < n_channels; i++) { + channel_cfg[i].channel_num = + cpu_to_le16(channels[i]->hw_value); + channel_cfg[i].iter_count = cpu_to_le16(1); + channel_cfg[i].iter_interval = 0; + channel_cfg[i].flags = + cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL | + ssid_bitmap); + } +} + +static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, + size_t len, u8 *const pos) +{ + static const u8 before_ds_params[] = { + WLAN_EID_SSID, + WLAN_EID_SUPP_RATES, + WLAN_EID_REQUEST, + WLAN_EID_EXT_SUPP_RATES, + }; + size_t offs; + u8 *newpos = pos; + + if (!iwl_mvm_rrm_scan_needed(mvm)) { + memcpy(newpos, ies, len); + return newpos + len; + } + + offs = ieee80211_ie_split(ies, len, + before_ds_params, + ARRAY_SIZE(before_ds_params), + 0); + + memcpy(newpos, ies, offs); + newpos += offs; + + /* Add a placeholder for DS Parameter Set element */ + *newpos++ = 
WLAN_EID_DS_PARAMS; + *newpos++ = 1; + *newpos++ = 0; + + memcpy(newpos, ies + offs, len - offs); + newpos += len - offs; + + return newpos; +} + +static void +iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_scan_ies *ies, + struct iwl_mvm_scan_params *params) +{ + struct ieee80211_mgmt *frame = (void *)params->preq.buf; + u8 *pos, *newpos; + const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? + params->mac_addr : NULL; + + /* + * Unfortunately, right now the offload scan doesn't support randomising + * within the firmware, so until the firmware API is ready we implement + * it in the driver. This means that the scan iterations won't really be + * random, only when it's restarted, but at least that helps a bit. + */ + if (mac_addr) + get_random_mask_addr(frame->sa, mac_addr, + params->mac_addr_mask); + else + memcpy(frame->sa, vif->addr, ETH_ALEN); + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + eth_broadcast_addr(frame->da); + eth_broadcast_addr(frame->bssid); + frame->seq_ctrl = 0; + + pos = frame->u.probe_req.variable; + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + params->preq.mac_header.offset = 0; + params->preq.mac_header.len = cpu_to_le16(24 + 2); + + /* Insert ds parameter set element on 2.4 GHz band */ + newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, + ies->ies[IEEE80211_BAND_2GHZ], + ies->len[IEEE80211_BAND_2GHZ], + pos); + params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); + params->preq.band_data[0].len = cpu_to_le16(newpos - pos); + pos = newpos; + + memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ], + ies->len[IEEE80211_BAND_5GHZ]); + params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); + params->preq.band_data[1].len = + cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]); + pos += ies->len[IEEE80211_BAND_5GHZ]; + + memcpy(pos, ies->common_ies, ies->common_ie_len); + params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); + params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); +} + +static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm, + enum iwl_scan_priority_ext prio) +{ + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY)) + return cpu_to_le32(prio); + + if (prio <= IWL_SCAN_PRIORITY_EXT_2) + return cpu_to_le32(IWL_SCAN_PRIORITY_LOW); + + if (prio <= IWL_SCAN_PRIORITY_EXT_4) + return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM); + + return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); +} + +static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, + struct iwl_scan_req_lmac *cmd, + struct iwl_mvm_scan_params *params) +{ + cmd->active_dwell = scan_timing[params->type].dwell_active; + cmd->passive_dwell = scan_timing[params->type].dwell_passive; + cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; + cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); + cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); + cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); +} + +static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, + struct ieee80211_scan_ies *ies, + int n_channels) +{ + return ((n_ssids <= PROBE_OPTION_MAX) && + (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & + (ies->common_ie_len + + ies->len[NL80211_BAND_2GHZ] + + ies->len[NL80211_BAND_5GHZ] <= + iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); +} + +static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + const struct iwl_ucode_capabilities *capa = 
&mvm->fw->ucode_capa; + + /* We can only use EBS if: + * 1. the feature is supported; + * 2. the last EBS was successful; + * 3. if only single scan, the single scan EBS API is supported; + * 4. it's not a p2p find operation. + */ + return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && + mvm->last_ebs_successful && + vif->type != NL80211_IFTYPE_P2P_DEVICE); +} + +static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params) +{ + int flags = 0; + + if (params->n_ssids == 0) + flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; + + if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) + flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; + + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) + flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; + + if (iwl_mvm_rrm_scan_needed(mvm)) + flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED; + + if (params->pass_all) + flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; + else + flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->scan_iter_notif_enabled) + flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; +#endif + + return flags; +} + +static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params) +{ + struct iwl_scan_req_lmac *cmd = mvm->scan_cmd; + struct iwl_scan_probe_req *preq = + (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) * + mvm->fw->ucode_capa.n_scan_channels); + u32 ssid_bitmap = 0; + int i; + + lockdep_assert_held(&mvm->mutex); + + memset(cmd, 0, ksize(cmd)); + + if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) + return -EINVAL; + + iwl_mvm_scan_lmac_dwell(mvm, cmd, params); + + cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); + cmd->iter_num = cpu_to_le32(1); + cmd->n_channels = (u8)params->n_channels; + + cmd->delay = cpu_to_le32(params->delay); + + cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params)); + + cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band); + cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | + MAC_FILTER_IN_BEACON); + iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck); + iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap); + + /* this API uses bits 1-20 instead of 0-19 */ + ssid_bitmap <<= 1; + + for (i = 0; i < params->n_scan_plans; i++) { + struct cfg80211_sched_scan_plan *scan_plan = + ¶ms->scan_plans[i]; + + cmd->schedule[i].delay = + cpu_to_le16(scan_plan->interval); + cmd->schedule[i].iterations = scan_plan->iterations; + cmd->schedule[i].full_scan_mul = 1; + } + + /* + * If the number of iterations of the last scan plan is set to + * zero, it should run infinitely. However, this is not always the case. + * For example, when regular scan is requested the driver sets one scan + * plan with one iteration. 
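+ * To preserve the "run forever" intent as far as the command format
+ * allows, a zero iteration count on the last plan is replaced with 0xff
+ * just below.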
+ */ + if (!cmd->schedule[i - 1].iterations) + cmd->schedule[i - 1].iterations = 0xff; + + if (iwl_mvm_scan_use_ebs(mvm, vif)) { + cmd->channel_opt[0].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[0].non_ebs_ratio = + cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); + cmd->channel_opt[1].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[1].non_ebs_ratio = + cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); + } + + iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels, + params->n_channels, ssid_bitmap, cmd); + + *preq = params->preq; + + return 0; +} + +static int rate_to_scan_rate_flag(unsigned int rate) +{ + static const int rate_to_scan_rate[IWL_RATE_COUNT] = { + [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M, + [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M, + [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M, + [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M, + [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M, + [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M, + [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M, + [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M, + [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M, + [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M, + [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M, + [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M, + }; + + return rate_to_scan_rate[rate]; +} + +static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) +{ + struct ieee80211_supported_band *band; + unsigned int rates = 0; + int i; + + band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + for (i = 0; i < band->n_bitrates; i++) + rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); + band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + for (i = 0; i < band->n_bitrates; i++) + rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); + + /* Set both basic rates and supported rates */ + rates |= SCAN_CONFIG_SUPPORTED_RATE(rates); + + return cpu_to_le32(rates); +} + +int iwl_mvm_config_scan(struct iwl_mvm *mvm) +{ + struct iwl_scan_config *scan_config; + struct ieee80211_supported_band *band; + int num_channels = + mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + + mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; + int ret, i, j = 0, cmd_size; + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), + }; + + if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) + return -ENOBUFS; + + cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; + + scan_config = kzalloc(cmd_size, GFP_KERNEL); + if (!scan_config) + return -ENOMEM; + + scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE | + SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | + SCAN_CONFIG_FLAG_SET_TX_CHAINS | + SCAN_CONFIG_FLAG_SET_RX_CHAINS | + SCAN_CONFIG_FLAG_SET_ALL_TIMES | + SCAN_CONFIG_FLAG_SET_LEGACY_RATES | + SCAN_CONFIG_FLAG_SET_MAC_ADDR | + SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS| + SCAN_CONFIG_N_CHANNELS(num_channels)); + scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); + scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); + scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm); + scan_config->out_of_channel_time = cpu_to_le32(170); + scan_config->suspend_time = cpu_to_le32(30); + scan_config->dwell_active = 20; + scan_config->dwell_passive = 110; + scan_config->dwell_fragmented = 20; + + memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); + + scan_config->bcast_sta_id = 
mvm->aux_sta.sta_id; + scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS | + IWL_CHANNEL_FLAG_ACCURATE_EBS | + IWL_CHANNEL_FLAG_EBS_ADD | + IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; + + band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + for (i = 0; i < band->n_channels; i++, j++) + scan_config->channel_array[j] = band->channels[i].hw_value; + band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + for (i = 0; i < band->n_channels; i++, j++) + scan_config->channel_array[j] = band->channels[i].hw_value; + + cmd.data[0] = scan_config; + cmd.len[0] = cmd_size; + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + + IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + + kfree(scan_config); + return ret; +} + +static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) +{ + int i; + + for (i = 0; i < mvm->max_scans; i++) + if (mvm->scan_uid_status[i] == status) + return i; + + return -ENOENT; +} + +static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) +{ + return params->n_scan_plans == 1 && + params->scan_plans[0].iterations == 1; +} + +static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, + struct iwl_scan_req_umac *cmd, + struct iwl_mvm_scan_params *params) +{ + cmd->active_dwell = scan_timing[params->type].dwell_active; + cmd->passive_dwell = scan_timing[params->type].dwell_passive; + cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; + cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); + cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); + cmd->scan_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); + + if (iwl_mvm_is_regular_scan(params)) + cmd->ooc_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); + else + cmd->ooc_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2); +} + +static void +iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, + struct ieee80211_channel **channels, + int n_channels, u32 ssid_bitmap, + struct iwl_scan_req_umac *cmd) +{ + struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data; + int i; + + for (i = 0; i < n_channels; i++) { + channel_cfg[i].flags = cpu_to_le32(ssid_bitmap); + channel_cfg[i].channel_num = channels[i]->hw_value; + channel_cfg[i].iter_count = 1; + channel_cfg[i].iter_interval = 0; + } +} + +static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params) +{ + int flags = 0; + + if (params->n_ssids == 0) + flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE; + + if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; + + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; + + if (iwl_mvm_rrm_scan_needed(mvm)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; + + if (params->pass_all) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL; + else + flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH; + + if (!iwl_mvm_is_regular_scan(params)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->scan_iter_notif_enabled) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; +#endif + return flags; +} + +static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, + int type) +{ + struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data + + sizeof(struct iwl_scan_channel_cfg_umac) * + mvm->fw->ucode_capa.n_scan_channels; + int uid, i; + u32 ssid_bitmap = 0; 
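+	/* The UMAC request keeps the per-channel configs at the start of
+	 * cmd->data; the tail (sec_part) that follows them carries the
+	 * schedules, the delay, the direct-scan SSIDs and the probe request
+	 * template.
+	 */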
+ + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) + return -EINVAL; + + uid = iwl_mvm_scan_uid_by_status(mvm, 0); + if (uid < 0) + return uid; + + memset(cmd, 0, ksize(cmd)); + + iwl_mvm_scan_umac_dwell(mvm, cmd, params); + + mvm->scan_uid_status[uid] = type; + + cmd->uid = cpu_to_le32(uid); + cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); + + if (type == IWL_MVM_SCAN_SCHED) + cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); + + if (iwl_mvm_scan_use_ebs(mvm, vif)) + cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; + + cmd->n_channels = params->n_channels; + + iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); + + iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, + params->n_channels, ssid_bitmap, cmd); + + for (i = 0; i < params->n_scan_plans; i++) { + struct cfg80211_sched_scan_plan *scan_plan = + ¶ms->scan_plans[i]; + + sec_part->schedule[i].iter_count = scan_plan->iterations; + sec_part->schedule[i].interval = + cpu_to_le16(scan_plan->interval); + } + + /* + * If the number of iterations of the last scan plan is set to + * zero, it should run infinitely. However, this is not always the case. + * For example, when regular scan is requested the driver sets one scan + * plan with one iteration. + */ + if (!sec_part->schedule[i - 1].iter_count) + sec_part->schedule[i - 1].iter_count = 0xff; + + sec_part->delay = cpu_to_le16(params->delay); + sec_part->preq = params->preq; + + return 0; +} + +static int iwl_mvm_num_scans(struct iwl_mvm *mvm) +{ + return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK); +} + +static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) +{ + /* This looks a bit arbitrary, but the idea is that if we run + * out of possible simultaneous scans and the userspace is + * trying to run a scan type that is already running, we + * return -EBUSY. But if the userspace wants to start a + * different type of scan, we stop the opposite type to make + * space for the new request. The reason is backwards + * compatibility with old wpa_supplicant that wouldn't stop a + * scheduled scan before starting a normal scan. + */ + + if (iwl_mvm_num_scans(mvm) < mvm->max_scans) + return 0; + + /* Use a switch, even though this is a bitmask, so that more + * than one bits set will fall in default and we will warn. + */ + switch (type) { + case IWL_MVM_SCAN_REGULAR: + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) + return -EBUSY; + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); + case IWL_MVM_SCAN_SCHED: + if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) + return -EBUSY; + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); + case IWL_MVM_SCAN_NETDETECT: + /* No need to stop anything for net-detect since the + * firmware is restarted anyway. This way, any sched + * scans that were running will be restarted when we + * resume. 
+ */ + return 0; + default: + WARN_ON(1); + break; + } + + return -EIO; +} + +int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct iwl_host_cmd hcmd = { + .len = { iwl_mvm_scan_size(mvm), }, + .data = { mvm->scan_cmd, }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + struct iwl_mvm_scan_params params = {}; + int ret; + struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 }; + + lockdep_assert_held(&mvm->mutex); + + if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { + IWL_ERR(mvm, "scan while LAR regdomain is not set\n"); + return -EBUSY; + } + + ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR); + if (ret) + return ret; + + /* we should have failed registration if scan_cmd was NULL */ + if (WARN_ON(!mvm->scan_cmd)) + return -ENOMEM; + + if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) + return -ENOBUFS; + + params.n_ssids = req->n_ssids; + params.flags = req->flags; + params.n_channels = req->n_channels; + params.delay = 0; + params.ssids = req->ssids; + params.channels = req->channels; + params.mac_addr = req->mac_addr; + params.mac_addr_mask = req->mac_addr_mask; + params.no_cck = req->no_cck; + params.pass_all = true; + params.n_match_sets = 0; + params.match_sets = NULL; + + params.scan_plans = &scan_plan; + params.n_scan_plans = 1; + + params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); + + iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); + ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, + IWL_MVM_SCAN_REGULAR); + } else { + hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; + ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); + } + + if (ret) + return ret; + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + if (ret) { + /* If the scan failed, it usually means that the FW was unable + * to allocate the time events. Warn on it, but maybe we + * should try to send the command again with different params. + */ + IWL_ERR(mvm, "Scan failed! 
ret %d\n", ret); + return ret; + } + + IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); + mvm->scan_status |= IWL_MVM_SCAN_REGULAR; + iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); + + return 0; +} + +int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies, + int type) +{ + struct iwl_host_cmd hcmd = { + .len = { iwl_mvm_scan_size(mvm), }, + .data = { mvm->scan_cmd, }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + struct iwl_mvm_scan_params params = {}; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { + IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n"); + return -EBUSY; + } + + ret = iwl_mvm_check_running_scans(mvm, type); + if (ret) + return ret; + + /* we should have failed registration if scan_cmd was NULL */ + if (WARN_ON(!mvm->scan_cmd)) + return -ENOMEM; + + if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) + return -ENOBUFS; + + params.n_ssids = req->n_ssids; + params.flags = req->flags; + params.n_channels = req->n_channels; + params.ssids = req->ssids; + params.channels = req->channels; + params.mac_addr = req->mac_addr; + params.mac_addr_mask = req->mac_addr_mask; + params.no_cck = false; + params.pass_all = iwl_mvm_scan_pass_all(mvm, req); + params.n_match_sets = req->n_match_sets; + params.match_sets = req->match_sets; + if (!req->n_scan_plans) + return -EINVAL; + + params.n_scan_plans = req->n_scan_plans; + params.scan_plans = req->scan_plans; + + params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); + + /* In theory, LMAC scans can handle a 32-bit delay, but since + * waiting for over 18 hours to start the scan is a bit silly + * and to keep it aligned with UMAC scans (which only support + * 16-bit delays), trim it down to 16-bits. + */ + if (req->delay > U16_MAX) { + IWL_DEBUG_SCAN(mvm, + "delay value is > 16-bits, set to max possible\n"); + params.delay = U16_MAX; + } else { + params.delay = req->delay; + } + + ret = iwl_mvm_config_sched_scan_profiles(mvm, req); + if (ret) + return ret; + + iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); + ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_SCHED); + } else { + hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; + ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); + } + + if (ret) + return ret; + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + if (!ret) { + IWL_DEBUG_SCAN(mvm, + "Sched scan request was sent successfully\n"); + mvm->scan_status |= type; + } else { + /* If the scan failed, it usually means that the FW was unable + * to allocate the time events. Warn on it, but maybe we + * should try to send the command again with different params. + */ + IWL_ERR(mvm, "Sched scan failed! 
ret %d\n", ret); + } + + return ret; +} + +void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_umac_scan_complete *notif = (void *)pkt->data; + u32 uid = __le32_to_cpu(notif->uid); + bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); + + if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) + return; + + /* if the scan is already stopping, we don't need to notify mac80211 */ + if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { + ieee80211_scan_completed(mvm->hw, aborted); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { + ieee80211_sched_scan_stopped(mvm->hw); + } + + mvm->scan_status &= ~mvm->scan_uid_status[uid]; + IWL_DEBUG_SCAN(mvm, + "Scan completed, uid %u type %u, status %s, EBS status %s\n", + uid, mvm->scan_uid_status[uid], + notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? + "completed" : "aborted", + iwl_mvm_ebs_status_str(notif->ebs_status)); + IWL_DEBUG_SCAN(mvm, + "Last line %d, Last iteration %d, Time from last iteration %d\n", + notif->last_schedule, notif->last_iter, + __le32_to_cpu(notif->time_from_last_iter)); + + if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && + notif->ebs_status != IWL_SCAN_EBS_INACTIVE) + mvm->last_ebs_successful = false; + + mvm->scan_uid_status[uid] = 0; +} + +void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; + u8 buf[256]; + + IWL_DEBUG_SCAN(mvm, + "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n", + notif->status, notif->scanned_channels, + iwl_mvm_dump_channel_list(notif->results, + notif->scanned_channels, buf, + sizeof(buf))); +} + +static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) +{ + struct iwl_umac_scan_abort cmd = {}; + int uid, ret; + + lockdep_assert_held(&mvm->mutex); + + /* We should always get a valid index here, because we already + * checked that this type of scan was running in the generic + * code. 
+ */ + uid = iwl_mvm_scan_uid_by_status(mvm, type); + if (WARN_ON_ONCE(uid < 0)) + return uid; + + cmd.uid = cpu_to_le32(uid); + + IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); + + ret = iwl_mvm_send_cmd_pdu(mvm, + iwl_cmd_id(SCAN_ABORT_UMAC, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(cmd), &cmd); + if (!ret) + mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; + + return ret; +} + +static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) +{ + struct iwl_notification_wait wait_scan_done; + static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, + SCAN_OFFLOAD_COMPLETE, }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, + scan_done_notif, + ARRAY_SIZE(scan_done_notif), + NULL, NULL); + + IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + ret = iwl_mvm_umac_scan_abort(mvm, type); + else + ret = iwl_mvm_lmac_scan_abort(mvm); + + if (ret) { + IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); + iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); + return ret; + } + + ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); + + return ret; +} + +int iwl_mvm_scan_size(struct iwl_mvm *mvm) +{ + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + return sizeof(struct iwl_scan_req_umac) + + sizeof(struct iwl_scan_channel_cfg_umac) * + mvm->fw->ucode_capa.n_scan_channels + + sizeof(struct iwl_scan_req_umac_tail); + + return sizeof(struct iwl_scan_req_lmac) + + sizeof(struct iwl_scan_channel_cfg_lmac) * + mvm->fw->ucode_capa.n_scan_channels + + sizeof(struct iwl_scan_probe_req); +} + +/* + * This function is used in nic restart flow, to inform mac80211 about scans + * that was aborted by restart flow or by an assert. + */ +void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) +{ + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + int uid, i; + + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); + if (uid >= 0) { + ieee80211_scan_completed(mvm->hw, true); + mvm->scan_uid_status[uid] = 0; + } + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); + if (uid >= 0 && !mvm->restart_fw) { + ieee80211_sched_scan_stopped(mvm->hw); + mvm->scan_uid_status[uid] = 0; + } + + /* We shouldn't have any UIDs still set. Loop over all the + * UIDs to make sure there's nothing left there and warn if + * any is found. + */ + for (i = 0; i < mvm->max_scans; i++) { + if (WARN_ONCE(mvm->scan_uid_status[i], + "UMAC scan UID %d status was not cleaned\n", + i)) + mvm->scan_uid_status[i] = 0; + } + } else { + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) + ieee80211_scan_completed(mvm->hw, true); + + /* Sched scan will be restarted by mac80211 in + * restart_hw, so do not report if FW is about to be + * restarted. + */ + if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw) + ieee80211_sched_scan_stopped(mvm->hw); + } +} + +int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) +{ + int ret; + + if (!(mvm->scan_status & type)) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) { + ret = 0; + goto out; + } + + ret = iwl_mvm_scan_stop_wait(mvm, type); + if (!ret) + mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; +out: + /* Clear the scan status so the next scan requests will + * succeed and mark the scan as stopping, so that the Rx + * handler doesn't do anything, as the scan was stopped from + * above. 
+ */ + mvm->scan_status &= ~type; + + if (type == IWL_MVM_SCAN_REGULAR) { + /* Since the rx handler won't do anything now, we have + * to release the scan reference here. + */ + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + if (notify) + ieee80211_scan_completed(mvm->hw, true); + } else if (notify) { + ieee80211_sched_scan_stopped(mvm->hw); + } + + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c new file mode 100644 index 000000000000..b0f59fdd287c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -0,0 +1,340 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "mvm.h" + +/* For counting bound interfaces */ +struct iwl_mvm_active_iface_iterator_data { + struct ieee80211_vif *ignore_vif; + u8 sta_vif_ap_sta_id; + enum iwl_sf_state sta_vif_state; + int num_active_macs; +}; + +/* + * Count bound interfaces which are not p2p, besides data->ignore_vif. + * data->station_vif will point to one bound vif of type station, if exists. + */ +static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_active_iface_iterator_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif == data->ignore_vif || !mvmvif->phy_ctxt || + vif->type == NL80211_IFTYPE_P2P_DEVICE) + return; + + data->num_active_macs++; + + if (vif->type == NL80211_IFTYPE_STATION) { + data->sta_vif_ap_sta_id = mvmvif->ap_sta_id; + if (vif->bss_conf.assoc) + data->sta_vif_state = SF_FULL_ON; + else + data->sta_vif_state = SF_INIT_OFF; + } +} + +/* + * Aging and idle timeouts for the different possible scenarios + * in default configuration + */ +static const +__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { + { + cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF), + cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF) + }, + { + cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF), + cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF) + }, + { + cpu_to_le32(SF_MCAST_AGING_TIMER_DEF), + cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF) + }, + { + cpu_to_le32(SF_BA_AGING_TIMER_DEF), + cpu_to_le32(SF_BA_IDLE_TIMER_DEF) + }, + { + cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF), + cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF) + }, +}; + +/* + * Aging and idle timeouts for the different possible scenarios + * in single BSS MAC configuration. + */ +static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { + { + cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER), + cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER) + }, + { + cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER), + cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER) + }, + { + cpu_to_le32(SF_MCAST_AGING_TIMER), + cpu_to_le32(SF_MCAST_IDLE_TIMER) + }, + { + cpu_to_le32(SF_BA_AGING_TIMER), + cpu_to_le32(SF_BA_IDLE_TIMER) + }, + { + cpu_to_le32(SF_TX_RE_AGING_TIMER), + cpu_to_le32(SF_TX_RE_IDLE_TIMER) + }, +}; + +static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, + struct iwl_sf_cfg_cmd *sf_cmd, + struct ieee80211_sta *sta) +{ + int i, j, watermark; + + sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN); + + /* + * If we are in association flow - check antenna configuration + * capabilities of the AP station, and choose the watermark accordingly. 
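+ * An HT/VHT peer is given a SISO/MIMO2/MIMO3 watermark according to its
+ * RX NSS, a non-HT peer gets the legacy watermark, and MIMO2 is used as
+ * the default when no station is passed in.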
+ */ + if (sta) { + if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) { + switch (sta->rx_nss) { + case 1: + watermark = SF_W_MARK_SISO; + break; + case 2: + watermark = SF_W_MARK_MIMO2; + break; + default: + watermark = SF_W_MARK_MIMO3; + break; + } + } else { + watermark = SF_W_MARK_LEGACY; + } + /* default watermark value for unassociated mode. */ + } else { + watermark = SF_W_MARK_MIMO2; + } + sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark); + + for (i = 0; i < SF_NUM_SCENARIO; i++) { + for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) { + sf_cmd->long_delay_timeouts[i][j] = + cpu_to_le32(SF_LONG_DELAY_AGING_TIMER); + } + } + + if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) { + BUILD_BUG_ON(sizeof(sf_full_timeout) != + sizeof(__le32) * SF_NUM_SCENARIO * + SF_NUM_TIMEOUT_TYPES); + + memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, + sizeof(sf_full_timeout)); + } else { + BUILD_BUG_ON(sizeof(sf_full_timeout_def) != + sizeof(__le32) * SF_NUM_SCENARIO * + SF_NUM_TIMEOUT_TYPES); + + memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, + sizeof(sf_full_timeout_def)); + } + +} + +static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, + enum iwl_sf_state new_state) +{ + struct iwl_sf_cfg_cmd sf_cmd = { + .state = cpu_to_le32(SF_FULL_ON), + }; + struct ieee80211_sta *sta; + int ret = 0; + + if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13) + sf_cmd.state = cpu_to_le32(new_state); + + if (mvm->cfg->disable_dummy_notification) + sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); + + /* + * If an associated AP sta changed its antenna configuration, the state + * will remain FULL_ON but SF parameters need to be reconsidered. + */ + if (new_state != SF_FULL_ON && mvm->sf_state == new_state) + return 0; + + switch (new_state) { + case SF_UNINIT: + if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13) + iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); + break; + case SF_FULL_ON: + if (sta_id == IWL_MVM_STATION_COUNT) { + IWL_ERR(mvm, + "No station: Cannot switch SF to FULL_ON\n"); + return -EINVAL; + } + rcu_read_lock(); + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (IS_ERR_OR_NULL(sta)) { + IWL_ERR(mvm, "Invalid station id\n"); + rcu_read_unlock(); + return -EINVAL; + } + iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta); + rcu_read_unlock(); + break; + case SF_INIT_OFF: + iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); + break; + default: + WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n", + new_state); + return -EINVAL; + } + + ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC, + sizeof(sf_cmd), &sf_cmd); + if (!ret) + mvm->sf_state = new_state; + + return ret; +} + +/* + * Update Smart fifo: + * Count bound interfaces that are not to be removed, ignoring p2p devices, + * and set new state accordingly. + */ +int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, + bool remove_vif) +{ + enum iwl_sf_state new_state; + u8 sta_id = IWL_MVM_STATION_COUNT; + struct iwl_mvm_vif *mvmvif = NULL; + struct iwl_mvm_active_iface_iterator_data data = { + .ignore_vif = changed_vif, + .sta_vif_state = SF_UNINIT, + .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT, + }; + + /* + * Ignore the call if we are in HW Restart flow, or if the handled + * vif is a p2p device. 
+ */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE)) + return 0; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_bound_iface_iterator, + &data); + + /* If changed_vif exists and is not to be removed, add to the count */ + if (changed_vif && !remove_vif) + data.num_active_macs++; + + switch (data.num_active_macs) { + case 0: + /* If there are no active macs - change state to SF_INIT_OFF */ + new_state = SF_INIT_OFF; + break; + case 1: + if (remove_vif) { + /* The one active mac left is of type station + * and we filled the relevant data during iteration + */ + new_state = data.sta_vif_state; + sta_id = data.sta_vif_ap_sta_id; + } else { + if (WARN_ON(!changed_vif)) + return -EINVAL; + if (changed_vif->type != NL80211_IFTYPE_STATION) { + new_state = SF_UNINIT; + } else if (changed_vif->bss_conf.assoc && + changed_vif->bss_conf.dtim_period) { + mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); + sta_id = mvmvif->ap_sta_id; + new_state = SF_FULL_ON; + } else { + new_state = SF_INIT_OFF; + } + } + break; + default: + /* If there are multiple active macs - change to SF_UNINIT */ + new_state = SF_UNINIT; + } + return iwl_mvm_sf_config(mvm, sta_id, new_state); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c new file mode 100644 index 000000000000..300a249486e4 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -0,0 +1,1810 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include + +#include "mvm.h" +#include "sta.h" +#include "rs.h" + +static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, + enum nl80211_iftype iftype) +{ + int sta_id; + u32 reserved_ids = 0; + + BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32); + WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); + + lockdep_assert_held(&mvm->mutex); + + /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */ + if (iftype != NL80211_IFTYPE_STATION) + reserved_ids = BIT(0); + + /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ + for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) { + if (BIT(sta_id) & reserved_ids) + continue; + + if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex))) + return sta_id; + } + return IWL_MVM_STATION_COUNT; +} + +/* send station add/update command to firmware */ +int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + bool update) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_add_sta_cmd add_sta_cmd = { + .sta_id = mvm_sta->sta_id, + .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), + .add_modify = update ? 1 : 0, + .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | + STA_FLG_MIMO_EN_MSK), + }; + int ret; + u32 status; + u32 agg_size = 0, mpdu_dens = 0; + + if (!update) { + add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); + memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); + } + + switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_160: + add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); + /* fall through */ + case IEEE80211_STA_RX_BW_80: + add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ); + /* fall through */ + case IEEE80211_STA_RX_BW_40: + add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); + /* fall through */ + case IEEE80211_STA_RX_BW_20: + if (sta->ht_cap.ht_supported) + add_sta_cmd.station_flags |= + cpu_to_le32(STA_FLG_FAT_EN_20MHZ); + break; + } + + switch (sta->rx_nss) { + case 1: + add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); + break; + case 2: + add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2); + break; + case 3 ... 
8: + add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3); + break; + } + + switch (sta->smps_mode) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + break; + case IEEE80211_SMPS_STATIC: + /* override NSS */ + add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK); + add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); + break; + case IEEE80211_SMPS_DYNAMIC: + add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT); + break; + case IEEE80211_SMPS_OFF: + /* nothing */ + break; + } + + if (sta->ht_cap.ht_supported) { + add_sta_cmd.station_flags_msk |= + cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | + STA_FLG_AGG_MPDU_DENS_MSK); + + mpdu_dens = sta->ht_cap.ampdu_density; + } + + if (sta->vht_cap.vht_supported) { + agg_size = sta->vht_cap.cap & + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + agg_size >>= + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + } else if (sta->ht_cap.ht_supported) { + agg_size = sta->ht_cap.ampdu_factor; + } + + add_sta_cmd.station_flags |= + cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); + add_sta_cmd.station_flags |= + cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd), + &add_sta_cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "ADD_STA failed\n"); + break; + } + + return ret; +} + +static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + unsigned long used_hw_queues; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, NULL, true, false); + u32 ac; + + lockdep_assert_held(&mvm->mutex); + + used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL); + + /* Find available queues, and allocate them to the ACs */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + u8 queue = find_first_zero_bit(&used_hw_queues, + mvm->first_agg_queue); + + if (queue >= mvm->first_agg_queue) { + IWL_ERR(mvm, "Failed to allocate STA queue\n"); + return -EBUSY; + } + + __set_bit(queue, &used_hw_queues); + mvmsta->hw_queue[ac] = queue; + } + + /* Found a place for all queues - enable them */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], + mvmsta->hw_queue[ac], + iwl_mvm_ac_to_tx_fifo[ac], 0, + wdg_timeout); + mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); + } + + return 0; +} + +static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + unsigned long sta_msk; + int i; + + lockdep_assert_held(&mvm->mutex); + + /* disable the TDLS STA-specific queues */ + sta_msk = mvmsta->tfd_queue_msk; + for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE) + iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); +} + +int iwl_mvm_add_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + int i, ret, sta_id; + + lockdep_assert_held(&mvm->mutex); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + sta_id = iwl_mvm_find_free_sta_id(mvm, + ieee80211_vif_type_p2p(vif)); + else + sta_id = mvm_sta->sta_id; + + if (sta_id == IWL_MVM_STATION_COUNT) + return -ENOSPC; + + if (vif->type 
== NL80211_IFTYPE_AP) { + mvmvif->ap_assoc_sta_count++; + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } + + spin_lock_init(&mvm_sta->lock); + + mvm_sta->sta_id = sta_id; + mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color); + mvm_sta->vif = vif; + mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + mvm_sta->tx_protection = 0; + mvm_sta->tt_tx_protection = false; + + /* HW restart, don't assume the memory has been zeroed */ + atomic_set(&mvm->pending_frames[sta_id], 0); + mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ + mvm_sta->tfd_queue_msk = 0; + + /* allocate new queues for a TDLS station */ + if (sta->tdls) { + ret = iwl_mvm_tdls_sta_init(mvm, sta); + if (ret) + return ret; + } else { + for (i = 0; i < IEEE80211_NUM_ACS; i++) + if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) + mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); + } + + /* for HW restart - reset everything but the sequence number */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + u16 seq = mvm_sta->tid_data[i].seq_number; + memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); + mvm_sta->tid_data[i].seq_number = seq; + } + mvm_sta->agg_tids = 0; + + ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); + if (ret) + goto err; + + if (vif->type == NL80211_IFTYPE_STATION) { + if (!sta->tdls) { + WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT); + mvmvif->ap_sta_id = sta_id; + } else { + WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT); + } + } + + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); + + return 0; + +err: + iwl_mvm_tdls_sta_deinit(mvm, sta); + return ret; +} + +int iwl_mvm_update_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return iwl_mvm_sta_send_to_fw(mvm, sta, true); +} + +int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool drain) +{ + struct iwl_mvm_add_sta_cmd cmd = {}; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); + cmd.sta_id = mvmsta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; + cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", + mvmsta->sta_id); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", + mvmsta->sta_id); + break; + } + + return ret; +} + +/* + * Remove a station from the FW table. Before sending the command to remove + * the station validate that the station is indeed known to the driver (sanity + * only). + */ +static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { + .sta_id = sta_id, + }; + int ret; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + /* Note: internal stations are marked as error values */ + if (!sta) { + IWL_ERR(mvm, "Invalid station id\n"); + return -EINVAL; + } + + ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, + sizeof(rm_sta_cmd), &rm_sta_cmd); + if (ret) { + IWL_ERR(mvm, "Failed to remove station. 
Id=%d\n", sta_id); + return ret; + } + + return 0; +} + +void iwl_mvm_sta_drained_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk); + u8 sta_id; + + /* + * The mutex is needed because of the SYNC cmd, but not only: if the + * work would run concurrently with iwl_mvm_rm_sta, it would run before + * iwl_mvm_rm_sta sets the station as busy, and exit. Then + * iwl_mvm_rm_sta would set the station as busy, and nobody will clean + * that later. + */ + mutex_lock(&mvm->mutex); + + for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) { + int ret; + struct ieee80211_sta *sta = + rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + /* + * This station is in use or RCU-removed; the latter happens in + * managed mode, where mac80211 removes the station before we + * can remove it from firmware (we can only do that after the + * MAC is marked unassociated), and possibly while the deauth + * frame to disconnect from the AP is still queued. Then, the + * station pointer is -ENOENT when the last skb is reclaimed. + */ + if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT) + continue; + + if (PTR_ERR(sta) == -EINVAL) { + IWL_ERR(mvm, "Drained sta %d, but it is internal?\n", + sta_id); + continue; + } + + if (!sta) { + IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n", + sta_id); + continue; + } + + WARN_ON(PTR_ERR(sta) != -EBUSY); + /* This station was removed and we waited until it got drained, + * we can now proceed and remove it. + */ + ret = iwl_mvm_rm_sta_common(mvm, sta_id); + if (ret) { + IWL_ERR(mvm, + "Couldn't remove sta %d after it was drained\n", + sta_id); + continue; + } + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); + clear_bit(sta_id, mvm->sta_drained); + + if (mvm->tfd_drained[sta_id]) { + unsigned long i, msk = mvm->tfd_drained[sta_id]; + + for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE) + iwl_mvm_disable_txq(mvm, i, i, + IWL_MAX_TID_COUNT, 0); + + mvm->tfd_drained[sta_id] = 0; + IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", + sta_id, msk); + } + } + + mutex_unlock(&mvm->mutex); +} + +int iwl_mvm_rm_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id == mvm_sta->sta_id) { + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (ret) + return ret; + /* flush its queues here since we are freeing mvm_sta */ + ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0); + if (ret) + return ret; + ret = iwl_trans_wait_tx_queue_empty(mvm->trans, + mvm_sta->tfd_queue_msk); + if (ret) + return ret; + ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); + + /* if we are associated - we can't remove the AP STA now */ + if (vif->bss_conf.assoc) + return ret; + + /* unassoc - go ahead - remove the AP STA now */ + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + + /* clear d0i3_ap_sta_id if no longer relevant */ + if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id) + mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + } + + /* + * This shouldn't happen - the TDLS channel switch should be canceled + * before the STA is removed. 
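+	 * If it does happen, the WARN below still recovers: it invalidates
+	 * tdls_cs.peer.sta_id and cancels the pending channel-switch work.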
+ */ + if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) { + mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + cancel_delayed_work(&mvm->tdls_cs.dwork); + } + + /* + * Make sure that the tx response code sees the station as -EBUSY and + * calls the drain worker. + */ + spin_lock_bh(&mvm_sta->lock); + /* + * There are frames pending on the AC queues for this station. + * We need to wait until all the frames are drained... + */ + if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], + ERR_PTR(-EBUSY)); + spin_unlock_bh(&mvm_sta->lock); + + /* disable TDLS sta queues on drain complete */ + if (sta->tdls) { + mvm->tfd_drained[mvm_sta->sta_id] = + mvm_sta->tfd_queue_msk; + IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", + mvm_sta->sta_id); + } + + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + } else { + spin_unlock_bh(&mvm_sta->lock); + + if (sta->tdls) + iwl_mvm_tdls_sta_deinit(mvm, sta); + + ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); + } + + return ret; +} + +int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u8 sta_id) +{ + int ret = iwl_mvm_rm_sta_common(mvm, sta_id); + + lockdep_assert_held(&mvm->mutex); + + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); + return ret; +} + +static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + u32 qmask, enum nl80211_iftype iftype) +{ + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); + if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT)) + return -ENOSPC; + } + + sta->tfd_queue_msk = qmask; + + /* put a non-NULL value so iterating over the stations won't stop */ + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); + return 0; +} + +static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta) +{ + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); + memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); + sta->sta_id = IWL_MVM_STATION_COUNT; +} + +static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, + u16 mac_id, u16 color) +{ + struct iwl_mvm_add_sta_cmd cmd; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + memset(&cmd, 0, sizeof(cmd)); + cmd.sta_id = sta->sta_id; + cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, + color)); + + cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); + + if (addr) + memcpy(cmd.addr, addr, ETH_ALEN); + + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_INFO(mvm, "Internal station added.\n"); + return 0; + default: + ret = -EIO; + IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", + status); + break; + } + return ret; +} + +int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) +{ + unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
+ mvm->cfg->base_params->wd_timeout : + IWL_WATCHDOG_DISABLED; + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* Map Aux queue to fifo - needs to happen before adding Aux station */ + iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, + IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); + + /* Allocate aux station and assign to it the aux queue */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), + NL80211_IFTYPE_UNSPECIFIED); + if (ret) + return ret; + + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, + MAC_INDEX_AUX, 0); + + if (ret) + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); + return ret; +} + +void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) +{ + lockdep_assert_held(&mvm->mutex); + + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); +} + +/* + * Send the add station command for the vif's broadcast station. + * Assumes that the station was already allocated. + * + * @mvm: the mvm component + * @vif: the interface to which the broadcast station is added + * @bsta: the broadcast station to add. + */ +int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; + static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + const u8 *baddr = _baddr; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_ADHOC) + baddr = vif->bss_conf.bssid; + + if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) + return -ENOSPC; + + return iwl_mvm_add_int_sta_common(mvm, bsta, baddr, + mvmvif->id, mvmvif->color); +} + +/* Send the FW a request to remove the station from it's internal data + * structures, but DO NOT remove the entry from the local data structures. */ +int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + return ret; +} + +int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 qmask; + + lockdep_assert_held(&mvm->mutex); + + qmask = iwl_mvm_mac_get_queues_mask(vif); + + /* + * The firmware defines the TFD queue mask to only be relevant + * for *unicast* queues, so the multicast (CAB) queue shouldn't + * be included. + */ + if (vif->type == NL80211_IFTYPE_AP) + qmask &= ~BIT(vif->cab_queue); + + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, + ieee80211_vif_type_p2p(vif)); +} + +/* Allocate a new station entry for the broadcast station to the given vif, + * and send it to the FW. + * Note that each P2P mac should have its own broadcast station. + * + * @mvm: the mvm component + * @vif: the interface to which the broadcast station is added + * @bsta: the broadcast station to add. 
*/ +int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_alloc_bcast_sta(mvm, vif); + if (ret) + return ret; + + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + + if (ret) + iwl_mvm_dealloc_int_sta(mvm, bsta); + + return ret; +} + +void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); +} + +/* + * Send the FW a request to remove the station from it's internal data + * structures, and in addition remove it from the local data structure. + */ +int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); + + iwl_mvm_dealloc_bcast_sta(mvm, vif); + + return ret; +} + +#define IWL_MAX_RX_BA_SESSIONS 16 + +int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u16 ssn, bool start) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_add_sta_cmd cmd = {}; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { + IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); + return -ENOSPC; + } + + cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); + cmd.sta_id = mvm_sta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + if (start) { + cmd.add_immediate_ba_tid = (u8) tid; + cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); + } else { + cmd.remove_immediate_ba_tid = (u8) tid; + } + cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : + STA_MODIFY_REMOVE_BA_TID; + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n", + start ? "start" : "stopp"); + break; + case ADD_STA_IMMEDIATE_BA_FAILURE: + IWL_WARN(mvm, "RX BA Session refused by fw\n"); + ret = -ENOSPC; + break; + default: + ret = -EIO; + IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", + start ? 
"start" : "stopp", status); + break; + } + + if (!ret) { + if (start) + mvm->rx_ba_sessions++; + else if (mvm->rx_ba_sessions > 0) + /* check that restart flow didn't zero the counter */ + mvm->rx_ba_sessions--; + } + + return ret; +} + +static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u8 queue, bool start) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_add_sta_cmd cmd = {}; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + if (start) { + mvm_sta->tfd_queue_msk |= BIT(queue); + mvm_sta->tid_disable_agg &= ~BIT(tid); + } else { + mvm_sta->tfd_queue_msk &= ~BIT(queue); + mvm_sta->tid_disable_agg |= BIT(tid); + } + + cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); + cmd.sta_id = mvm_sta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX; + cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); + cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + &cmd, &status); + if (ret) + return ret; + + switch (status) { + case ADD_STA_SUCCESS: + break; + default: + ret = -EIO; + IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", + start ? "start" : "stopp", status); + break; + } + + return ret; +} + +const u8 tid_to_mac80211_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO, +}; + +static const u8 tid_to_ucode_ac[] = { + AC_BE, + AC_BK, + AC_BK, + AC_BE, + AC_VI, + AC_VI, + AC_VO, + AC_VO, +}; + +int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data; + int txq_id; + int ret; + + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + return -EINVAL; + + if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { + IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", + mvmsta->tid_data[tid].state); + return -ENXIO; + } + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvmsta->lock); + + /* possible race condition - we entered D0i3 while starting agg */ + if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) { + spin_unlock_bh(&mvmsta->lock); + IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n"); + return -EIO; + } + + spin_lock_bh(&mvm->queue_info_lock); + + txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue, + mvm->last_agg_queue); + if (txq_id < 0) { + ret = txq_id; + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, "Failed to allocate agg queue\n"); + goto release_locks; + } + mvm->queue_info[txq_id].setup_reserved = true; + spin_unlock_bh(&mvm->queue_info_lock); + + tid_data = &mvmsta->tid_data[tid]; + tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); + tid_data->txq_id = txq_id; + *ssn = tid_data->ssn; + + IWL_DEBUG_TX_QUEUES(mvm, + "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", + mvmsta->sta_id, tid, txq_id, tid_data->ssn, + tid_data->next_reclaimed); + + if (tid_data->ssn == tid_data->next_reclaimed) { + tid_data->state = IWL_AGG_STARTING; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + } else { + tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + + ret = 0; + +release_locks: + spin_unlock_bh(&mvmsta->lock); + + return ret; +} + +int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif 
*vif, + struct ieee80211_sta *sta, u16 tid, u8 buf_size) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); + int queue, fifo, ret; + u16 ssn; + + BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) + != IWL_MAX_TID_COUNT); + + buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); + + spin_lock_bh(&mvmsta->lock); + ssn = tid_data->ssn; + queue = tid_data->txq_id; + tid_data->state = IWL_AGG_ON; + mvmsta->agg_tids |= BIT(tid); + tid_data->ssn = 0xffff; + spin_unlock_bh(&mvmsta->lock); + + fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + + iwl_mvm_enable_agg_txq(mvm, queue, + vif->hw_queue[tid_to_mac80211_ac[tid]], fifo, + mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout); + + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); + if (ret) + return -EIO; + + /* No need to mark as reserved */ + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].setup_reserved = false; + spin_unlock_bh(&mvm->queue_info_lock); + + /* + * Even though in theory the peer could have different + * aggregation reorder buffer sizes for different sessions, + * our ucode doesn't allow for that and has a global limit + * for each station. Therefore, use the minimum of all the + * aggregation sessions and our default value. + */ + mvmsta->max_agg_bufsize = + min(mvmsta->max_agg_bufsize, buf_size); + mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + + IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", + sta->addr, tid); + + return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false); +} + +int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + u16 txq_id; + int err; + + + /* + * If mac80211 is cleaning its state, then say that we finished since + * our state has been cleared anyway. + */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + return 0; + } + + spin_lock_bh(&mvmsta->lock); + + txq_id = tid_data->txq_id; + + IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", + mvmsta->sta_id, tid, txq_id, tid_data->state); + + mvmsta->agg_tids &= ~BIT(tid); + + /* No need to mark as reserved anymore */ + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[txq_id].setup_reserved = false; + spin_unlock_bh(&mvm->queue_info_lock); + + switch (tid_data->state) { + case IWL_AGG_ON: + tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); + + IWL_DEBUG_TX_QUEUES(mvm, + "ssn = %d, next_recl = %d\n", + tid_data->ssn, tid_data->next_reclaimed); + + /* There are still packets for this RA / TID in the HW */ + if (tid_data->ssn != tid_data->next_reclaimed) { + tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA; + err = 0; + break; + } + + tid_data->ssn = 0xffff; + tid_data->state = IWL_AGG_OFF; + spin_unlock_bh(&mvmsta->lock); + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + + iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); + + iwl_mvm_disable_txq(mvm, txq_id, + vif->hw_queue[tid_to_mac80211_ac[tid]], tid, + 0); + return 0; + case IWL_AGG_STARTING: + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* + * The agg session has been stopped before it was set up. This + * can happen when the AddBA timer times out for example. 
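		 * In that case only mac80211 needs to be notified; the TID goes
		 * straight back to IWL_AGG_OFF below and, unlike the IWL_AGG_ON
		 * case, no aggregation queue is disabled here.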
+ */ + + /* No barriers since we are under mutex */ + lockdep_assert_held(&mvm->mutex); + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + tid_data->state = IWL_AGG_OFF; + err = 0; + break; + default: + IWL_ERR(mvm, + "Stopping AGG while state not ON or starting for %d on %d (%d)\n", + mvmsta->sta_id, tid, tid_data->state); + IWL_ERR(mvm, + "\ttid_data->txq_id = %d\n", tid_data->txq_id); + err = -EINVAL; + } + + spin_unlock_bh(&mvmsta->lock); + + return err; +} + +int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + u16 txq_id; + enum iwl_mvm_agg_state old_state; + + /* + * First set the agg state to OFF to avoid calling + * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. + */ + spin_lock_bh(&mvmsta->lock); + txq_id = tid_data->txq_id; + IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", + mvmsta->sta_id, tid, txq_id, tid_data->state); + old_state = tid_data->state; + tid_data->state = IWL_AGG_OFF; + mvmsta->agg_tids &= ~BIT(tid); + spin_unlock_bh(&mvmsta->lock); + + /* No need to mark as reserved */ + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[txq_id].setup_reserved = false; + spin_unlock_bh(&mvm->queue_info_lock); + + if (old_state >= IWL_AGG_ON) { + iwl_mvm_drain_sta(mvm, mvmsta, true); + if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) + IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); + iwl_trans_wait_tx_queue_empty(mvm->trans, + mvmsta->tfd_queue_msk); + iwl_mvm_drain_sta(mvm, mvmsta, false); + + iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); + + iwl_mvm_disable_txq(mvm, tid_data->txq_id, + vif->hw_queue[tid_to_mac80211_ac[tid]], tid, + 0); + } + + return 0; +} + +static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) +{ + int i, max = -1, max_offs = -1; + + lockdep_assert_held(&mvm->mutex); + + /* Pick the unused key offset with the highest 'deleted' + * counter. Every time a key is deleted, all the counters + * are incremented and the one that was just deleted is + * reset to zero. Thus, the highest counter is the one + * that was deleted longest ago. Pick that one. + */ + for (i = 0; i < STA_KEY_MAX_NUM; i++) { + if (test_bit(i, mvm->fw_key_table)) + continue; + if (mvm->fw_key_deleted[i] > max) { + max = mvm->fw_key_deleted[i]; + max_offs = i; + } + } + + if (max_offs < 0) + return STA_KEY_IDX_INVALID; + + __set_bit(max_offs, mvm->fw_key_table); + + return max_offs; +} + +static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (sta) { + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + return mvm_sta->sta_id; + } + + /* + * The device expects GTKs for station interfaces to be + * installed as GTKs for the AP station. If we have no + * station ID, then use AP's station ID. 
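+	 * For example, on a managed interface that is associated (so
+	 * mvmvif->ap_sta_id is valid) and is installing a GTK, 'sta' is
+	 * NULL here and the key ends up programmed on the AP's station
+	 * entry below.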
+ */ + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) + return mvmvif->ap_sta_id; + + return IWL_MVM_STATION_COUNT; +} + +static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct ieee80211_key_conf *keyconf, bool mcast, + u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags) +{ + struct iwl_mvm_add_sta_key_cmd cmd = {}; + __le16 key_flags; + int ret; + u32 status; + u16 keyidx; + int i; + u8 sta_id = mvm_sta->sta_id; + + keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + STA_KEY_FLG_KEYID_MSK; + key_flags = cpu_to_le16(keyidx); + key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); + cmd.tkip_rx_tsc_byte2 = tkip_iv32; + for (i = 0; i < 5; i++) + cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); + memcpy(cmd.key, keyconf->key, keyconf->keylen); + break; + case WLAN_CIPHER_SUITE_CCMP: + key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); + memcpy(cmd.key, keyconf->key, keyconf->keylen); + break; + case WLAN_CIPHER_SUITE_WEP104: + key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); + memcpy(cmd.key + 3, keyconf->key, keyconf->keylen); + break; + default: + key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); + memcpy(cmd.key, keyconf->key, keyconf->keylen); + } + + if (mcast) + key_flags |= cpu_to_le16(STA_KEY_MULTICAST); + + cmd.key_offset = keyconf->hw_key_idx; + cmd.key_flags = key_flags; + cmd.sta_id = sta_id; + + status = ADD_STA_SUCCESS; + if (cmd_flags & CMD_ASYNC) + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, + sizeof(cmd), &cmd); + else + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), + &cmd, &status); + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); + break; + } + + return ret; +} + +static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, + struct ieee80211_key_conf *keyconf, + u8 sta_id, bool remove_key) +{ + struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; + + /* verify the key details match the required command's expectations */ + if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) || + (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || + (keyconf->keyidx != 4 && keyconf->keyidx != 5))) + return -EINVAL; + + igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); + igtk_cmd.sta_id = cpu_to_le32(sta_id); + + if (remove_key) { + igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); + } else { + struct ieee80211_key_seq seq; + const u8 *pn; + + memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen); + ieee80211_get_key_rx_seq(keyconf, 0, &seq); + pn = seq.aes_cmac.pn; + igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | + ((u64) pn[4] << 8) | + ((u64) pn[3] << 16) | + ((u64) pn[2] << 24) | + ((u64) pn[1] << 32) | + ((u64) pn[0] << 40)); + } + + IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", + remove_key ? 
"removing" : "installing", + igtk_cmd.sta_id); + + return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, + sizeof(igtk_cmd), &igtk_cmd); +} + + +static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (sta) + return sta->addr; + + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + u8 sta_id = mvmvif->ap_sta_id; + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + return sta->addr; + } + + + return NULL; +} + +static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf, + bool mcast) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + int ret; + const u8 *addr; + struct ieee80211_key_seq seq; + u16 p1k[5]; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + addr = iwl_mvm_get_mac_addr(mvm, vif, sta); + /* get phase 1 key from mac80211 */ + ieee80211_get_key_rx_seq(keyconf, 0, &seq); + ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); + ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, + seq.tkip.iv32, p1k, 0); + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, + 0, NULL, 0); + break; + default: + ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, + 0, NULL, 0); + } + + return ret; +} + +static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, + struct ieee80211_key_conf *keyconf, + bool mcast) +{ + struct iwl_mvm_add_sta_key_cmd cmd = {}; + __le16 key_flags; + int ret; + u32 status; + + key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + STA_KEY_FLG_KEYID_MSK); + key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); + key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); + + if (mcast) + key_flags |= cpu_to_le16(STA_KEY_MULTICAST); + + cmd.key_flags = key_flags; + cmd.key_offset = keyconf->hw_key_idx; + cmd.sta_id = sta_id; + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), + &cmd, &status); + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); + break; + } + + return ret; +} + +int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf, + bool have_key_offset) +{ + bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); + u8 sta_id; + int ret; + static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; + + lockdep_assert_held(&mvm->mutex); + + /* Get the station id from the mvm local station table */ + sta_id = iwl_mvm_get_key_sta_id(vif, sta); + if (sta_id == IWL_MVM_STATION_COUNT) { + IWL_ERR(mvm, "Failed to find station id\n"); + return -EINVAL; + } + + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); + goto end; + } + + /* + * It is possible that the 'sta' parameter is NULL, and thus + * there is a need to retrieve the sta from the local station table. 
+ */ + if (!sta) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) { + IWL_ERR(mvm, "Invalid station id\n"); + return -EINVAL; + } + } + + if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) + return -EINVAL; + + if (!have_key_offset) { + /* + * The D3 firmware hardcodes the PTK offset to 0, so we have to + * configure it there. As a result, this workaround exists to + * let the caller set the key offset (hw_key_idx), see d3.c. + */ + keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm); + if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) + return -ENOSPC; + } + + ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast); + if (ret) { + __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); + goto end; + } + + /* + * For WEP, the same key is used for multicast and unicast. Upload it + * again, using the same key offset, and now pointing the other one + * to the same key slot (offset). + * If this fails, remove the original as well. + */ + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { + ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast); + if (ret) { + __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); + __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); + } + } + +end: + IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, + sta ? sta->addr : zero_addr, ret); + return ret; +} + +int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); + u8 sta_id; + int ret, i; + + lockdep_assert_held(&mvm->mutex); + + /* Get the station id from the mvm local station table */ + sta_id = iwl_mvm_get_key_sta_id(vif, sta); + + IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", + keyconf->keyidx, sta_id); + + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); + + if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { + IWL_ERR(mvm, "offset %d not used in fw key table.\n", + keyconf->hw_key_idx); + return -ENOENT; + } + + /* track which key was deleted last */ + for (i = 0; i < STA_KEY_MAX_NUM; i++) { + if (mvm->fw_key_deleted[i] < U8_MAX) + mvm->fw_key_deleted[i]++; + } + mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; + + if (sta_id == IWL_MVM_STATION_COUNT) { + IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); + return 0; + } + + /* + * It is possible that the 'sta' parameter is NULL, and thus + * there is a need to retrieve the sta from the local station table, + * for example when a GTK is removed (where the sta_id will then be + * the AP ID, and no station was passed by mac80211.) 
+ */ + if (!sta) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + if (!sta) { + IWL_ERR(mvm, "Invalid station id\n"); + return -EINVAL; + } + } + + if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) + return -EINVAL; + + ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); + if (ret) + return ret; + + /* delete WEP key twice to get rid of (now useless) offset */ + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) + ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); + + return ret; +} + +void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, + u16 *phase1key) +{ + struct iwl_mvm_sta *mvm_sta; + u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); + bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); + + if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) + return; + + rcu_read_lock(); + + if (!sta) { + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (WARN_ON(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return; + } + } + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, + iv32, phase1key, CMD_ASYNC); + rcu_read_unlock(); +} + +void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_add_sta_cmd cmd = { + .add_modify = STA_MODE_MODIFY, + .sta_id = mvmsta->sta_id, + .station_flags_msk = cpu_to_le32(STA_FLG_PS), + .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), + }; + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); +} + +void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + enum ieee80211_frame_release_type reason, + u16 cnt, u16 tids, bool more_data, + bool agg) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_add_sta_cmd cmd = { + .add_modify = STA_MODE_MODIFY, + .sta_id = mvmsta->sta_id, + .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, + .sleep_tx_count = cpu_to_le16(cnt), + .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), + }; + int tid, ret; + unsigned long _tids = tids; + + /* convert TIDs to ACs - we don't support TSPEC so that's OK + * Note that this field is reserved and unused by firmware not + * supporting GO uAPSD, so it's safe to always do this. 
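+	 *
+	 * As a worked example: a 'tids' bitmap of BIT(6) | BIT(7) (the
+	 * voice TIDs) maps through tid_to_ucode_ac[] to AC_VO only, so
+	 * just that AC bit gets set in cmd.awake_acs below.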
+ */ + for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) + cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); + + /* If we're releasing frames from aggregation queues then check if the + * all queues combined that we're releasing frames from have + * - more frames than the service period, in which case more_data + * needs to be set + * - fewer than 'cnt' frames, in which case we need to adjust the + * firmware command (but do that unconditionally) + */ + if (agg) { + int remaining = cnt; + + spin_lock_bh(&mvmsta->lock); + for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { + struct iwl_mvm_tid_data *tid_data; + u16 n_queued; + + tid_data = &mvmsta->tid_data[tid]; + if (WARN(tid_data->state != IWL_AGG_ON && + tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, + "TID %d state is %d\n", + tid, tid_data->state)) { + spin_unlock_bh(&mvmsta->lock); + ieee80211_sta_eosp(sta); + return; + } + + n_queued = iwl_mvm_tid_queued(tid_data); + if (n_queued > remaining) { + more_data = true; + remaining = 0; + break; + } + remaining -= n_queued; + } + spin_unlock_bh(&mvmsta->lock); + + cmd.sleep_tx_count = cpu_to_le16(cnt - remaining); + if (WARN_ON(cnt - remaining == 0)) { + ieee80211_sta_eosp(sta); + return; + } + } + + /* Note: this is ignored by firmware not supporting GO uAPSD */ + if (more_data) + cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA); + + if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { + mvmsta->next_status_eosp = true; + cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL); + } else { + cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD); + } + + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); +} + +void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; + struct ieee80211_sta *sta; + u32 sta_id = le32_to_cpu(notif->sta_id); + + if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) + return; + + rcu_read_lock(); + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (!IS_ERR_OR_NULL(sta)) + ieee80211_sta_eosp(sta); + rcu_read_unlock(); +} + +void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, bool disable) +{ + struct iwl_mvm_add_sta_cmd cmd = { + .add_modify = STA_MODE_MODIFY, + .sta_id = mvmsta->sta_id, + .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, + .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), + .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), + }; + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); +} + +void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + bool disable) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + spin_lock_bh(&mvm_sta->lock); + + if (mvm_sta->disable_tx == disable) { + spin_unlock_bh(&mvm_sta->lock); + return; + } + + mvm_sta->disable_tx = disable; + + /* + * Tell mac80211 to start/stop queuing tx for this station, + * but don't stop queuing if there are still pending frames + * for this station. 
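+	 * Concretely, ieee80211_sta_block_awake() below is skipped only
+	 * when Tx is being re-enabled while this station still has
+	 * pending frames.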
+ */ + if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) + ieee80211_sta_block_awake(mvm->hw, sta, disable); + + iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); + + spin_unlock_bh(&mvm_sta->lock); +} + +void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + bool disable) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvm_sta; + int i; + + lockdep_assert_held(&mvm->mutex); + + /* Block/unblock all the stations of the given mvmvif */ + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + if (mvm_sta->mac_id_n_color != + FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) + continue; + + iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); + } +} + +void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvmsta; + + rcu_read_lock(); + + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + + if (!WARN_ON(!mvmsta)) + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); + + rcu_read_unlock(); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h new file mode 100644 index 000000000000..eedb215eba3f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -0,0 +1,426 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __sta_h__ +#define __sta_h__ + +#include +#include +#include + +#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */ +#include "fw-api.h" /* IWL_MVM_STATION_COUNT */ +#include "rs.h" + +struct iwl_mvm; +struct iwl_mvm_vif; + +/** + * DOC: station table - introduction + * + * The station table is a list of data structure that reprensent the stations. + * In STA/P2P client mode, the driver will hold one station for the AP/ GO. + * In GO/AP mode, the driver will have as many stations as associated clients. + * All these stations are reflected in the fw's station table. The driver + * keeps the fw's station table up to date with the ADD_STA command. Stations + * can be removed by the REMOVE_STA command. + * + * All the data related to a station is held in the structure %iwl_mvm_sta + * which is embed in the mac80211's %ieee80211_sta (in the drv_priv) area. + * This data includes the index of the station in the fw, per tid information + * (sequence numbers, Block-ack state machine, etc...). The stations are + * created and deleted by the %sta_state callback from %ieee80211_ops. + * + * The driver holds a map: %fw_id_to_mac_id that allows to fetch a + * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw + * station index. That way, the driver is able to get the tid related data in + * O(1) in time sensitive paths (Tx / Tx response / BA notification). These + * paths are triggered by the fw, and the driver needs to get a pointer to the + * %ieee80211 structure. This map helps to get that pointer quickly. + */ + +/** + * DOC: station table - locking + * + * As stated before, the station is created / deleted by mac80211's %sta_state + * callback from %ieee80211_ops which can sleep. The next paragraph explains + * the locking of a single stations, the next ones relates to the station + * table. + * + * The station holds the sequence number per tid. So this data needs to be + * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack + * information (the state machine / and the logic that checks if the queues + * were drained), so it also needs to be accessible from the Tx response flow. + * In short, the station needs to be access from sleepable context as well as + * from tasklets, so the station itself needs a spinlock. + * + * The writers of %fw_id_to_mac_id map are serialized by the global mutex of + * the mvm op_mode. This is possible since %sta_state can sleep. 
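+ * Writers therefore update the map under mvm->mutex using
+ * rcu_assign_pointer() or RCU_INIT_POINTER(), as iwl_mvm_add_sta() and
+ * iwl_mvm_rm_sta() do.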
+ * The pointers in this map are RCU protected, hence we won't replace the + * station while we have Tx / Tx response / BA notification running. + * + * If a station is deleted while it still has packets in its A-MPDU queues, + * then the reclaim flow will notice that there is no station in the map for + * sta_id and it will dump the responses. + */ + +/** + * DOC: station table - internal stations + * + * The FW needs a few internal stations that are not reflected in + * mac80211, such as broadcast station in AP / GO mode, or AUX sta for + * scanning and P2P device (during the GO negotiation). + * For these kind of stations we have %iwl_mvm_int_sta struct which holds the + * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta. + * Usually the data for these stations is static, so no locking is required, + * and no TID data as this is also not needed. + * One thing to note, is that these stations have an ID in the fw, but not + * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id + * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of + * pointers from this mapping need to check that the value is not error + * or NULL. + * + * Currently there is only one auxiliary station for scanning, initialized + * on init. + */ + +/** + * DOC: station table - AP Station in STA mode + * + * %iwl_mvm_vif includes the index of the AP station in the fw's STA table: + * %ap_sta_id. To get the point to the corresponding %ieee80211_sta, + * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove + * the AP station from the fw before setting the MAC context as unassociated. + * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is + * removed by mac80211, but the station won't be removed in the fw until the + * VIF is set as unassociated. Then, %ap_sta_id will be invalidated. + */ + +/** + * DOC: station table - Drain vs. Flush + * + * Flush means that all the frames in the SCD queue are dumped regardless the + * station to which they were sent. We do that when we disassociate and before + * we remove the STA of the AP. The flush can be done synchronously against the + * fw. + * Drain means that the fw will drop all the frames sent to a specific station. + * This is useful when a client (if we are IBSS / GO or AP) disassociates. In + * that case, we need to drain all the frames for that client from the AC queues + * that are shared with the other clients. Only then, we can remove the STA in + * the fw. In order to do so, we track the non-AMPDU packets for each station. + * If mac80211 removes a STA and if it still has non-AMPDU packets pending in + * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all + * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped + * (we know about it with its Tx response), we remove the station in fw and set + * it as %NULL in %fw_id_to_mac_id: this is the purpose of + * %iwl_mvm_sta_drained_wk. + */ + +/** + * DOC: station table - fw restart + * + * When the fw asserts, or we have any other issue that requires to reset the + * driver, we require mac80211 to reconfigure the driver. Since the private + * data of the stations is embed in mac80211's %ieee80211_sta, that data will + * not be zeroed and needs to be reinitialized manually. + * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and that will hint us + * that we must not allocate a new sta_id but reuse the previous one. 
This + * means that the stations being re-added after the reset will have the same + * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id + * map, since the stations aren't in the fw any more. Internal stations that + * are not added by mac80211 will be re-added in the init flow that is called + * after the restart: mac80211 call's %iwl_mvm_mac_start which calls to + * %iwl_mvm_up. + */ + +/** + * DOC: AP mode - PS + * + * When a station is asleep, the fw will set it as "asleep". All frames on + * shared queues (i.e. non-aggregation queues) to that station will be dropped + * by the fw (%TX_STATUS_FAIL_DEST_PS failure code). + * + * AMPDUs are in a separate queue that is stopped by the fw. We just need to + * let mac80211 know when there are frames in these queues so that it can + * properly handle trigger frames. + * + * When a trigger frame is received, mac80211 tells the driver to send frames + * from the AMPDU queues or sends frames to non-aggregation queues itself, + * depending on which ACs are delivery-enabled and what TID has frames to + * transmit. Note that mac80211 has all the knowledge since all the non-agg + * frames are buffered / filtered, and the driver tells mac80211 about agg + * frames). The driver needs to tell the fw to let frames out even if the + * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count. + * + * When we receive a frame from that station with PM bit unset, the driver + * needs to let the fw know that this station isn't asleep any more. This is + * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the + * station's wakeup. + * + * For a GO, the Service Period might be cut short due to an absence period + * of the GO. In this (and all other cases) the firmware notifies us with the + * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we + * already sent to the device will be rejected again. + * + * See also "AP support for powersaving clients" in mac80211.h. + */ + +/** + * enum iwl_mvm_agg_state + * + * The state machine of the BA agreement establishment / tear down. + * These states relate to a specific RA / TID. + * + * @IWL_AGG_OFF: aggregation is not used + * @IWL_AGG_STARTING: aggregation are starting (between start and oper) + * @IWL_AGG_ON: aggregation session is up + * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the + * HW queue to be empty from packets for this RA /TID. + */ +enum iwl_mvm_agg_state { + IWL_AGG_OFF = 0, + IWL_AGG_STARTING, + IWL_AGG_ON, + IWL_EMPTYING_HW_QUEUE_ADDBA, + IWL_EMPTYING_HW_QUEUE_DELBA, +}; + +/** + * struct iwl_mvm_tid_data - holds the states for each RA / TID + * @seq_number: the next WiFi sequence number to use + * @next_reclaimed: the WiFi sequence number of the next packet to be acked. + * This is basically (last acked packet++). + * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the + * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). + * @reduced_tpc: Reduced tx power. Holds the data between the + * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). + * @state: state of the BA agreement establishment / tear down. + * @txq_id: Tx queue used by the BA session + * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or + * the first packet to be sent in legacy HW queue in Tx AGG stop flow. 
+ * Basically when next_reclaimed reaches ssn, we can tell mac80211 that + * we are ready to finish the Tx AGG stop / start flow. + * @tx_time: medium time consumed by this A-MPDU + */ +struct iwl_mvm_tid_data { + u16 seq_number; + u16 next_reclaimed; + /* The rest is Tx AGG related */ + u32 rate_n_flags; + u8 reduced_tpc; + enum iwl_mvm_agg_state state; + u16 txq_id; + u16 ssn; + u16 tx_time; +}; + +static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) +{ + return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number), + tid_data->next_reclaimed); +} + +/** + * struct iwl_mvm_sta - representation of a station in the driver + * @sta_id: the index of the station in the fw (will be replaced by id_n_color) + * @tfd_queue_msk: the tfd queues used by the station + * @hw_queue: per-AC mapping of the TFD queues used by station + * @mac_id_n_color: the MAC context this station is linked to + * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for + * tid. + * @max_agg_bufsize: the maximal size of the AGG buffer for this station + * @bt_reduced_txpower: is reduced tx power enabled for this station + * @next_status_eosp: the next reclaimed packet is a PS-Poll response and + * we need to signal the EOSP + * @lock: lock to protect the whole struct. Since %tid_data is access from Tx + * and from Tx response flow, it needs a spinlock. + * @tid_data: per tid data. Look at %iwl_mvm_tid_data. + * @tx_protection: reference counter for controlling the Tx protection. + * @tt_tx_protection: is thermal throttling enable Tx protection? + * @disable_tx: is tx to this STA disabled? + * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) + * + * When mac80211 creates a station it reserves some space (hw->sta_data_size) + * in the structure for use by driver. This structure is placed in that + * space. 
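+ *
+ * A minimal access sketch, using only the helpers declared in this file:
+ *
+ *	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ *
+ *	spin_lock_bh(&mvmsta->lock);
+ *	mvmsta->tid_data[tid].seq_number = seq;
+ *	spin_unlock_bh(&mvmsta->lock);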
+ * + */ +struct iwl_mvm_sta { + u32 sta_id; + u32 tfd_queue_msk; + u8 hw_queue[IEEE80211_NUM_ACS]; + u32 mac_id_n_color; + u16 tid_disable_agg; + u8 max_agg_bufsize; + bool bt_reduced_txpower; + bool next_status_eosp; + spinlock_t lock; + struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; + struct iwl_lq_sta lq_sta; + struct ieee80211_vif *vif; + + /* Temporary, until the new TLC will control the Tx protection */ + s8 tx_protection; + bool tt_tx_protection; + + bool disable_tx; + u8 agg_tids; +}; + +static inline struct iwl_mvm_sta * +iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) +{ + return (void *)sta->drv_priv; +} + +/** + * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or + * broadcast) + * @sta_id: the index of the station in the fw (will be replaced by id_n_color) + * @tfd_queue_msk: the tfd queues used by the station + */ +struct iwl_mvm_int_sta { + u32 sta_id; + u32 tfd_queue_msk; +}; + +int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + bool update); +int iwl_mvm_add_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_update_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_rm_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u8 sta_id); +int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + bool have_key_offset); +int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf); + +void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, + u16 *phase1key); + +void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + +/* AMPDU */ +int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u16 ssn, bool start); +int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); +int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u8 buf_size); +int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); + +int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); +void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); + +int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + +void iwl_mvm_sta_drained_wk(struct work_struct *wk); +void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, + struct ieee80211_sta *sta); +void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + enum ieee80211_frame_release_type reason, + u16 cnt, u16 tids, bool more_data, + bool agg); +int iwl_mvm_drain_sta(struct iwl_mvm *mvm, 
struct iwl_mvm_sta *mvmsta, + bool drain); +void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, bool disable); +void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + bool disable); +void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + bool disable); +void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + +#endif /* __sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c new file mode 100644 index 000000000000..fe2fa5650443 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -0,0 +1,732 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include +#include "mvm.h" +#include "time-event.h" +#include "iwl-io.h" +#include "iwl-prph.h" + +#define TU_TO_US(x) (x * 1024) +#define TU_TO_MS(x) (TU_TO_US(x) / 1000) + +void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (!sta || IS_ERR(sta) || !sta->tdls) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + ieee80211_tdls_oper_request(mvmsta->vif, sta->addr, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED, + GFP_KERNEL); + } +} + +int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int count = 0; + int i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (!sta || IS_ERR(sta) || !sta->tdls) + continue; + + if (vif) { + mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (mvmsta->vif != vif) + continue; + } + + count++; + } + + return count; +} + +static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_rx_packet *pkt; + struct iwl_tdls_config_res *resp; + struct iwl_tdls_config_cmd tdls_cfg_cmd = {}; + struct iwl_host_cmd cmd = { + .id = TDLS_CONFIG_CMD, + .flags = CMD_WANT_SKB, + .data = { &tdls_cfg_cmd, }, + .len = { sizeof(struct iwl_tdls_config_cmd), }, + }; + struct ieee80211_sta *sta; + int ret, i, cnt; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + tdls_cfg_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID; + tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */ + + /* for now the Tx cmd is empty and unused */ + + /* populate TDLS peer data */ + cnt = 0; + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta) || !sta->tdls) + continue; + + tdls_cfg_cmd.sta_info[cnt].sta_id = i; + tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = + IWL_MVM_TDLS_FW_TID; + tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0); + tdls_cfg_cmd.sta_info[cnt].is_initiator = + cpu_to_le32(sta->tdls_initiator ? 
1 : 0); + + cnt++; + } + + tdls_cfg_cmd.tdls_peer_count = cnt; + IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (WARN_ON_ONCE(ret)) + return; + + pkt = cmd.resp_pkt; + + WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)); + + /* we don't really care about the response at this point */ + + iwl_free_resp(&cmd); +} + +void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool sta_added) +{ + int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif); + + /* when the first peer joins, send a power update first */ + if (tdls_sta_cnt == 1 && sta_added) + iwl_mvm_power_update_mac(mvm); + + /* configure the FW with TDLS peer info */ + iwl_mvm_tdls_config(mvm, vif); + + /* when the last peer leaves, send a power update last */ + if (tdls_sta_cnt == 0 && !sta_added) + iwl_mvm_power_update_mac(mvm); +} + +void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; + + /* + * iwl_mvm_protect_session() reads directly from the device + * (the system time), so make sure it is available. + */ + if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS)) + return; + + mutex_lock(&mvm->mutex); + /* Protect the session to hear the TDLS setup response on the channel */ + iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); + mutex_unlock(&mvm->mutex); + + iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS); +} + +static const char * +iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state) +{ + switch (state) { + case IWL_MVM_TDLS_SW_IDLE: + return "IDLE"; + case IWL_MVM_TDLS_SW_REQ_SENT: + return "REQ SENT"; + case IWL_MVM_TDLS_SW_RESP_RCVD: + return "RESP RECEIVED"; + case IWL_MVM_TDLS_SW_REQ_RCVD: + return "REQ RECEIVED"; + case IWL_MVM_TDLS_SW_ACTIVE: + return "ACTIVE"; + } + + return NULL; +} + +static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, + enum iwl_mvm_tdls_cs_state state) +{ + if (mvm->tdls_cs.state == state) + return; + + IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n", + iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state), + iwl_mvm_tdls_cs_state_str(state)); + mvm->tdls_cs.state = state; + + /* we only send requests to our switching peer - update sent time */ + if (state == IWL_MVM_TDLS_SW_REQ_SENT) + mvm->tdls_cs.peer.sent_timestamp = + iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); + + if (state == IWL_MVM_TDLS_SW_IDLE) + mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; +} + +void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data; + struct ieee80211_sta *sta; + unsigned int delay; + struct iwl_mvm_sta *mvmsta; + struct ieee80211_vif *vif; + u32 sta_id = le32_to_cpu(notif->sta_id); + + lockdep_assert_held(&mvm->mutex); + + /* can fail sometimes */ + if (!le32_to_cpu(notif->status)) { + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); + return; + } + + if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT)) + return; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + /* the station may not be here, but if it is, it must be a TDLS peer */ + if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) + return; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + vif = mvmsta->vif; + + /* + * Update state and possibly switch again after this is over 
(DTIM). + * Also convert TU to msec. + */ + delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); + mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, + msecs_to_jiffies(delay)); + + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE); +} + +static int +iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, + enum iwl_tdls_channel_switch_type type, + const u8 *peer, bool peer_initiator, u32 timestamp) +{ + bool same_peer = false; + int ret = 0; + + /* get the existing peer if it's there */ + if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && + mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { + struct ieee80211_sta *sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], + lockdep_is_held(&mvm->mutex)); + if (!IS_ERR_OR_NULL(sta)) + same_peer = ether_addr_equal(peer, sta->addr); + } + + switch (mvm->tdls_cs.state) { + case IWL_MVM_TDLS_SW_IDLE: + /* + * might be spurious packet from the peer after the switch is + * already done + */ + if (type == TDLS_MOVE_CH) + ret = -EINVAL; + break; + case IWL_MVM_TDLS_SW_REQ_SENT: + /* only allow requests from the same peer */ + if (!same_peer) + ret = -EBUSY; + else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH && + !peer_initiator) + /* + * We received a ch-switch request while an outgoing + * one is pending. Allow it if the peer is the link + * initiator. + */ + ret = -EBUSY; + else if (type == TDLS_SEND_CHAN_SW_REQ) + /* wait for idle before sending another request */ + ret = -EBUSY; + else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp) + /* we got a stale response - ignore it */ + ret = -EINVAL; + break; + case IWL_MVM_TDLS_SW_RESP_RCVD: + /* + * we are waiting for the FW to give an "active" notification, + * so ignore requests in the meantime + */ + ret = -EBUSY; + break; + case IWL_MVM_TDLS_SW_REQ_RCVD: + /* as above, allow the link initiator to proceed */ + if (type == TDLS_SEND_CHAN_SW_REQ) { + if (!same_peer) + ret = -EBUSY; + else if (peer_initiator) /* they are the initiator */ + ret = -EBUSY; + } else if (type == TDLS_MOVE_CH) { + ret = -EINVAL; + } + break; + case IWL_MVM_TDLS_SW_ACTIVE: + /* + * the only valid request when active is a request to return + * to the base channel by the current off-channel peer + */ + if (type != TDLS_MOVE_CH || !same_peer) + ret = -EBUSY; + break; + } + + if (ret) + IWL_DEBUG_TDLS(mvm, + "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n", + type, mvm->tdls_cs.state, peer, same_peer, + peer_initiator); + + return ret; +} + +static int +iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_tdls_channel_switch_type type, + const u8 *peer, bool peer_initiator, + u8 oper_class, + struct cfg80211_chan_def *chandef, + u32 timestamp, u16 switch_time, + u16 switch_timeout, struct sk_buff *skb, + u32 ch_sw_tm_ie) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + struct ieee80211_tx_info *info; + struct ieee80211_hdr *hdr; + struct iwl_tdls_channel_switch_cmd cmd = {0}; + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator, + timestamp); + if (ret) + return ret; + + if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) { + ret = -EINVAL; + goto out; + } + + cmd.switch_type = type; + cmd.timing.frame_timestamp = cpu_to_le32(timestamp); + cmd.timing.switch_time = cpu_to_le32(switch_time); + cmd.timing.switch_timeout = cpu_to_le32(switch_timeout); + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, peer); + if (!sta) { + 
rcu_read_unlock(); + ret = -ENOENT; + goto out; + } + mvmsta = iwl_mvm_sta_from_mac80211(sta); + cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id); + + if (!chandef) { + if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && + mvm->tdls_cs.peer.chandef.chan) { + /* actually moving to the channel */ + chandef = &mvm->tdls_cs.peer.chandef; + } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE && + type == TDLS_MOVE_CH) { + /* we need to return to base channel */ + struct ieee80211_chanctx_conf *chanctx = + rcu_dereference(vif->chanctx_conf); + + if (WARN_ON_ONCE(!chanctx)) { + rcu_read_unlock(); + goto out; + } + + chandef = &chanctx->def; + } + } + + if (chandef) { + cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? + PHY_BAND_24 : PHY_BAND_5); + cmd.ci.channel = chandef->chan->hw_value; + cmd.ci.width = iwl_mvm_get_channel_width(chandef); + cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); + } + + /* keep quota calculation simple for now - 50% of DTIM for TDLS */ + cmd.timing.max_offchan_duration = + cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int) / 2); + + /* Switch time is the first element in the switch-timing IE. */ + cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); + + info = IEEE80211_SKB_CB(skb); + hdr = (void *)skb->data; + if (info->control.hw_key) { + if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) { + rcu_read_unlock(); + ret = -EINVAL; + goto out; + } + iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd); + } + + iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, + mvmsta->sta_id); + + iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, + hdr->frame_control); + rcu_read_unlock(); + + memcpy(cmd.frame.data, skb->data, skb->len); + + ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, + sizeof(cmd), &cmd); + if (ret) { + IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", + ret); + goto out; + } + + /* channel switch has started, update state */ + if (type != TDLS_MOVE_CH) { + mvm->tdls_cs.cur_sta_id = mvmsta->sta_id; + iwl_mvm_tdls_update_cs_state(mvm, + type == TDLS_SEND_CHAN_SW_REQ ? 
+ IWL_MVM_TDLS_SW_REQ_SENT : + IWL_MVM_TDLS_SW_REQ_RCVD); + } else { + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD); + } + +out: + + /* channel switch failed - we are idle */ + if (ret) + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); + + return ret; +} + +void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) +{ + struct iwl_mvm *mvm; + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + struct ieee80211_vif *vif; + unsigned int delay; + int ret; + + mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work); + mutex_lock(&mvm->mutex); + + /* called after an active channel switch has finished or timed-out */ + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); + + /* station might be gone, in that case do nothing */ + if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) + goto out; + + sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], + lockdep_is_held(&mvm->mutex)); + /* the station may not be here, but if it is, it must be a TDLS peer */ + if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) + goto out; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + vif = mvmsta->vif; + ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, + TDLS_SEND_CHAN_SW_REQ, + sta->addr, + mvm->tdls_cs.peer.initiator, + mvm->tdls_cs.peer.op_class, + &mvm->tdls_cs.peer.chandef, + 0, 0, 0, + mvm->tdls_cs.peer.skb, + mvm->tdls_cs.peer.ch_sw_tm_ie); + if (ret) + IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret); + + /* retry after a DTIM if we failed sending now */ + delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); + queue_delayed_work(system_wq, &mvm->tdls_cs.dwork, + msecs_to_jiffies(delay)); +out: + mutex_unlock(&mvm->mutex); +} + +int +iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_sta *mvmsta; + unsigned int delay; + int ret; + + mutex_lock(&mvm->mutex); + + IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", + sta->addr, chandef->chan->center_freq, chandef->width); + + /* we only support a single peer for channel switching */ + if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) { + IWL_DEBUG_TDLS(mvm, + "Existing peer. Can't start switch with %pM\n", + sta->addr); + ret = -EBUSY; + goto out; + } + + ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, + TDLS_SEND_CHAN_SW_REQ, + sta->addr, sta->tdls_initiator, + oper_class, chandef, 0, 0, 0, + tmpl_skb, ch_sw_tm_ie); + if (ret) + goto out; + + /* + * Mark the peer as "in tdls switch" for this vif. We only allow a + * single such peer per vif. + */ + mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL); + if (!mvm->tdls_cs.peer.skb) { + ret = -ENOMEM; + goto out; + } + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvm->tdls_cs.peer.sta_id = mvmsta->sta_id; + mvm->tdls_cs.peer.chandef = *chandef; + mvm->tdls_cs.peer.initiator = sta->tdls_initiator; + mvm->tdls_cs.peer.op_class = oper_class; + mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie; + + /* + * Wait for 2 DTIM periods before attempting the next switch. The next + * switch will be made sooner if the current one completes before that. 
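The delayed-work arming around here boils down to TU arithmetic: one TU is 1024 microseconds, and the retry and back-off delays are one or two DTIM periods expressed in TU. A small standalone sketch of the same computation follows; the dtim_period and beacon_int values are only illustrative.

/* Minimal userspace sketch of the TU-to-milliseconds conversion used when
 * arming tdls_cs.dwork. The macros mirror the ones defined in tdls.c. */
#include <stdio.h>

#define TU_TO_US(x) ((x) * 1024)        /* one TU is 1024 microseconds */
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

int main(void)
{
    unsigned int dtim_period = 2;       /* DTIM every 2 beacons (example) */
    unsigned int beacon_int = 100;      /* beacon interval in TU (example) */

    /* retry delay after a failed switch: one DTIM period */
    unsigned int one_dtim_ms = TU_TO_MS(dtim_period * beacon_int);
    /* back-off before the next switch attempt: two DTIM periods */
    unsigned int two_dtim_ms = 2 * TU_TO_MS(dtim_period * beacon_int);

    printf("1 DTIM = %u ms, 2 DTIM = %u ms\n", one_dtim_ms, two_dtim_ms);
    /* prints 204 ms and 408 ms for these example values */
    return 0;
}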
+ */ + delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int); + mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, + msecs_to_jiffies(delay)); + +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_sta *cur_sta; + bool wait_for_phy = false; + + mutex_lock(&mvm->mutex); + + IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); + + /* we only support a single peer for channel switching */ + if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) { + IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); + goto out; + } + + cur_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], + lockdep_is_held(&mvm->mutex)); + /* make sure it's the same peer */ + if (cur_sta != sta) + goto out; + + /* + * If we're currently in a switch because of the now canceled peer, + * wait a DTIM here to make sure the phy is back on the base channel. + * We can't otherwise force it. + */ + if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id && + mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) + wait_for_phy = true; + + mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + dev_kfree_skb(mvm->tdls_cs.peer.skb); + mvm->tdls_cs.peer.skb = NULL; + +out: + mutex_unlock(&mvm->mutex); + + /* make sure the phy is on the base channel */ + if (wait_for_phy) + msleep(TU_TO_MS(vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int)); + + /* flush the channel switch state */ + flush_delayed_work(&mvm->tdls_cs.dwork); + + IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr); +} + +void +iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_tdls_ch_sw_params *params) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + enum iwl_tdls_channel_switch_type type; + unsigned int delay; + const char *action_str = + params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ? + "REQ" : "RESP"; + + mutex_lock(&mvm->mutex); + + IWL_DEBUG_TDLS(mvm, + "Received TDLS ch switch action %s from %pM status %d\n", + action_str, params->sta->addr, params->status); + + /* + * we got a non-zero status from a peer we were switching to - move to + * the idle state and retry again later + */ + if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && + params->status != 0 && + mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && + mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { + struct ieee80211_sta *cur_sta; + + /* make sure it's the same peer */ + cur_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], + lockdep_is_held(&mvm->mutex)); + if (cur_sta == params->sta) { + iwl_mvm_tdls_update_cs_state(mvm, + IWL_MVM_TDLS_SW_IDLE); + goto retry; + } + } + + type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ? 
+ TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH; + + iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr, + params->sta->tdls_initiator, 0, + params->chandef, params->timestamp, + params->switch_time, + params->switch_timeout, + params->tmpl_skb, + params->ch_sw_tm_ie); + +retry: + /* register a timeout in case we don't succeed in switching */ + delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int * + 1024 / 1000; + mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, + msecs_to_jiffies(delay)); + mutex_unlock(&mvm->mutex); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h new file mode 100644 index 000000000000..79ab6beb6b26 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h @@ -0,0 +1,97 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __IWL_MVM_TESTMODE_H__ +#define __IWL_MVM_TESTMODE_H__ + +/** + * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA + * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute) + * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32) + * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32) + * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32) + */ +enum iwl_mvm_testmode_attrs { + IWL_MVM_TM_ATTR_UNSPEC, + IWL_MVM_TM_ATTR_CMD, + IWL_MVM_TM_ATTR_NOA_DURATION, + IWL_MVM_TM_ATTR_BEACON_FILTER_STATE, + + /* keep last */ + NUM_IWL_MVM_TM_ATTRS, + IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1, +}; + +/** + * enum iwl_mvm_testmode_commands - MVM testmode commands + * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing + * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on + */ +enum iwl_mvm_testmode_commands { + IWL_MVM_TM_CMD_SET_NOA, + IWL_MVM_TM_CMD_SET_BEACON_FILTER, +}; + +#endif /* __IWL_MVM_TESTMODE_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c new file mode 100644 index 000000000000..7530eb23035d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -0,0 +1,872 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include + +#include "iwl-notif-wait.h" +#include "iwl-trans.h" +#include "fw-api.h" +#include "time-event.h" +#include "mvm.h" +#include "iwl-io.h" +#include "iwl-prph.h" + +/* + * For the high priority TE use a time event type that has similar priority to + * the FW's action scan priority. + */ +#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE +#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC + +void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data) +{ + lockdep_assert_held(&mvm->time_event_lock); + + if (!te_data->vif) + return; + + list_del(&te_data->list); + te_data->running = false; + te_data->uid = 0; + te_data->id = TE_MAX; + te_data->vif = NULL; +} + +void iwl_mvm_roc_done_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); + u32 queues = 0; + + /* + * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. + * This will cause the TX path to drop offchannel transmissions. + * That would also be done by mac80211, but it is racy, in particular + * in the case that the time event actually completed in the firmware + * (which is handled in iwl_mvm_te_handle_notif). + */ + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) { + queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE); + iwl_mvm_unref(mvm, IWL_MVM_REF_ROC); + } + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { + queues |= BIT(mvm->aux_queue); + iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX); + } + + synchronize_net(); + + /* + * Flush the offchannel queue -- this is called when the time + * event finishes or is canceled, so that frames queued for it + * won't get stuck on the queue and be transmitted in the next + * time event. 
+ * We have to send the command asynchronously since this cannot + * be under the mutex for locking reasons, but that's not an + * issue as it will have to complete before the next command is + * executed, and a new time event means a new command. + */ + iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC); +} + +static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) +{ + /* + * Of course, our status bit is just as racy as mac80211, so in + * addition, fire off the work struct which will drop all frames + * from the hardware queues that made it through the race. First + * it will of course synchronize the TX path to make sure that + * any *new* TX will be rejected. + */ + schedule_work(&mvm->roc_done_wk); +} + +static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm) +{ + struct ieee80211_vif *csa_vif; + + rcu_read_lock(); + + csa_vif = rcu_dereference(mvm->csa_vif); + if (!csa_vif || !csa_vif->csa_active) + goto out_unlock; + + IWL_DEBUG_TE(mvm, "CSA NOA started\n"); + + /* + * CSA NoA is started but we still have beacons to + * transmit on the current channel. + * So we just do nothing here and the switch + * will be performed on the last TBTT. + */ + if (!ieee80211_csa_is_complete(csa_vif)) { + IWL_WARN(mvm, "CSA NOA started too early\n"); + goto out_unlock; + } + + ieee80211_csa_finish(csa_vif); + + rcu_read_unlock(); + + RCU_INIT_POINTER(mvm->csa_vif, NULL); + + return; + +out_unlock: + rcu_read_unlock(); +} + +static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const char *errmsg) +{ + if (vif->type != NL80211_IFTYPE_STATION) + return false; + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) + return false; + if (errmsg) + IWL_ERR(mvm, "%s\n", errmsg); + + iwl_mvm_connection_loss(mvm, vif, errmsg); + return true; +} + +static void +iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data, + struct iwl_time_event_notif *notif) +{ + struct ieee80211_vif *vif = te_data->vif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (!notif->status) + IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); + + switch (te_data->vif->type) { + case NL80211_IFTYPE_AP: + if (!notif->status) + mvmvif->csa_failed = true; + iwl_mvm_csa_noa_start(mvm); + break; + case NL80211_IFTYPE_STATION: + if (!notif->status) { + iwl_mvm_connection_loss(mvm, vif, + "CSA TE failed to start"); + break; + } + iwl_mvm_csa_client_absent(mvm, te_data->vif); + ieee80211_chswitch_done(te_data->vif, true); + break; + default: + /* should never happen */ + WARN_ON_ONCE(1); + break; + } + + /* we don't need it anymore */ + iwl_mvm_te_clear_data(mvm, te_data); +} + +static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, + struct iwl_time_event_notif *notif, + struct iwl_mvm_time_event_data *te_data) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_time_event *te_trig; + int i; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT); + te_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig)) + return; + + for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { + u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id); + u32 trig_action_bitmap = + le32_to_cpu(te_trig->time_events[i].action_bitmap); + u32 trig_status_bitmap = + le32_to_cpu(te_trig->time_events[i].status_bitmap); + + if (trig_te_id != te_data->id || + !(trig_action_bitmap & le32_to_cpu(notif->action)) || + 
!(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) + continue; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "Time event %d Action 0x%x received status: %d", + te_data->id, + le32_to_cpu(notif->action), + le32_to_cpu(notif->status)); + break; + } +} + +/* + * Handles a FW notification for an event that is known to the driver. + * + * @mvm: the mvm component + * @te_data: the time event data + * @notif: the notification data corresponding the time event data. + */ +static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data, + struct iwl_time_event_notif *notif) +{ + lockdep_assert_held(&mvm->time_event_lock); + + IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n", + le32_to_cpu(notif->unique_id), + le32_to_cpu(notif->action)); + + iwl_mvm_te_check_trigger(mvm, notif, te_data); + + /* + * The FW sends the start/end time event notifications even for events + * that it fails to schedule. This is indicated in the status field of + * the notification. This happens in cases that the scheduler cannot + * find a schedule that can handle the event (for example requesting a + * P2P Device discoveribility, while there are other higher priority + * events in the system). + */ + if (!le32_to_cpu(notif->status)) { + const char *msg; + + if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START)) + msg = "Time Event start notification failure"; + else + msg = "Time Event end notification failure"; + + IWL_DEBUG_TE(mvm, "%s\n", msg); + + if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) { + iwl_mvm_te_clear_data(mvm, te_data); + return; + } + } + + if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) { + IWL_DEBUG_TE(mvm, + "TE ended - current time %lu, estimated end %lu\n", + jiffies, te_data->end_jiffies); + + switch (te_data->vif->type) { + case NL80211_IFTYPE_P2P_DEVICE: + ieee80211_remain_on_channel_expired(mvm->hw); + iwl_mvm_roc_finished(mvm); + break; + case NL80211_IFTYPE_STATION: + /* + * By now, we should have finished association + * and know the dtim period. + */ + iwl_mvm_te_check_disconnect(mvm, te_data->vif, + "No association and the time event is over already..."); + break; + default: + break; + } + + iwl_mvm_te_clear_data(mvm, te_data); + } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) { + te_data->running = true; + te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); + + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { + set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); + iwl_mvm_ref(mvm, IWL_MVM_REF_ROC); + ieee80211_ready_on_channel(mvm->hw); + } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { + iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); + } + } else { + IWL_WARN(mvm, "Got TE with unknown action\n"); + } +} + +/* + * Handle A Aux ROC time event + */ +static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, + struct iwl_time_event_notif *notif) +{ + struct iwl_mvm_time_event_data *te_data, *tmp; + bool aux_roc_te = false; + + list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) { + if (le32_to_cpu(notif->unique_id) == te_data->uid) { + aux_roc_te = true; + break; + } + } + if (!aux_roc_te) /* Not a Aux ROC time event */ + return -EINVAL; + + iwl_mvm_te_check_trigger(mvm, notif, te_data); + + if (!le32_to_cpu(notif->status)) { + IWL_DEBUG_TE(mvm, + "ERROR: Aux ROC Time Event %s notification failure\n", + (le32_to_cpu(notif->action) & + TE_V2_NOTIF_HOST_EVENT_START) ? 
"start" : "end"); + return -EINVAL; + } + + IWL_DEBUG_TE(mvm, + "Aux ROC time event notification - UID = 0x%x action %d\n", + le32_to_cpu(notif->unique_id), + le32_to_cpu(notif->action)); + + if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { + /* End TE, notify mac80211 */ + ieee80211_remain_on_channel_expired(mvm->hw); + iwl_mvm_roc_finished(mvm); /* flush aux queue */ + list_del(&te_data->list); /* remove from list */ + te_data->running = false; + te_data->vif = NULL; + te_data->uid = 0; + te_data->id = TE_MAX; + } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { + set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); + te_data->running = true; + iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX); + ieee80211_ready_on_channel(mvm->hw); /* Start TE */ + } else { + IWL_DEBUG_TE(mvm, + "ERROR: Unknown Aux ROC Time Event (action = %d)\n", + le32_to_cpu(notif->action)); + return -EINVAL; + } + + return 0; +} + +/* + * The Rx handler for time event notifications + */ +void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_time_event_notif *notif = (void *)pkt->data; + struct iwl_mvm_time_event_data *te_data, *tmp; + + IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n", + le32_to_cpu(notif->unique_id), + le32_to_cpu(notif->action)); + + spin_lock_bh(&mvm->time_event_lock); + /* This time event is triggered for Aux ROC request */ + if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif)) + goto unlock; + + list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) { + if (le32_to_cpu(notif->unique_id) == te_data->uid) + iwl_mvm_te_handle_notif(mvm, te_data, notif); + } +unlock: + spin_unlock_bh(&mvm->time_event_lock); +} + +static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_mvm_time_event_data *te_data = data; + struct iwl_time_event_notif *resp; + int resp_len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) + return true; + + if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { + IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n"); + return true; + } + + resp = (void *)pkt->data; + + /* te_data->uid is already set in the TIME_EVENT_CMD response */ + if (le32_to_cpu(resp->unique_id) != te_data->uid) + return false; + + IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", + te_data->uid); + if (!resp->status) + IWL_ERR(mvm, + "TIME_EVENT_NOTIFICATION received but not executed\n"); + + return true; +} + +static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_mvm_time_event_data *te_data = data; + struct iwl_time_event_resp *resp; + int resp_len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD)) + return true; + + if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { + IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n"); + return true; + } + + resp = (void *)pkt->data; + + /* we should never get a response to another TIME_EVENT_CMD here */ + if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id)) + return false; + + te_data->uid = le32_to_cpu(resp->unique_id); + IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", + te_data->uid); + return true; +} + +static int 
iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_time_event_data *te_data, + struct iwl_time_event_cmd *te_cmd) +{ + static const u16 time_event_response[] = { TIME_EVENT_CMD }; + struct iwl_notification_wait wait_time_event; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n", + le32_to_cpu(te_cmd->duration)); + + spin_lock_bh(&mvm->time_event_lock); + if (WARN_ON(te_data->id != TE_MAX)) { + spin_unlock_bh(&mvm->time_event_lock); + return -EIO; + } + te_data->vif = vif; + te_data->duration = le32_to_cpu(te_cmd->duration); + te_data->id = le32_to_cpu(te_cmd->id); + list_add_tail(&te_data->list, &mvm->time_event_list); + spin_unlock_bh(&mvm->time_event_lock); + + /* + * Use a notification wait, which really just processes the + * command response and doesn't wait for anything, in order + * to be able to process the response and get the UID inside + * the RX path. Using CMD_WANT_SKB doesn't work because it + * stores the buffer and then wakes up this thread, by which + * time another notification (that the time event started) + * might already be processed unsuccessfully. + */ + iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, + time_event_response, + ARRAY_SIZE(time_event_response), + iwl_mvm_time_event_response, te_data); + + ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, + sizeof(*te_cmd), te_cmd); + if (ret) { + IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); + iwl_remove_notification(&mvm->notif_wait, &wait_time_event); + goto out_clear_te; + } + + /* No need to wait for anything, so just pass 1 (0 isn't valid) */ + ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); + /* should never fail */ + WARN_ON_ONCE(ret); + + if (ret) { + out_clear_te: + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); + } + return ret; +} + +void iwl_mvm_protect_session(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 duration, u32 min_duration, + u32 max_delay, bool wait_for_notif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; + struct iwl_notification_wait wait_te_notif; + struct iwl_time_event_cmd time_cmd = {}; + + lockdep_assert_held(&mvm->mutex); + + if (te_data->running && + time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { + IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", + jiffies_to_msecs(te_data->end_jiffies - jiffies)); + return; + } + + if (te_data->running) { + IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", + te_data->uid, + jiffies_to_msecs(te_data->end_jiffies - jiffies)); + /* + * we don't have enough time + * cancel the current TE and issue a new one + * Of course it would be better to remove the old one only + * when the new one is added, but we don't care if we are off + * channel for a bit. All we need to do, is not to return + * before we actually begin to be on the channel. 
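A compact userspace model of the decision made at the top of iwl_mvm_protect_session(): keep the running time event only if it still covers min_duration, otherwise tear it down and add a new one. now_ms and end_ms stand in for jiffies and te_data->end_jiffies, and the helper name is made up for the sketch.

/* Sketch of the "is the current protection still long enough?" check. */
#include <stdbool.h>
#include <stdio.h>

#define TU_TO_MS(x) (((x) * 1024) / 1000)

static bool need_new_time_event(unsigned long now_ms, unsigned long end_ms,
                                unsigned int min_duration_tu, bool running)
{
    /* keep the current event only if it is running and will not end
     * before now + min_duration */
    if (running && end_ms > now_ms + TU_TO_MS(min_duration_tu))
        return false;
    return true;    /* cancel the old event (if any) and add a new one */
}

int main(void)
{
    /* 400 TU of minimum protection, current event ends 350 ms from now */
    printf("need new TE: %d\n",
           need_new_time_event(1000, 1350, 400, true));  /* prints 1 */
    /* same event but ending 600 ms from now */
    printf("need new TE: %d\n",
           need_new_time_event(1000, 1600, 400, true));  /* prints 0 */
    return 0;
}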
+ */ + iwl_mvm_stop_session_protection(mvm, vif); + } + + time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); + time_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); + + time_cmd.apply_time = cpu_to_le32(0); + + time_cmd.max_frags = TE_V2_FRAG_NONE; + time_cmd.max_delay = cpu_to_le32(max_delay); + /* TODO: why do we need to interval = bi if it is not periodic? */ + time_cmd.interval = cpu_to_le32(1); + time_cmd.duration = cpu_to_le32(duration); + time_cmd.repeat = 1; + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | + TE_V2_NOTIF_HOST_EVENT_END | + T2_V2_START_IMMEDIATELY); + + if (!wait_for_notif) { + iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); + return; + } + + /* + * Create notification_wait for the TIME_EVENT_NOTIFICATION to use + * right after we send the time event + */ + iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, + te_notif_response, + ARRAY_SIZE(te_notif_response), + iwl_mvm_te_notif, te_data); + + /* If TE was sent OK - wait for the notification that started */ + if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) { + IWL_ERR(mvm, "Failed to add TE to protect session\n"); + iwl_remove_notification(&mvm->notif_wait, &wait_te_notif); + } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, + TU_TO_JIFFIES(max_delay))) { + IWL_ERR(mvm, "Failed to protect session until TE\n"); + } +} + +static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data, + u32 *uid) +{ + u32 id; + + /* + * It is possible that by the time we got to this point the time + * event was already removed. + */ + spin_lock_bh(&mvm->time_event_lock); + + /* Save time event uid before clearing its data */ + *uid = te_data->uid; + id = te_data->id; + + /* + * The clear_data function handles time events that were already removed + */ + iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); + + /* + * It is possible that by the time we try to remove it, the time event + * has already ended and removed. In such a case there is no need to + * send a removal command. + */ + if (id == TE_MAX) { + IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid); + return false; + } + + return true; +} + +/* + * Explicit request to remove a aux roc time event. The removal of a time + * event needs to be synchronized with the flow of a time event's end + * notification, which also removes the time event from the op mode + * data structures. + */ +static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_time_event_data *te_data) +{ + struct iwl_hs20_roc_req aux_cmd = {}; + u32 uid; + int ret; + + if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) + return; + + aux_cmd.event_unique_id = cpu_to_le32(uid); + aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); + aux_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", + le32_to_cpu(aux_cmd.event_unique_id)); + ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, + sizeof(aux_cmd), &aux_cmd); + + if (WARN_ON(ret)) + return; +} + +/* + * Explicit request to remove a time event. The removal of a time event needs to + * be synchronized with the flow of a time event's end notification, which also + * removes the time event from the op mode data structures. 
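The removal path above follows a small protocol: snapshot the UID, clear the driver-side data under the lock, and only send FW_CTXT_ACTION_REMOVE if the event had not already ended (id != TE_MAX). Below is a cut-down model of that decision; te_model and the TE_MAX value are placeholders, not the real iwl_mvm_time_event_data.

/* Sketch of the handshake implemented by __iwl_mvm_remove_time_event(). */
#include <stdbool.h>
#include <stdio.h>

#define TE_MAX 0xffffffffu    /* placeholder for the "no event" id */

struct te_model {
    unsigned int id;
    unsigned int uid;
};

static bool remove_time_event(struct te_model *te, unsigned int *uid)
{
    bool send_remove = (te->id != TE_MAX);

    *uid = te->uid;           /* save before clearing */

    /* clear_data: the event is forgotten either way */
    te->id = TE_MAX;
    te->uid = 0;

    return send_remove;       /* true -> send FW_CTXT_ACTION_REMOVE(uid) */
}

int main(void)
{
    struct te_model te = { .id = 3, .uid = 0x1234 };
    unsigned int uid;

    if (remove_time_event(&te, &uid))
        printf("send REMOVE for uid 0x%x\n", uid);

    /* second call: the event is already gone, nothing to send */
    if (!remove_time_event(&te, &uid))
        printf("uid 0x%x already ended, skip the command\n", uid);
    return 0;
}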
+ */ +void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_time_event_data *te_data) +{ + struct iwl_time_event_cmd time_cmd = {}; + u32 uid; + int ret; + + if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) + return; + + /* When we remove a TE, the UID is to be set in the id field */ + time_cmd.id = cpu_to_le32(uid); + time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); + time_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + + IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); + ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, + sizeof(time_cmd), &time_cmd); + if (WARN_ON(ret)) + return; +} + +void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + + lockdep_assert_held(&mvm->mutex); + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); +} + +int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + int duration, enum ieee80211_roc_type type) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + struct iwl_time_event_cmd time_cmd = {}; + + lockdep_assert_held(&mvm->mutex); + if (te_data->running) { + IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n"); + return -EBUSY; + } + + /* + * Flush the done work, just in case it's still pending, so that + * the work it does can complete and we can accept new frames. + */ + flush_work(&mvm->roc_done_wk); + + time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); + time_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + + switch (type) { + case IEEE80211_ROC_TYPE_NORMAL: + time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL); + break; + case IEEE80211_ROC_TYPE_MGMT_TX: + time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX); + break; + default: + WARN_ONCE(1, "Got an invalid ROC type\n"); + return -EINVAL; + } + + time_cmd.apply_time = cpu_to_le32(0); + time_cmd.interval = cpu_to_le32(1); + + /* + * The P2P Device TEs can have lower priority than other events + * that are being scheduled by the driver/fw, and thus it might not be + * scheduled. To improve the chances of it being scheduled, allow them + * to be fragmented, and in addition allow them to be delayed. + */ + time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS); + time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); + time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); + time_cmd.repeat = 1; + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | + TE_V2_NOTIF_HOST_EVENT_END | + T2_V2_START_IMMEDIATELY); + + return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); +} + +void iwl_mvm_stop_roc(struct iwl_mvm *mvm) +{ + struct iwl_mvm_vif *mvmvif = NULL; + struct iwl_mvm_time_event_data *te_data; + bool is_p2p = false; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->time_event_lock); + + /* + * Iterate over the list of time events and find the time event that is + * associated with a P2P_DEVICE interface. 
+ * This assumes that a P2P_DEVICE interface can have only a single time + * event at any given time and this time event coresponds to a ROC + * request + */ + list_for_each_entry(te_data, &mvm->time_event_list, list) { + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + is_p2p = true; + goto remove_te; + } + } + + /* There can only be at most one AUX ROC time event, we just use the + * list to simplify/unify code. Remove it if it exists. + */ + te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, + struct iwl_mvm_time_event_data, + list); + if (te_data) + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + +remove_te: + spin_unlock_bh(&mvm->time_event_lock); + + if (!mvmvif) { + IWL_WARN(mvm, "No remain on channel event\n"); + return; + } + + if (is_p2p) + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); + else + iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); + + iwl_mvm_roc_finished(mvm); +} + +int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 duration, u32 apply_time) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + struct iwl_time_event_cmd time_cmd = {}; + + lockdep_assert_held(&mvm->mutex); + + if (te_data->running) { + IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); + return -EBUSY; + } + + time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); + time_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD); + time_cmd.apply_time = cpu_to_le32(apply_time); + time_cmd.max_frags = TE_V2_FRAG_NONE; + time_cmd.duration = cpu_to_le32(duration); + time_cmd.repeat = 1; + time_cmd.interval = cpu_to_le32(1); + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | + TE_V2_ABSENCE); + + return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h new file mode 100644 index 000000000000..cbdf8e52a5f1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -0,0 +1,249 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __time_event_h__ +#define __time_event_h__ + +#include "fw-api.h" + +#include "mvm.h" + +/** + * DOC: Time Events - what is it? + * + * Time Events are a fw feature that allows the driver to control the presence + * of the device on the channel. Since the fw supports multiple channels + * concurrently, the fw may choose to jump to another channel at any time. + * In order to make sure that the fw is on a specific channel at a certain time + * and for a certain duration, the driver needs to issue a time event. + * + * The simplest example is for BSS association. The driver issues a time event, + * waits for it to start, and only then tells mac80211 that we can start the + * association. This way, we make sure that the association will be done + * smoothly and won't be interrupted by channel switch decided within the fw. + */ + + /** + * DOC: The flow against the fw + * + * When the driver needs to make sure we are in a certain channel, at a certain + * time and for a certain duration, it sends a Time Event. The flow against the + * fw goes like this: + * 1) Driver sends a TIME_EVENT_CMD to the fw + * 2) Driver gets the response for that command. This response contains the + * Unique ID (UID) of the event. + * 3) The fw sends notification when the event starts. + * + * Of course the API provides various options that allow to cover parameters + * of the flow. + * What is the duration of the event? + * What is the start time of the event? + * Is there an end-time for the event? + * How much can the event be delayed? + * Can the event be split? + * If yes what is the maximal number of chunks? + * etc... 
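A toy model of the three-step flow described above, assuming nothing beyond what the comment states: the command response carries the unique ID, and a separate notification later reports that the event actually started. fw_model and both function names are invented for the illustration, they are not driver or firmware code.

/* Illustrative model of: (1) send TIME_EVENT_CMD, (2) read the UID from the
 * response, (3) act only when the start notification for that UID arrives. */
#include <stdbool.h>
#include <stdio.h>

struct fw_model {
    unsigned int next_uid;
    unsigned int scheduled_uid;
};

/* steps 1 and 2: the command response carries the event's UID */
static unsigned int time_event_cmd(struct fw_model *fw, unsigned int duration_tu)
{
    unsigned int uid = fw->next_uid++;

    fw->scheduled_uid = uid;
    printf("TIME_EVENT_CMD: duration %u TU -> UID 0x%x\n", duration_tu, uid);
    return uid;
}

/* step 3: the firmware later reports that the event with that UID started */
static bool time_event_started(const struct fw_model *fw, unsigned int uid)
{
    return fw->scheduled_uid == uid;
}

int main(void)
{
    struct fw_model fw = { .next_uid = 0x100, .scheduled_uid = 0 };
    unsigned int uid = time_event_cmd(&fw, 500);

    if (time_event_started(&fw, uid))
        printf("UID 0x%x started, safe to proceed (e.g. associate)\n", uid);
    return 0;
}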
+ */
+
+/**
+ * DOC: Abstraction to the driver
+ *
+ * In order to simplify the use of time events for the rest of the driver,
+ * we abstract the use of time events. This component provides the functions
+ * needed by the driver.
+ */
+
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
+#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mvm_protect_session - start / extend the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ * @duration: the duration of the session in TU.
+ * @min_duration: will start a new session if the current session will end
+ * in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
+ * @wait_for_notif: true if it is required that a time event notification be
+ * waited for (that the time event has been scheduled before returning)
+ *
+ * This function can be used to start a session protection which means that the
+ * fw will stay on the channel for %duration (given in TU). This function
+ * can block (sleep) until the session starts. This function can also be used
+ * to extend a currently running session.
+ * This function is meant to be used for BSS association for example, where we
+ * want to make sure that the fw stays on the channel during the association.
+ */
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ u32 max_delay, bool wait_for_notif);
+
+/**
+ * iwl_mvm_stop_session_protection - cancel the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship. If it is not needed any more it should be canceled because
+ * the other bindings wait for the medium during that time.
+ * This function doesn't sleep.
+ */
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/*
+ * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
+ */
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/**
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is requested. It is assumed
+ * that the vif type is NL80211_IFTYPE_P2P_DEVICE
+ * @duration: the requested duration in milliseconds for the fw to be on the
+ * channel that is bound to the vif.
+ * @type: the remain on channel request type
+ *
+ * This function can be used to issue a remain on channel session,
+ * which means that the fw will stay on the channel for the requested %duration
+ * milliseconds. The function is async, meaning that it only issues the ROC
+ * request but does not wait for it to start. Once the FW is ready to serve the
+ * ROC request, it will issue a notification to the driver that it is on the
+ * requested channel. Once the FW completes the ROC request it will issue
+ * another notification to the driver.
+ */
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration, enum ieee80211_roc_type type);
+
+/**
+ * iwl_mvm_stop_roc - stop remain on channel functionality
+ * @mvm: the mvm component
+ *
+ * This function can be used to cancel an ongoing ROC session.
+ * The function is async: it will instruct the FW to stop serving the ROC
+ * session, but it will not wait for the actual stopping of the session.
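As a usage illustration of the declarations above, the editorial sketch below (not part of this patch) wraps an association in session protection. The function name, the concrete TU values and the locking are assumptions that follow the usual mvm convention of calling these helpers with mvm->mutex held; the ROC pair iwl_mvm_start_p2p_roc()/iwl_mvm_stop_roc() is meant to be driven in the same way from the remain-on-channel path.

static void example_protect_assoc(struct iwl_mvm *mvm,
                                  struct ieee80211_vif *vif)
{
        mutex_lock(&mvm->mutex);
        /*
         * stay on the channel for 400 TU, start a fresh event if less
         * than 100 TU remain, let the fw delay the start by at most
         * 500 TU and block until the event has really been scheduled
         */
        iwl_mvm_protect_session(mvm, vif, 400, 100, 500, true);
        mutex_unlock(&mvm->mutex);

        /* ... run the association handshake ... */

        mutex_lock(&mvm->mutex);
        /* give the medium back to the other bindings as soon as possible */
        iwl_mvm_stop_session_protection(mvm, vif);
        mutex_unlock(&mvm->mutex);
}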
+ */ +void iwl_mvm_stop_roc(struct iwl_mvm *mvm); + +/** + * iwl_mvm_remove_time_event - general function to clean up of time event + * @mvm: the mvm component + * @vif: the vif to which the time event belongs + * @te_data: the time event data that corresponds to that time event + * + * This function can be used to cancel a time event regardless its type. + * It is useful for cleaning up time events running before removing an + * interface. + */ +void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_time_event_data *te_data); + +/** + * iwl_mvm_te_clear_data - remove time event from list + * @mvm: the mvm component + * @te_data: the time event data to remove + * + * This function is mostly internal, it is made available here only + * for firmware restart purposes. + */ +void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data); + +void iwl_mvm_roc_done_wk(struct work_struct *wk); + +/** + * iwl_mvm_schedule_csa_period - request channel switch absence period + * @mvm: the mvm component + * @vif: the virtual interface for which the channel switch is issued + * @duration: the duration of the NoA in TU. + * @apply_time: NoA start time in GP2. + * + * This function is used to schedule NoA time event and is used to perform + * the channel switch flow. + */ +int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 duration, u32 apply_time); + +/** + * iwl_mvm_te_scheduled - check if the fw received the TE cmd + * @te_data: the time event data that corresponds to that time event + * + * This function returns true iff this TE is added to the fw. + */ +static inline bool +iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data) +{ + if (!te_data) + return false; + + return !!te_data->uid; +} + +#endif /* __time_event_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c new file mode 100644 index 000000000000..4007f1d421dd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c @@ -0,0 +1,306 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "mvm.h" +#include "fw-api-tof.h" + +#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 + +void iwl_mvm_tof_init(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return; + + memset(tof_data, 0, sizeof(*tof_data)); + + tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (IWL_MVM_TOF_IS_RESPONDER) { + tof_data->responder_cfg.sub_grp_cmd_id = + cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); + tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT; + } +#endif + + tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD); + tof_data->range_req.req_timeout = 1; + tof_data->range_req.initiator = 1; + tof_data->range_req.report_policy = 3; + + tof_data->range_req_ext.sub_grp_cmd_id = + cpu_to_le32(TOF_RANGE_REQ_EXT_CMD); + + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; +} + +void iwl_mvm_tof_clean(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return; + + memset(tof_data, 0, sizeof(*tof_data)); + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; +} + +static void iwl_tof_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + bool *enabled = _data; + + /* non bss vif exists */ + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) + *enabled = false; +} + +int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm) +{ + struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg; + bool enabled; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_tof_iterator, &enabled); + if (!enabled) { + IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n"); + 
return -EINVAL; + } + + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(*cmd), cmd); +} + +int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id) +{ + struct iwl_tof_range_abort_cmd cmd = { + .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD), + .request_id = id, + }; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (id != mvm->tof_data.active_range_request) { + IWL_ERR(mvm, "Invalid range request id %d (active %d)\n", + id, mvm->tof_data.active_range_request); + return -EINVAL; + } + + /* after abort is sent there's no active request anymore */ + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(cmd), &cmd); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (vif->p2p || vif->type != NL80211_IFTYPE_AP || + !mvmvif->ap_ibss_active) { + IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); + return -EIO; + } + + cmd->sta_id = mvmvif->bcast_sta.sta_id; + memcpy(cmd->bssid, vif->addr, ETH_ALEN); + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(*cmd), cmd); +} +#endif + +int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .len = { sizeof(mvm->tof_data.range_req), }, + /* no copy because of the command size */ + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + IWL_ERR(mvm, "Cannot send range request, not STA mode\n"); + return -EIO; + } + + /* nesting of range requests is not supported in FW */ + if (mvm->tof_data.active_range_request != + IWL_MVM_TOF_RANGE_REQ_MAX_ID) { + IWL_ERR(mvm, "Cannot send range req, already active req %d\n", + mvm->tof_data.active_range_request); + return -EIO; + } + + mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id; + + cmd.data[0] = &mvm->tof_data.range_req; + return iwl_mvm_send_cmd(mvm, &cmd); +} + +int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n"); + return -EIO; + } + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(mvm->tof_data.range_req_ext), + &mvm->tof_data.range_req_ext); +} + +static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_range_rsp_ntfy *resp = (void *)data; + + if (resp->request_id != mvm->tof_data.active_range_request) { + IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n", + resp->request_id, 
mvm->tof_data.active_range_request); + return -EIO; + } + + memcpy(&mvm->tof_data.range_resp, resp, + sizeof(struct iwl_tof_range_rsp_ntfy)); + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + + return 0; +} + +static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data; + + IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token); + return 0; +} + +static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_neighbor_report *report = + (struct iwl_tof_neighbor_report *)data; + + IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n", + report->bssid, report->request_token, report->status); + return 0; +} + +void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data; + + lockdep_assert_held(&mvm->mutex); + + switch (le32_to_cpu(resp->sub_grp_cmd_id)) { + case TOF_RANGE_RESPONSE_NOTIF: + iwl_mvm_tof_range_resp(mvm, resp->data); + break; + case TOF_MCSI_DEBUG_NOTIF: + iwl_mvm_tof_mcsi_notif(mvm, resp->data); + break; + case TOF_NEIGHBOR_REPORT_RSP_NOTIF: + iwl_mvm_tof_nb_report_notif(mvm, resp->data); + break; + default: + IWL_ERR(mvm, "Unknown sub-group command 0x%x\n", + resp->sub_grp_cmd_id); + break; + } +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h new file mode 100644 index 000000000000..9beebc33cb8d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __tof_h__ +#define __tof_h__ + +#include "fw-api-tof.h" + +struct iwl_mvm_tof_data { + struct iwl_tof_config_cmd tof_cfg; + struct iwl_tof_range_req_cmd range_req; + struct iwl_tof_range_req_ext_cmd range_req_ext; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct iwl_tof_responder_config_cmd responder_cfg; +#endif + struct iwl_tof_range_rsp_ntfy range_resp; + u8 last_abort_id; + u16 active_range_request; +}; + +void iwl_mvm_tof_init(struct iwl_mvm *mvm); +void iwl_mvm_tof_clean(struct iwl_mvm *mvm); +int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm); +int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id); +int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +#endif +#endif /* __tof_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c new file mode 100644 index 000000000000..cadfc0460597 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -0,0 +1,460 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include "mvm.h" + +#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ + +static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; + u32 duration = tt->params.ct_kill_duration; + + if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) + return; + + IWL_ERR(mvm, "Enter CT Kill\n"); + iwl_mvm_set_hw_ctkill_state(mvm, true); + + tt->throttle = false; + tt->dynamic_smps = false; + + /* Don't schedule an exit work if we're in test mode, since + * the temperature will not change unless we manually set it + * again (or disable testing). 
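 *
 * (Editor's note, not part of the original patch: ct_kill_duration is
 * expressed in seconds, the default thermal table later in this file
 * uses 5, and the ct_kill_exit worker re-reads the NIC temperature,
 * leaving CT-kill only once it has dropped to ct_kill_exit or below;
 * otherwise it re-arms itself for another ct_kill_duration seconds.)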
+ */ + if (!mvm->temperature_test) + schedule_delayed_work(&tt->ct_kill_exit, + round_jiffies_relative(duration * HZ)); +} + +static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) +{ + if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) + return; + + IWL_ERR(mvm, "Exit CT Kill\n"); + iwl_mvm_set_hw_ctkill_state(mvm, false); +} + +void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp) +{ + /* ignore the notification if we are in test mode */ + if (mvm->temperature_test) + return; + + if (mvm->temperature == temp) + return; + + mvm->temperature = temp; + iwl_mvm_tt_handler(mvm); +} + +static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_dts_measurement_notif *notif; + int len = iwl_rx_packet_payload_len(pkt); + int temp; + + if (WARN_ON_ONCE(len != sizeof(*notif))) { + IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); + return -EINVAL; + } + + notif = (void *)pkt->data; + + temp = le32_to_cpu(notif->temp); + + /* shouldn't be negative, but since it's s32, make sure it isn't */ + if (WARN_ON_ONCE(temp < 0)) + temp = 0; + + IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp); + + return temp; +} + +static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + int *temp = data; + int ret; + + ret = iwl_mvm_temp_notif_parse(mvm, pkt); + if (ret < 0) + return true; + + *temp = ret; + + return true; +} + +void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + int temp; + + /* the notification is handled synchronously in ctkill, so skip here */ + if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) + return; + + temp = iwl_mvm_temp_notif_parse(mvm, pkt); + if (temp < 0) + return; + + iwl_mvm_tt_temp_changed(mvm, temp); +} + +static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) +{ + struct iwl_dts_measurement_cmd cmd = { + .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), + }; + struct iwl_ext_dts_measurement_cmd extcmd = { + .control_mode = cpu_to_le32(DTS_AUTOMATIC), + }; + u32 cmdid; + + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) + cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE, + PHY_OPS_GROUP, 0); + else + cmdid = CMD_DTS_MEASUREMENT_TRIGGER; + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) + return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd); + + return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd); +} + +int iwl_mvm_get_temp(struct iwl_mvm *mvm) +{ + struct iwl_notification_wait wait_temp_notif; + static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, + DTS_MEASUREMENT_NOTIF_WIDE) }; + int ret, temp; + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) + temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION; + + lockdep_assert_held(&mvm->mutex); + + iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, + temp_notif, ARRAY_SIZE(temp_notif), + iwl_mvm_temp_notif_wait, &temp); + + ret = iwl_mvm_get_temp_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret); + iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif); + return ret; + } + + ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, + IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); + if (ret) { + IWL_ERR(mvm, "Getting the temperature timed out\n"); + return ret; + } + + return temp; +} + +static void 
check_exit_ctkill(struct work_struct *work) +{ + struct iwl_mvm_tt_mgmt *tt; + struct iwl_mvm *mvm; + u32 duration; + s32 temp; + + tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); + mvm = container_of(tt, struct iwl_mvm, thermal_throttle); + + duration = tt->params.ct_kill_duration; + + mutex_lock(&mvm->mutex); + + if (__iwl_mvm_mac_start(mvm)) + goto reschedule; + + /* make sure the device is available for direct read/writes */ + if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) { + __iwl_mvm_mac_stop(mvm); + goto reschedule; + } + + temp = iwl_mvm_get_temp(mvm); + + iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); + + __iwl_mvm_mac_stop(mvm); + + if (temp < 0) + goto reschedule; + + IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); + + if (temp <= tt->params.ct_kill_exit) { + mutex_unlock(&mvm->mutex); + iwl_mvm_exit_ctkill(mvm); + return; + } + +reschedule: + mutex_unlock(&mvm->mutex); + schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, + round_jiffies(duration * HZ)); +} + +static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = _data; + enum ieee80211_smps_mode smps_mode; + + lockdep_assert_held(&mvm->mutex); + + if (mvm->thermal_throttle.dynamic_smps) + smps_mode = IEEE80211_SMPS_DYNAMIC; + else + smps_mode = IEEE80211_SMPS_AUTOMATIC; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode); +} + +static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int i, err; + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (enable == mvmsta->tt_tx_protection) + continue; + err = iwl_mvm_tx_protection(mvm, mvmsta, enable); + if (err) { + IWL_ERR(mvm, "Failed to %s Tx protection\n", + enable ? "enable" : "disable"); + } else { + IWL_DEBUG_TEMP(mvm, "%s Tx protection\n", + enable ? 
"Enable" : "Disable"); + mvmsta->tt_tx_protection = enable; + } + } +} + +void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_THERMAL_MNG_BACKOFF, + .len = { sizeof(u32), }, + .data = { &backoff, }, + }; + + backoff = max(backoff, mvm->thermal_throttle.min_backoff); + + if (iwl_mvm_send_cmd(mvm, &cmd) == 0) { + IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n", + backoff); + mvm->thermal_throttle.tx_backoff = backoff; + } else { + IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n"); + } +} + +void iwl_mvm_tt_handler(struct iwl_mvm *mvm) +{ + struct iwl_tt_params *params = &mvm->thermal_throttle.params; + struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; + s32 temperature = mvm->temperature; + bool throttle_enable = false; + int i; + u32 tx_backoff; + + IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature); + + if (params->support_ct_kill && temperature >= params->ct_kill_entry) { + iwl_mvm_enter_ctkill(mvm); + return; + } + + if (params->support_ct_kill && + temperature <= params->ct_kill_exit) { + iwl_mvm_exit_ctkill(mvm); + return; + } + + if (params->support_dynamic_smps) { + if (!tt->dynamic_smps && + temperature >= params->dynamic_smps_entry) { + IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n"); + tt->dynamic_smps = true; + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tt_smps_iterator, mvm); + throttle_enable = true; + } else if (tt->dynamic_smps && + temperature <= params->dynamic_smps_exit) { + IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n"); + tt->dynamic_smps = false; + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tt_smps_iterator, mvm); + } + } + + if (params->support_tx_protection) { + if (temperature >= params->tx_protection_entry) { + iwl_mvm_tt_tx_protection(mvm, true); + throttle_enable = true; + } else if (temperature <= params->tx_protection_exit) { + iwl_mvm_tt_tx_protection(mvm, false); + } + } + + if (params->support_tx_backoff) { + tx_backoff = tt->min_backoff; + for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { + if (temperature < params->tx_backoff[i].temperature) + break; + tx_backoff = max(tt->min_backoff, + params->tx_backoff[i].backoff); + } + if (tx_backoff != tt->min_backoff) + throttle_enable = true; + if (tt->tx_backoff != tx_backoff) + iwl_mvm_tt_tx_backoff(mvm, tx_backoff); + } + + if (!tt->throttle && throttle_enable) { + IWL_WARN(mvm, + "Due to high temperature thermal throttling initiated\n"); + tt->throttle = true; + } else if (tt->throttle && !tt->dynamic_smps && + tt->tx_backoff == tt->min_backoff && + temperature <= params->tx_protection_exit) { + IWL_WARN(mvm, + "Temperature is back to normal thermal throttling stopped\n"); + tt->throttle = false; + } +} + +static const struct iwl_tt_params iwl_mvm_default_tt_params = { + .ct_kill_entry = 118, + .ct_kill_exit = 96, + .ct_kill_duration = 5, + .dynamic_smps_entry = 114, + .dynamic_smps_exit = 110, + .tx_protection_entry = 114, + .tx_protection_exit = 108, + .tx_backoff = { + {.temperature = 112, .backoff = 200}, + {.temperature = 113, .backoff = 600}, + {.temperature = 114, .backoff = 1200}, + {.temperature = 115, .backoff = 2000}, + {.temperature = 116, .backoff = 4000}, + {.temperature = 117, .backoff = 10000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + +void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff) +{ + struct iwl_mvm_tt_mgmt *tt = 
&mvm->thermal_throttle; + + IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n"); + + if (mvm->cfg->thermal_params) + tt->params = *mvm->cfg->thermal_params; + else + tt->params = iwl_mvm_default_tt_params; + + tt->throttle = false; + tt->dynamic_smps = false; + tt->min_backoff = min_backoff; + INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); +} + +void iwl_mvm_tt_exit(struct iwl_mvm *mvm) +{ + cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); + IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c new file mode 100644 index 000000000000..c652a66be803 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -0,0 +1,1115 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include + +#include "iwl-trans.h" +#include "iwl-eeprom-parse.h" +#include "mvm.h" +#include "sta.h" + +static void +iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, + u16 tid, u16 ssn) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "BAR sent to %pM, tid %d, ssn %d", + addr, tid, ssn); +} + +/* + * Sets most of the Tx cmd's fields + */ +void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, u8 sta_id) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 fc = hdr->frame_control; + u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); + u32 len = skb->len + FCS_LEN; + u8 ac; + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + tx_flags |= TX_CMD_FLG_ACK; + else + tx_flags &= ~TX_CMD_FLG_ACK; + + if (ieee80211_is_probe_resp(fc)) + tx_flags |= TX_CMD_FLG_TSF; + + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL; + } else if (ieee80211_is_back_req(fc)) { + struct ieee80211_bar *bar = (void *)skb->data; + u16 control = le16_to_cpu(bar->control); + u16 ssn = le16_to_cpu(bar->start_seq_num); + + tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; + tx_cmd->tid_tspec = (control & + IEEE80211_BAR_CTRL_TID_INFO_MASK) >> + IEEE80211_BAR_CTRL_TID_INFO_SHIFT; + WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); + iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, + ssn); + } else { + tx_cmd->tid_tspec = IWL_TID_NON_QOS; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + tx_flags |= TX_CMD_FLG_SEQ_CTL; + else + tx_flags &= ~TX_CMD_FLG_SEQ_CTL; + } + + /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */ + if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) + ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; + else + ac = tid_to_mac80211_ac[0]; + + tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << + TX_CMD_FLG_BT_PRIO_POS; + + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); + else if (ieee80211_is_action(fc)) + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); + else + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); + + /* The spec allows Action frames in A-MPDU, we don't support + * it + */ + WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); + } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { + 
tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); + } else { + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); + } + + if (ieee80211_is_data(fc) && len > mvm->rts_threshold && + !is_multicast_ether_addr(ieee80211_get_DA(hdr))) + tx_flags |= TX_CMD_FLG_PROT_REQUIRE; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && + ieee80211_action_contains_tpc(skb)) + tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; + + tx_cmd->tx_flags = cpu_to_le32(tx_flags); + /* Total # bytes to be transmitted */ + tx_cmd->len = cpu_to_le16((u16)skb->len); + tx_cmd->next_frame_len = 0; + tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + tx_cmd->sta_id = sta_id; +} + +/* + * Sets the fields in the Tx cmd that are rate related + */ +void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc) +{ + u32 rate_flags; + int rate_idx; + u8 rate_plcp; + + /* Set retry limit on RTS packets */ + tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) { + tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; + tx_cmd->rts_retry_limit = + min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); + } else if (ieee80211_is_back_req(fc)) { + tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; + } else { + tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; + } + + /* + * for data packets, rate info comes from the table inside the fw. This + * table is controlled by LINK_QUALITY commands + */ + + if (ieee80211_is_data(fc) && sta) { + tx_cmd->initial_rate_index = 0; + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); + return; + } else if (ieee80211_is_back_req(fc)) { + tx_cmd->tx_flags |= + cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); + } + + /* HT rate doesn't make sense for a non data frame */ + WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, + "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n", + info->control.rates[0].flags, + info->control.rates[0].idx, + le16_to_cpu(fc)); + + rate_idx = info->control.rates[0].idx; + /* if the rate isn't a well known legacy rate, take the lowest one */ + if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY) + rate_idx = rate_lowest_index( + &mvm->nvm_data->bands[info->band], sta); + + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + + /* For 2.4 GHZ band, check that there is no need to remap */ + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); + + mvm->mgmt_last_antenna_idx = + iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), + mvm->mgmt_last_antenna_idx); + + if (info->band == IEEE80211_BAND_2GHZ && + !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; + else + rate_flags = + BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + /* Set the rate in the TX cmd */ + tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags); +} + +/* + * Sets the fields in the Tx cmd that are crypto related + */ +static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct 
sk_buff *skb_frag, + int hdrlen) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + u8 *crypto_hdr = skb_frag->data + hdrlen; + u64 pn; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); + pn = atomic64_inc_return(&keyconf->tx_pn); + crypto_hdr[0] = pn; + crypto_hdr[2] = 0; + crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); + crypto_hdr[1] = pn >> 8; + crypto_hdr[4] = pn >> 16; + crypto_hdr[5] = pn >> 24; + crypto_hdr[6] = pn >> 32; + crypto_hdr[7] = pn >> 40; + break; + + case WLAN_CIPHER_SUITE_TKIP: + tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | + ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & + TX_CMD_SEC_WEP_KEY_IDX_MSK); + + memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); + break; + default: + tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; + } +} + +/* + * Allocates and sets the Tx cmd the driver data pointers in the skb + */ +static struct iwl_device_cmd * +iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, + int hdrlen, struct ieee80211_sta *sta, u8 sta_id) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_device_cmd *dev_cmd; + struct iwl_tx_cmd *tx_cmd; + + dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); + + if (unlikely(!dev_cmd)) + return NULL; + + memset(dev_cmd, 0, sizeof(*dev_cmd)); + dev_cmd->hdr.cmd = TX_CMD; + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + + if (info->control.hw_key) + iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); + + iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); + + iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); + + memset(&info->status, 0, sizeof(info->status)); + + info->driver_data[0] = NULL; + info->driver_data[1] = dev_cmd; + + return dev_cmd; +} + +int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_device_cmd *dev_cmd; + struct iwl_tx_cmd *tx_cmd; + u8 sta_id; + int hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) + return -1; + + if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && + (!info->control.vif || + info->hw_queue != info->control.vif->cab_queue))) + return -1; + + /* + * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used + * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel + * queue. STATION (HS2.0) uses the auxiliary context of the FW, + * and hence needs to be sent on the aux queue + */ + if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + info->control.vif->type == NL80211_IFTYPE_STATION) + IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; + + /* + * If the interface on which the frame is sent is the P2P_DEVICE + * or an AP/GO interface use the broadcast station associated + * with it; otherwise if the interface is a managed interface + * use the AP station associated with it for multicast traffic + * (this is not possible for unicast packets as a TLDS discovery + * response are sent without a station entry); otherwise use the + * AUX station. 
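 *
 * (Editor's summary, not part of the original patch; it restates the
 * station selection implemented just below:
 *	P2P_DEVICE or AP vif                  -> mvmvif->bcast_sta.sta_id
 *	managed vif with a multicast address  -> mvmvif->ap_sta_id, if set
 *	anything else                         -> mvm->aux_sta.sta_id)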
+ */ + sta_id = mvm->aux_sta.sta_id; + if (info->control.vif) { + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(info->control.vif); + + if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || + info->control.vif->type == NL80211_IFTYPE_AP) + sta_id = mvmvif->bcast_sta.sta_id; + else if (info->control.vif->type == NL80211_IFTYPE_STATION && + is_multicast_ether_addr(hdr->addr1)) { + u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); + + if (ap_sta_id != IWL_MVM_STATION_COUNT) + sta_id = ap_sta_id; + } + } + + IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); + + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id); + if (!dev_cmd) + return -1; + + /* From now on, we cannot access info->control */ + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdrlen); + + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); + return -1; + } + + return 0; +} + +/* + * Sets the fields in the Tx cmd that are crypto related + */ +int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_mvm_sta *mvmsta; + struct iwl_device_cmd *dev_cmd; + struct iwl_tx_cmd *tx_cmd; + __le16 fc; + u16 seq_number = 0; + u8 tid = IWL_MAX_TID_COUNT; + u8 txq_id = info->hw_queue; + bool is_data_qos = false, is_ampdu = false; + int hdrlen; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + fc = hdr->frame_control; + hdrlen = ieee80211_hdrlen(fc); + + if (WARN_ON_ONCE(!mvmsta)) + return -1; + + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + return -1; + + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id); + if (!dev_cmd) + goto drop; + + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + /* From now on, we cannot access info->control */ + + /* + * we handle that entirely ourselves -- for uAPSD the firmware + * will always send a notification, and for PS-Poll responses + * we'll notify mac80211 when getting frame status + */ + info->flags &= ~IEEE80211_TX_STATUS_EOSP; + + spin_lock(&mvmsta->lock); + + if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { + u8 *qc = NULL; + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + goto drop_unlock_sta; + + seq_number = mvmsta->tid_data[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + is_data_qos = true; + is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; + } + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdrlen); + + WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); + + if (sta->tdls) { + /* default to TID 0 for non-QoS packets */ + u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 
0 : tid; + + txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; + } + + if (is_ampdu) { + if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON)) + goto drop_unlock_sta; + txq_id = mvmsta->tid_data[tid].txq_id; + } + + IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, + tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); + + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) + goto drop_unlock_sta; + + if (is_data_qos && !ieee80211_has_morefrags(fc)) + mvmsta->tid_data[tid].seq_number = seq_number + 0x10; + + spin_unlock(&mvmsta->lock); + + if (txq_id < mvm->first_agg_queue) + atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); + + return 0; + +drop_unlock_sta: + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); + spin_unlock(&mvmsta->lock); +drop: + return -1; +} + +static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, u8 tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + struct ieee80211_vif *vif = mvmsta->vif; + + lockdep_assert_held(&mvmsta->lock); + + if ((tid_data->state == IWL_AGG_ON || + tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && + iwl_mvm_tid_queued(tid_data) == 0) { + /* + * Now that this aggregation queue is empty tell mac80211 so it + * knows we no longer have frames buffered for the station on + * this TID (for the TIM bitmap calculation.) + */ + ieee80211_sta_set_buffered(sta, tid, false); + } + + if (tid_data->ssn != tid_data->next_reclaimed) + return; + + switch (tid_data->state) { + case IWL_EMPTYING_HW_QUEUE_ADDBA: + IWL_DEBUG_TX_QUEUES(mvm, + "Can continue addBA flow ssn = next_recl = %d\n", + tid_data->next_reclaimed); + tid_data->state = IWL_AGG_STARTING; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + + case IWL_EMPTYING_HW_QUEUE_DELBA: + IWL_DEBUG_TX_QUEUES(mvm, + "Can continue DELBA flow ssn = next_recl = %d\n", + tid_data->next_reclaimed); + iwl_mvm_disable_txq(mvm, tid_data->txq_id, + vif->hw_queue[tid_to_mac80211_ac[tid]], tid, + CMD_ASYNC); + tid_data->state = IWL_AGG_OFF; + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + + default: + break; + } +} + +#ifdef CONFIG_IWLWIFI_DEBUG +const char *iwl_mvm_get_tx_fail_reason(u32 status) +{ +#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x +#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x + + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_POSTPONE(DELAY); + TX_STATUS_POSTPONE(FEW_BYTES); + TX_STATUS_POSTPONE(BT_PRIO); + TX_STATUS_POSTPONE(QUIET_PERIOD); + TX_STATUS_POSTPONE(CALC_TTAK); + TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); + TX_STATUS_FAIL(SHORT_LIMIT); + TX_STATUS_FAIL(LONG_LIMIT); + TX_STATUS_FAIL(UNDERRUN); + TX_STATUS_FAIL(DRAIN_FLOW); + TX_STATUS_FAIL(RFKILL_FLUSH); + TX_STATUS_FAIL(LIFE_EXPIRE); + TX_STATUS_FAIL(DEST_PS); + TX_STATUS_FAIL(HOST_ABORTED); + TX_STATUS_FAIL(BT_RETRY); + TX_STATUS_FAIL(STA_INVALID); + TX_STATUS_FAIL(FRAG_DROPPED); + TX_STATUS_FAIL(TID_DISABLE); + TX_STATUS_FAIL(FIFO_FLUSHED); + TX_STATUS_FAIL(SMALL_CF_POLL); + TX_STATUS_FAIL(FW_DROP); + TX_STATUS_FAIL(STA_COLOR_MISMATCH); + } + + return "UNKNOWN"; + +#undef TX_STATUS_FAIL +#undef TX_STATUS_POSTPONE +} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, + enum ieee80211_band band, + struct ieee80211_tx_rate *r) +{ + if (rate_n_flags & RATE_HT_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + switch (rate_n_flags & 
RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case RATE_MCS_CHAN_WIDTH_80: + r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + case RATE_MCS_CHAN_WIDTH_160: + r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; + break; + } + if (rate_n_flags & RATE_MCS_SGI_MSK) + r->flags |= IEEE80211_TX_RC_SHORT_GI; + if (rate_n_flags & RATE_MCS_HT_MSK) { + r->flags |= IEEE80211_TX_RC_MCS; + r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + ieee80211_rate_set_vht( + r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1); + r->flags |= IEEE80211_TX_RC_VHT_MCS; + } else { + r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + band); + } +} + +/** + * translate ucode response to mac80211 tx status control values + */ +static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *r = &info->status.rates[0]; + + info->status.antenna = + ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); + iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); +} + +static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct ieee80211_sta *sta; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); + int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); + u32 status = le16_to_cpu(tx_resp->status.status); + u16 ssn = iwl_mvm_get_scd_ssn(tx_resp); + struct iwl_mvm_sta *mvmsta; + struct sk_buff_head skbs; + u8 skb_freed = 0; + u16 next_reclaimed, seq_ctl; + + __skb_queue_head_init(&skbs); + + seq_ctl = le16_to_cpu(tx_resp->seq_ctl); + + /* we can free until ssn % q.n_bd not inclusive */ + iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); + + while (!skb_queue_empty(&skbs)) { + struct sk_buff *skb = __skb_dequeue(&skbs); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + skb_freed++; + + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); + + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + + /* inform mac80211 about what happened with the frame */ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + info->flags |= IEEE80211_TX_STAT_ACK; + break; + case TX_STATUS_FAIL_DEST_PS: + info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + break; + default: + break; + } + + info->status.rates[0].count = tx_resp->failure_frame + 1; + iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), + info); + info->status.status_driver_data[1] = + (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); + + /* Single frame failure in an AMPDU queue => send BAR */ + if (txq_id >= mvm->first_agg_queue && + !(info->flags & IEEE80211_TX_STAT_ACK) && + !(info->flags & IEEE80211_TX_STAT_TX_FILTERED)) + info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + + /* W/A FW bug: seq_ctl is wrong when the status isn't success */ + if (status != TX_STATUS_SUCCESS) { + struct ieee80211_hdr *hdr = (void *)skb->data; + seq_ctl = le16_to_cpu(hdr->seq_ctrl); + } + + /* + * TODO: this is not accurate if we are freeing more than one + * packet. 
+ */ + info->status.tx_time = + le16_to_cpu(tx_resp->wireless_media_time); + BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); + info->status.status_driver_data[0] = + (void *)(uintptr_t)tx_resp->reduced_tpc; + + ieee80211_tx_status(mvm->hw, skb); + } + + if (txq_id >= mvm->first_agg_queue) { + /* If this is an aggregation queue, we use the ssn since: + * ssn = wifi seq_num % 256. + * The seq_ctl is the sequence control of the packet to which + * this Tx response relates. But if there is a hole in the + * bitmap of the BA we received, this Tx response may allow to + * reclaim the hole and all the subsequent packets that were + * already acked. In that case, seq_ctl != ssn, and the next + * packet to be reclaimed will be ssn and not seq_ctl. In that + * case, several packets will be reclaimed even if + * frame_count = 1. + * + * The ssn is the index (% 256) of the latest packet that has + * treated (acked / dropped) + 1. + */ + next_reclaimed = ssn; + } else { + /* The next packet to be reclaimed is the one after this one */ + next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10); + } + + IWL_DEBUG_TX_REPLY(mvm, + "TXQ %d status %s (0x%08x)\n", + txq_id, iwl_mvm_get_tx_fail_reason(status), status); + + IWL_DEBUG_TX_REPLY(mvm, + "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n", + le32_to_cpu(tx_resp->initial_rate), + tx_resp->failure_frame, SEQ_TO_INDEX(sequence), + ssn, next_reclaimed, seq_ctl); + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + /* + * sta can't be NULL otherwise it'd mean that the sta has been freed in + * the firmware while we still have packets for it in the Tx queues. + */ + if (WARN_ON_ONCE(!sta)) + goto out; + + if (!IS_ERR(sta)) { + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (tid != IWL_TID_NON_QOS) { + struct iwl_mvm_tid_data *tid_data = + &mvmsta->tid_data[tid]; + + spin_lock_bh(&mvmsta->lock); + tid_data->next_reclaimed = next_reclaimed; + IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", + next_reclaimed); + iwl_mvm_check_ratid_empty(mvm, sta, tid); + spin_unlock_bh(&mvmsta->lock); + } + + if (mvmsta->next_status_eosp) { + mvmsta->next_status_eosp = false; + ieee80211_sta_eosp(sta); + } + } else { + mvmsta = NULL; + } + + /* + * If the txq is not an AMPDU queue, there is no chance we freed + * several skbs. Check that out... + */ + if (txq_id >= mvm->first_agg_queue) + goto out; + + /* We can't free more than one frame at once on a shared queue */ + WARN_ON(skb_freed > 1); + + /* If we have still frames for this STA nothing to do here */ + if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) + goto out; + + if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) { + + /* + * If there are no pending frames for this STA and + * the tx to this station is not disabled, notify + * mac80211 that this station can now wake up in its + * STA table. + * If mvmsta is not NULL, sta is valid. + */ + + spin_lock_bh(&mvmsta->lock); + + if (!mvmsta->disable_tx) + ieee80211_sta_block_awake(mvm->hw, sta, false); + + spin_unlock_bh(&mvmsta->lock); + } + + if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) { + /* + * We are draining and this was the last packet - pre_rcu_remove + * has been called already. We might be after the + * synchronize_net already. + * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues. 
+ */ + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); + } + +out: + rcu_read_unlock(); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x +static const char *iwl_get_agg_tx_status(u16 status) +{ + switch (status & AGG_TX_STATE_STATUS_MSK) { + AGG_TX_STATE_(TRANSMITTED); + AGG_TX_STATE_(UNDERRUN); + AGG_TX_STATE_(BT_PRIO); + AGG_TX_STATE_(FEW_BYTES); + AGG_TX_STATE_(ABORT); + AGG_TX_STATE_(LAST_SENT_TTL); + AGG_TX_STATE_(LAST_SENT_TRY_CNT); + AGG_TX_STATE_(LAST_SENT_BT_KILL); + AGG_TX_STATE_(SCD_QUERY); + AGG_TX_STATE_(TEST_BAD_CRC32); + AGG_TX_STATE_(RESPONSE); + AGG_TX_STATE_(DUMP_TX); + AGG_TX_STATE_(DELAY_TX); + } + + return "UNKNOWN"; +} + +static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + struct agg_tx_status *frame_status = &tx_resp->status; + int i; + + for (i = 0; i < tx_resp->frame_count; i++) { + u16 fstatus = le16_to_cpu(frame_status[i].status); + + IWL_DEBUG_TX_REPLY(mvm, + "status %s (0x%04x), try-count (%d) seq (0x%x)\n", + iwl_get_agg_tx_status(fstatus), + fstatus & AGG_TX_STATE_STATUS_MSK, + (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >> + AGG_TX_STATE_TRY_CNT_POS, + le16_to_cpu(frame_status[i].sequence)); + } +} +#else +static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); + int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + struct ieee80211_sta *sta; + + if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue)) + return; + + if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) + return; + + iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmsta->tid_data[tid].rate_n_flags = + le32_to_cpu(tx_resp->initial_rate); + mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc; + mvmsta->tid_data[tid].tx_time = + le16_to_cpu(tx_resp->wireless_media_time); + } + + rcu_read_unlock(); +} + +void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + + if (tx_resp->frame_count == 1) + iwl_mvm_rx_tx_cmd_single(mvm, pkt); + else + iwl_mvm_rx_tx_cmd_agg(mvm, pkt); +} + +static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, + struct iwl_mvm_ba_notif *ba_notif, + struct iwl_mvm_tid_data *tid_data) +{ + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_len = ba_notif->txed_2_done; + info->status.ampdu_len = ba_notif->txed; + iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, + info); + /* TODO: not accounted if the whole A-MPDU failed */ + info->status.tx_time = tid_data->tx_time; + info->status.status_driver_data[0] = + (void *)(uintptr_t)tid_data->reduced_tpc; + info->status.status_driver_data[1] = + (void *)(uintptr_t)tid_data->rate_n_flags; +} + +void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; + struct sk_buff_head reclaimed_skbs; + struct 
iwl_mvm_tid_data *tid_data; + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + struct sk_buff *skb; + int sta_id, tid, freed; + /* "flow" corresponds to Tx queue */ + u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); + /* "ssn" is start of block-ack Tx window, corresponds to index + * (in Tx queue's circular buffer) of first TFD/frame in window */ + u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); + + sta_id = ba_notif->sta_id; + tid = ba_notif->tid; + + if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || + tid >= IWL_MAX_TID_COUNT, + "sta_id %d tid %d", sta_id, tid)) + return; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + /* Reclaiming frames for a station that has been deleted ? */ + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return; + } + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + tid_data = &mvmsta->tid_data[tid]; + + if (tid_data->txq_id != scd_flow) { + IWL_ERR(mvm, + "invalid BA notification: Q %d, tid %d, flow %d\n", + tid_data->txq_id, tid, scd_flow); + rcu_read_unlock(); + return; + } + + spin_lock_bh(&mvmsta->lock); + + __skb_queue_head_init(&reclaimed_skbs); + + /* + * Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). + */ + iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn, + &reclaimed_skbs); + + IWL_DEBUG_TX_REPLY(mvm, + "BA_NOTIFICATION Received from %pM, sta_id = %d\n", + (u8 *)&ba_notif->sta_addr_lo32, + ba_notif->sta_id); + IWL_DEBUG_TX_REPLY(mvm, + "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", + ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), + (unsigned long long)le64_to_cpu(ba_notif->bitmap), + scd_flow, ba_resp_scd_ssn, ba_notif->txed, + ba_notif->txed_2_done); + + tid_data->next_reclaimed = ba_resp_scd_ssn; + + iwl_mvm_check_ratid_empty(mvm, sta, tid); + + freed = 0; + + skb_queue_walk(&reclaimed_skbs, skb) { + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (ieee80211_is_data_qos(hdr->frame_control)) + freed++; + else + WARN_ON_ONCE(1); + + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); + /* Packet was transmitted successfully, failures come as single + * frames because before failing a frame the firmware transmits + * it without aggregation at least once. + */ + info->flags |= IEEE80211_TX_STAT_ACK; + + /* this is the first skb we deliver in this batch */ + /* put the rate scaling data there */ + if (freed == 1) + iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data); + } + + spin_unlock_bh(&mvmsta->lock); + + /* We got a BA notif with 0 acked or scd_ssn didn't progress which is + * possible (i.e. first MPDU in the aggregation wasn't acked) + * Still it's important to update RS about sent vs. acked. + */ + if (skb_queue_empty(&reclaimed_skbs)) { + struct ieee80211_tx_info ba_info = {}; + struct ieee80211_chanctx_conf *chanctx_conf = NULL; + + if (mvmsta->vif) + chanctx_conf = + rcu_dereference(mvmsta->vif->chanctx_conf); + + if (WARN_ON_ONCE(!chanctx_conf)) + goto out; + + ba_info.band = chanctx_conf->def.chan->band; + iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data); + + IWL_DEBUG_TX_REPLY(mvm, "No reclaim. 
Update rs directly\n"); + iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info); + } + +out: + rcu_read_unlock(); + + while (!skb_queue_empty(&reclaimed_skbs)) { + skb = __skb_dequeue(&reclaimed_skbs); + ieee80211_tx_status(mvm->hw, skb); + } +} + +/* + * Note that there are transports that buffer frames before they reach + * the firmware. This means that after flush_tx_path is called, the + * queue might not be empty. The race-free way to handle this is to: + * 1) set the station as draining + * 2) flush the Tx path + * 3) wait for the transport queues to be empty + */ +int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags) +{ + int ret; + struct iwl_tx_path_flush_cmd flush_cmd = { + .queues_ctl = cpu_to_le32(tfd_msk), + .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), + }; + + ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, + sizeof(flush_cmd), &flush_cmd); + if (ret) + IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c new file mode 100644 index 000000000000..ad0f16909e2e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -0,0 +1,1083 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include + +#include "iwl-debug.h" +#include "iwl-io.h" +#include "iwl-prph.h" + +#include "mvm.h" +#include "fw-api-rs.h" + +/* + * Will return 0 even if the cmd failed when RFKILL is asserted unless + * CMD_WANT_SKB is set in cmd->flags. + */ +int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) +{ + int ret; + +#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) + if (WARN_ON(mvm->d3_test_active)) + return -EIO; +#endif + + /* + * Synchronous commands from this op-mode must hold + * the mutex, this ensures we don't try to send two + * (or more) synchronous commands at a time. + */ + if (!(cmd->flags & CMD_ASYNC)) + lockdep_assert_held(&mvm->mutex); + + ret = iwl_trans_send_cmd(mvm->trans, cmd); + + /* + * If the caller wants the SKB, then don't hide any problems, the + * caller might access the response buffer which will be NULL if + * the command failed. + */ + if (cmd->flags & CMD_WANT_SKB) + return ret; + + /* Silently ignore failures if RFKILL is asserted */ + if (!ret || ret == -ERFKILL) + return 0; + return ret; +} + +int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, + u32 flags, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = { len, }, + .data = { data, }, + .flags = flags, + }; + + return iwl_mvm_send_cmd(mvm, &cmd); +} + +/* + * We assume that the caller set the status to the success value + */ +int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, + u32 *status) +{ + struct iwl_rx_packet *pkt; + struct iwl_cmd_response *resp; + int ret, resp_len; + + lockdep_assert_held(&mvm->mutex); + +#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) + if (WARN_ON(mvm->d3_test_active)) + return -EIO; +#endif + + /* + * Only synchronous commands can wait for status, + * we use WANT_SKB so the caller can't. + */ + if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB), + "cmd flags %x", cmd->flags)) + return -EINVAL; + + cmd->flags |= CMD_WANT_SKB; + + ret = iwl_trans_send_cmd(mvm->trans, cmd); + if (ret == -ERFKILL) { + /* + * The command failed because of RFKILL, don't update + * the status, leave it as success and return 0. 
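+ * The caller is assumed to have pre-set *status to the success value + * (see the comment above this function), so it can be left untouched.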
+ */ + return 0; + } else if (ret) { + return ret; + } + + pkt = cmd->resp_pkt; + /* Can happen if RFKILL is asserted */ + if (!pkt) { + ret = 0; + goto out_free_resp; + } + + resp_len = iwl_rx_packet_payload_len(pkt); + if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { + ret = -EIO; + goto out_free_resp; + } + + resp = (void *)pkt->data; + *status = le32_to_cpu(resp->status); + out_free_resp: + iwl_free_resp(cmd); + return ret; +} + +/* + * We assume that the caller set the status to the success value + */ +int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, + const void *data, u32 *status) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = { len, }, + .data = { data, }, + }; + + return iwl_mvm_send_cmd_status(mvm, &cmd, status); +} + +#define IWL_DECLARE_RATE_INFO(r) \ + [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP + +/* + * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP + */ +static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1), + IWL_DECLARE_RATE_INFO(2), + IWL_DECLARE_RATE_INFO(5), + IWL_DECLARE_RATE_INFO(11), + IWL_DECLARE_RATE_INFO(6), + IWL_DECLARE_RATE_INFO(9), + IWL_DECLARE_RATE_INFO(12), + IWL_DECLARE_RATE_INFO(18), + IWL_DECLARE_RATE_INFO(24), + IWL_DECLARE_RATE_INFO(36), + IWL_DECLARE_RATE_INFO(48), + IWL_DECLARE_RATE_INFO(54), +}; + +int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, + enum ieee80211_band band) +{ + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; + int idx; + int band_offset = 0; + + /* Legacy rate format, search for match in table */ + if (band == IEEE80211_BAND_5GHZ) + band_offset = IWL_FIRST_OFDM_RATE; + for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) + if (fw_rate_idx_to_plcp[idx] == rate) + return idx - band_offset; + + return -1; +} + +u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) +{ + /* Get PLCP rate for tx_cmd->rate_n_flags */ + return fw_rate_idx_to_plcp[rate_idx]; +} + +void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_error_resp *err_resp = (void *)pkt->data; + + IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n", + le32_to_cpu(err_resp->error_type), err_resp->cmd_id); + IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", + le16_to_cpu(err_resp->bad_cmd_seq_num), + le32_to_cpu(err_resp->error_service)); + IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", + le64_to_cpu(err_resp->timestamp)); +} + +/* + * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h. + * The parameter should also be a combination of ANT_[ABC]. + */ +u8 first_antenna(u8 mask) +{ + BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */ + if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */ + return BIT(0); + return BIT(ffs(mask) - 1); +} + +/* + * Toggles between TX antennas to send the probe request on. + * Receives the bitmask of valid TX antennas and the *index* used + * for the last TX, and returns the next valid *index* to use. + * In order to set it in the tx_cmd, must do BIT(idx).
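+ * e.g. with valid = ANT_A | ANT_B (i.e. bits 0 and 1 set) and + * last_idx = 0, this returns 1 and the caller would put BIT(1) in the + * rate flags of the TX command.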
+ */ +u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) +{ + u8 ind = last_idx; + int i; + + for (i = 0; i < RATE_MCS_ANT_NUM; i++) { + ind = (ind + 1) % RATE_MCS_ANT_NUM; + if (valid & BIT(ind)) + return ind; + } + + WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); + return last_idx; +} + +static const struct { + const char *name; + u8 num; +} advanced_lookup[] = { + { "NMI_INTERRUPT_WDG", 0x34 }, + { "SYSASSERT", 0x35 }, + { "UCODE_VERSION_MISMATCH", 0x37 }, + { "BAD_COMMAND", 0x38 }, + { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, + { "FATAL_ERROR", 0x3D }, + { "NMI_TRM_HW_ERR", 0x46 }, + { "NMI_INTERRUPT_TRM", 0x4C }, + { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, + { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, + { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, + { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_ACTION_PT", 0x7C }, + { "NMI_INTERRUPT_UNKNOWN", 0x84 }, + { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "ADVANCED_SYSASSERT", 0 }, +}; + +static const char *desc_lookup(u32 num) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) + if (advanced_lookup[i].num == num) + return advanced_lookup[i].name; + + /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ + return advanced_lookup[i].name; +} + +/* + * Note: This structure is read from the device with IO accesses, + * and the reading already does the endian conversion. As it is + * read with u32-sized accesses, any members with a different size + * need to be ordered correctly though! + */ +struct iwl_error_event_table_v1 { + u32 valid; /* (nonzero) valid, (0) log is empty */ + u32 error_id; /* type of error */ + u32 pc; /* program counter */ + u32 blink1; /* branch link */ + u32 blink2; /* branch link */ + u32 ilink1; /* interrupt link */ + u32 ilink2; /* interrupt link */ + u32 data1; /* error-specific data */ + u32 data2; /* error-specific data */ + u32 data3; /* error-specific data */ + u32 bcon_time; /* beacon timer */ + u32 tsf_low; /* network timestamp function timer */ + u32 tsf_hi; /* network timestamp function timer */ + u32 gp1; /* GP1 timer register */ + u32 gp2; /* GP2 timer register */ + u32 gp3; /* GP3 timer register */ + u32 ucode_ver; /* uCode version */ + u32 hw_ver; /* HW Silicon version */ + u32 brd_ver; /* HW board version */ + u32 log_pc; /* log program counter */ + u32 frame_ptr; /* frame pointer */ + u32 stack_ptr; /* stack pointer */ + u32 hcmd; /* last host command header */ + u32 isr0; /* isr status register LMPM_NIC_ISR0: + * rxtx_flag */ + u32 isr1; /* isr status register LMPM_NIC_ISR1: + * host_flag */ + u32 isr2; /* isr status register LMPM_NIC_ISR2: + * enc_flag */ + u32 isr3; /* isr status register LMPM_NIC_ISR3: + * time_flag */ + u32 isr4; /* isr status register LMPM_NIC_ISR4: + * wico interrupt */ + u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ + u32 wait_event; /* wait event() caller address */ + u32 l2p_control; /* L2pControlField */ + u32 l2p_duration; /* L2pDurationField */ + u32 l2p_mhvalid; /* L2pMhValidBits */ + u32 l2p_addr_match; /* L2pAddrMatchStat */ + u32 lmpm_pmg_sel; /* indicate which clocks are turned on + * (LMPM_PMG_SEL) */ + u32 u_timestamp; /* indicate when the date and time of the + * compilation */ + u32 flow_handler; /* FH read/write pointers, RX credit */ +} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */; + +struct iwl_error_event_table { + u32 valid; /* (nonzero) valid, (0) log is empty */ + u32 error_id; /* type of error */ + u32 pc; /* program counter */ + u32 blink1; /* branch link */ + u32 blink2; /* branch 
link */ + u32 ilink1; /* interrupt link */ + u32 ilink2; /* interrupt link */ + u32 data1; /* error-specific data */ + u32 data2; /* error-specific data */ + u32 data3; /* error-specific data */ + u32 bcon_time; /* beacon timer */ + u32 tsf_low; /* network timestamp function timer */ + u32 tsf_hi; /* network timestamp function timer */ + u32 gp1; /* GP1 timer register */ + u32 gp2; /* GP2 timer register */ + u32 gp3; /* GP3 timer register */ + u32 major; /* uCode version major */ + u32 minor; /* uCode version minor */ + u32 hw_ver; /* HW Silicon version */ + u32 brd_ver; /* HW board version */ + u32 log_pc; /* log program counter */ + u32 frame_ptr; /* frame pointer */ + u32 stack_ptr; /* stack pointer */ + u32 hcmd; /* last host command header */ + u32 isr0; /* isr status register LMPM_NIC_ISR0: + * rxtx_flag */ + u32 isr1; /* isr status register LMPM_NIC_ISR1: + * host_flag */ + u32 isr2; /* isr status register LMPM_NIC_ISR2: + * enc_flag */ + u32 isr3; /* isr status register LMPM_NIC_ISR3: + * time_flag */ + u32 isr4; /* isr status register LMPM_NIC_ISR4: + * wico interrupt */ + u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ + u32 wait_event; /* wait event() caller address */ + u32 l2p_control; /* L2pControlField */ + u32 l2p_duration; /* L2pDurationField */ + u32 l2p_mhvalid; /* L2pMhValidBits */ + u32 l2p_addr_match; /* L2pAddrMatchStat */ + u32 lmpm_pmg_sel; /* indicate which clocks are turned on + * (LMPM_PMG_SEL) */ + u32 u_timestamp; /* indicate when the date and time of the + * compilation */ + u32 flow_handler; /* FH read/write pointers, RX credit */ +} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */; + +/* + * UMAC error struct - relevant starting from family 8000 chip. + * Note: This structure is read from the device with IO accesses, + * and the reading already does the endian conversion. As it is + * read with u32-sized accesses, any members with a different size + * need to be ordered correctly though! + */ +struct iwl_umac_error_event_table { + u32 valid; /* (nonzero) valid, (0) log is empty */ + u32 error_id; /* type of error */ + u32 blink1; /* branch link */ + u32 blink2; /* branch link */ + u32 ilink1; /* interrupt link */ + u32 ilink2; /* interrupt link */ + u32 data1; /* error-specific data */ + u32 data2; /* error-specific data */ + u32 data3; /* error-specific data */ + u32 umac_major; + u32 umac_minor; + u32 frame_pointer; /* core register 27*/ + u32 stack_pointer; /* core register 28 */ + u32 cmd_header; /* latest host cmd sent to UMAC */ + u32 nic_isr_pref; /* ISR status register */ +} __packed; + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) +{ + struct iwl_trans *trans = mvm->trans; + struct iwl_umac_error_event_table table; + u32 base; + + base = mvm->umac_error_event_table; + + if (base < 0x800000) { + IWL_ERR(mvm, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, + (mvm->cur_ucode == IWL_UCODE_INIT) + ? 
"Init" : "RT"); + return; + } + + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { + IWL_ERR(trans, "Start IWL Error Log Dump:\n"); + IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", + mvm->status, table.valid); + } + + IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, + desc_lookup(table.error_id)); + IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); + IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); + IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); + IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); + IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); + IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); + IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); + IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major); + IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor); + IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); + IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); + IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); + IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); +} + +static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm) +{ + struct iwl_trans *trans = mvm->trans; + struct iwl_error_event_table_v1 table; + u32 base; + + base = mvm->error_event_table; + if (mvm->cur_ucode == IWL_UCODE_INIT) { + if (!base) + base = mvm->fw->init_errlog_ptr; + } else { + if (!base) + base = mvm->fw->inst_errlog_ptr; + } + + if (base < 0x800000) { + IWL_ERR(mvm, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, + (mvm->cur_ucode == IWL_UCODE_INIT) + ? "Init" : "RT"); + return; + } + + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { + IWL_ERR(trans, "Start IWL Error Log Dump:\n"); + IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", + mvm->status, table.valid); + } + + /* Do not change this output - scripts rely on it */ + + IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); + + trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, + table.data1, table.data2, table.data3, + table.blink1, table.blink2, table.ilink1, + table.ilink2, table.bcon_time, table.gp1, + table.gp2, table.gp3, table.ucode_ver, 0, + table.hw_ver, table.brd_ver); + IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, + desc_lookup(table.error_id)); + IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); + IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); + IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); + IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); + IWL_ERR(mvm, "0x%08X | data1\n", table.data1); + IWL_ERR(mvm, "0x%08X | data2\n", table.data2); + IWL_ERR(mvm, "0x%08X | data3\n", table.data3); + IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); + IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); + IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); + IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); + IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); + IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver); + IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); + IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); + IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); + IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); + IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); + IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); + IWL_ERR(mvm, 
"0x%08X | isr3\n", table.isr3); + IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); + IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); + IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); + IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); + IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); + IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); + IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); + IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); + IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); + + if (mvm->support_umac_log) + iwl_mvm_dump_umac_error_log(mvm); +} + +void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) +{ + struct iwl_trans *trans = mvm->trans; + struct iwl_error_event_table table; + u32 base; + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) { + iwl_mvm_dump_nic_error_log_old(mvm); + return; + } + + base = mvm->error_event_table; + if (mvm->cur_ucode == IWL_UCODE_INIT) { + if (!base) + base = mvm->fw->init_errlog_ptr; + } else { + if (!base) + base = mvm->fw->inst_errlog_ptr; + } + + if (base < 0x800000) { + IWL_ERR(mvm, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, + (mvm->cur_ucode == IWL_UCODE_INIT) + ? "Init" : "RT"); + return; + } + + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { + IWL_ERR(trans, "Start IWL Error Log Dump:\n"); + IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", + mvm->status, table.valid); + } + + /* Do not change this output - scripts rely on it */ + + IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); + + trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, + table.data1, table.data2, table.data3, + table.blink1, table.blink2, table.ilink1, + table.ilink2, table.bcon_time, table.gp1, + table.gp2, table.gp3, table.major, + table.minor, table.hw_ver, table.brd_ver); + IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, + desc_lookup(table.error_id)); + IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); + IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); + IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); + IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); + IWL_ERR(mvm, "0x%08X | data1\n", table.data1); + IWL_ERR(mvm, "0x%08X | data2\n", table.data2); + IWL_ERR(mvm, "0x%08X | data3\n", table.data3); + IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); + IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); + IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); + IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); + IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); + IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); + IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); + IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); + IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); + IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); + IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); + IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); + IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); + IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); + IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); + IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); + IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); + IWL_ERR(mvm, 
"0x%08X | l2p_duration\n", table.l2p_duration); + IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); + IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); + IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); + IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); + IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); + + if (mvm->support_umac_log) + iwl_mvm_dump_umac_error_log(mvm); +} + +int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq) +{ + int i; + + lockdep_assert_held(&mvm->queue_info_lock); + + for (i = minq; i <= maxq; i++) + if (mvm->queue_info[i].hw_queue_refcount == 0 && + !mvm->queue_info[i].setup_reserved) + return i; + + return -ENOSPC; +} + +void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout) +{ + bool enable_queue = true; + + spin_lock_bh(&mvm->queue_info_lock); + + /* Make sure this TID isn't already enabled */ + if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) { + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n", + cfg->tid); + return; + } + + /* Update mappings and refcounts */ + mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue); + mvm->queue_info[queue].hw_queue_refcount++; + if (mvm->queue_info[queue].hw_queue_refcount > 1) + enable_queue = false; + mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); + + IWL_DEBUG_TX_QUEUES(mvm, + "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", + queue, mvm->queue_info[queue].hw_queue_refcount, + mvm->queue_info[queue].hw_queue_to_mac80211); + + spin_unlock_bh(&mvm->queue_info_lock); + + /* Send the enabling command if we need to */ + if (enable_queue) { + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .enable = 1, + .window = cfg->frame_limit, + .sta_id = cfg->sta_id, + .ssn = cpu_to_le16(ssn), + .tx_fifo = cfg->fifo, + .aggregate = cfg->aggregate, + .tid = cfg->tid, + }; + + iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, + wdg_timeout); + WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), + &cmd), + "Failed to configure queue %d on FIFO %d\n", queue, + cfg->fifo); + } +} + +void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 tid, u8 flags) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .enable = 0, + }; + bool remove_mac_queue = true; + int ret; + + spin_lock_bh(&mvm->queue_info_lock); + + if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { + spin_unlock_bh(&mvm->queue_info_lock); + return; + } + + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); + + /* + * If there is another TID with the same AC - don't remove the MAC queue + * from the mapping + */ + if (tid < IWL_MAX_TID_COUNT) { + unsigned long tid_bitmap = + mvm->queue_info[queue].tid_bitmap; + int ac = tid_to_mac80211_ac[tid]; + int i; + + for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { + if (tid_to_mac80211_ac[i] == ac) + remove_mac_queue = false; + } + } + + if (remove_mac_queue) + mvm->queue_info[queue].hw_queue_to_mac80211 &= + ~BIT(mac80211_queue); + mvm->queue_info[queue].hw_queue_refcount--; + + cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 
1 : 0; + + IWL_DEBUG_TX_QUEUES(mvm, + "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", + queue, + mvm->queue_info[queue].hw_queue_refcount, + mvm->queue_info[queue].hw_queue_to_mac80211); + + /* If the queue is still enabled - nothing left to do in this func */ + if (cmd.enable) { + spin_unlock_bh(&mvm->queue_info_lock); + return; + } + + /* Make sure queue info is correct even though we overwrite it */ + WARN(mvm->queue_info[queue].hw_queue_refcount || + mvm->queue_info[queue].tid_bitmap || + mvm->queue_info[queue].hw_queue_to_mac80211, + "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", + queue, mvm->queue_info[queue].hw_queue_refcount, + mvm->queue_info[queue].hw_queue_to_mac80211, + mvm->queue_info[queue].tid_bitmap); + + /* If we are here - the queue is freed and we can zero out these vals */ + mvm->queue_info[queue].hw_queue_refcount = 0; + mvm->queue_info[queue].tid_bitmap = 0; + mvm->queue_info[queue].hw_queue_to_mac80211 = 0; + + spin_unlock_bh(&mvm->queue_info_lock); + + iwl_trans_txq_disable(mvm->trans, queue, false); + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", + queue, ret); +} + +/** + * iwl_mvm_send_lq_cmd() - Send link quality command + * @init: This command is sent as part of station initialization right + * after station has been added. + * + * The link quality command is sent as the last step of station creation. + * This is the special case in which init is set and we call a callback in + * this case to clear the state indicating that station creation is in + * progress. + */ +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) +{ + struct iwl_host_cmd cmd = { + .id = LQ_CMD, + .len = { sizeof(struct iwl_lq_cmd), }, + .flags = init ? 0 : CMD_ASYNC, + .data = { lq, }, + }; + + if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT)) + return -EINVAL; + + return iwl_mvm_send_cmd(mvm, &cmd); +} + +/** + * iwl_mvm_update_smps - Get a request to change the SMPS mode + * @req_type: The part of the driver that calls for a change. + * @smps_request: The request to change the SMPS mode. + * + * Get a request to change the SMPS mode, + * and change it according to all other requests in the driver. + */ +void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request) +{ + struct iwl_mvm_vif *mvmvif; + enum ieee80211_smps_mode smps_mode; + int i; + + lockdep_assert_held(&mvm->mutex); + + /* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */ + if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) + return; + + if (vif->type == NL80211_IFTYPE_AP) + smps_mode = IEEE80211_SMPS_OFF; + else + smps_mode = IEEE80211_SMPS_AUTOMATIC; + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + mvmvif->smps_requests[req_type] = smps_request; + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { + if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { + smps_mode = IEEE80211_SMPS_STATIC; + break; + } + if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) + smps_mode = IEEE80211_SMPS_DYNAMIC; + } + + ieee80211_request_smps(vif, smps_mode); +} + +int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) +{ + struct iwl_statistics_cmd scmd = { + .flags = clear ?
cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, + }; + struct iwl_host_cmd cmd = { + .id = STATISTICS_CMD, + .len[0] = sizeof(scmd), + .data[0] = &scmd, + .flags = CMD_WANT_SKB, + }; + int ret; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); + iwl_free_resp(&cmd); + + if (clear) + iwl_mvm_accu_radio_stats(mvm); + + return 0; +} + +void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm) +{ + mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time; + mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time; + mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf; + mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan; +} + +static void iwl_mvm_diversity_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool *result = _data; + int i; + + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { + if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC || + mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) + *result = false; + } +} + +bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) +{ + bool result = true; + + lockdep_assert_held(&mvm->mutex); + + if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) + return false; + + if (mvm->cfg->rx_with_siso_diversity) + return false; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_diversity_iter, &result); + + return result; +} + +int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool value) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int res; + + lockdep_assert_held(&mvm->mutex); + + if (mvmvif->low_latency == value) + return 0; + + mvmvif->low_latency = value; + + res = iwl_mvm_update_quotas(mvm, false, NULL); + if (res) + return res; + + iwl_mvm_bt_coex_vif_change(mvm); + + return iwl_mvm_power_update_mac(mvm); +} + +static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) +{ + bool *result = _data; + + if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif))) + *result = true; +} + +bool iwl_mvm_low_latency(struct iwl_mvm *mvm) +{ + bool result = false; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_ll_iter, &result); + + return result; +} + +struct iwl_bss_iter_data { + struct ieee80211_vif *vif; + bool error; +}; + +static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_bss_iter_data *data = _data; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return; + + if (data->vif) { + data->error = true; + return; + } + + data->vif = vif; +} + +struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) +{ + struct iwl_bss_iter_data bss_iter_data = {}; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_bss_iface_iterator, &bss_iter_data); + + if (bss_iter_data.error) { + IWL_ERR(mvm, "More than one managed interface active!\n"); + return ERR_PTR(-EINVAL); + } + + return bss_iter_data.vif; +} + +unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool tdls, bool cmd_q) +{ + struct iwl_fw_dbg_trigger_tlv *trigger; + struct iwl_fw_dbg_trigger_txq_timer *txq_timer; + unsigned int default_timeout = + cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) + return iwlmvm_mod_params.tfd_q_hang_detect ? 
+ default_timeout : IWL_WATCHDOG_DISABLED; + + trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); + txq_timer = (void *)trigger->data; + + if (tdls) + return le32_to_cpu(txq_timer->tdls); + + if (cmd_q) + return le32_to_cpu(txq_timer->command_queue); + + if (WARN_ON(!vif)) + return default_timeout; + + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_ADHOC: + return le32_to_cpu(txq_timer->ibss); + case NL80211_IFTYPE_STATION: + return le32_to_cpu(txq_timer->bss); + case NL80211_IFTYPE_AP: + return le32_to_cpu(txq_timer->softap); + case NL80211_IFTYPE_P2P_CLIENT: + return le32_to_cpu(txq_timer->p2p_client); + case NL80211_IFTYPE_P2P_GO: + return le32_to_cpu(txq_timer->p2p_go); + case NL80211_IFTYPE_P2P_DEVICE: + return le32_to_cpu(txq_timer->p2p_device); + default: + WARN_ON(1); + return mvm->cfg->base_params->wd_timeout; + } +} + +void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + const char *errmsg) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_mlme *trig_mlme; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) + goto out; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); + trig_mlme = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + goto out; + + if (trig_mlme->stop_connection_loss && + --trig_mlme->stop_connection_loss) + goto out; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg); + +out: + ieee80211_connection_loss(vif); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c new file mode 100644 index 000000000000..644b58bc5226 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -0,0 +1,685 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include "iwl-trans.h" +#include "iwl-drv.h" +#include "internal.h" + +#define IWL_PCI_DEVICE(dev, subdev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ + .driver_data = (kernel_ulong_t)&(cfg) + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static const struct pci_device_id iwl_hw_card_ids[] = { +#if IS_ENABLED(CONFIG_IWLDVM) + {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ + +/* 5300 
Series WiFi */ + {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ + +/* 5350 Series WiFi/WiMax */ + {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ + +/* 5150 Series Wifi/WiMax */ + {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ + + {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ + +/* 6x00 Series */ + {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, + +/* 6x05 Series */ + {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ + 
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ + +/* 6x30 Series */ + {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, + +/* 6x50 WiFi/WiMax Series */ + {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, + +/* 6150 WiFi/WiMax Series */ + {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, + +/* 1000 Series WiFi */ + {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, + +/* 100 Series WiFi */ + {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, + {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, + +/* 130 Series WiFi */ + {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, + {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, + +/* 2x00 Series */ + {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, + +/* 2x30 Series */ + {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, + +/* 6x35 Series */ + {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)}, + 
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, + +/* 105 Series */ + {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, + +/* 135 Series */ + {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, +#endif /* CONFIG_IWLDVM */ + +#if IS_ENABLED(CONFIG_IWLMVM) +/* 7260 Series */ + {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, + {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC260, 
iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, + +/* 3160 Series */ + {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, + +/* 3165 Series */ + {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, + +/* 7265 Series */ + {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, + 
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, + +/* 8000 Series */ + {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, + {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, +#endif /* CONFIG_IWLMVM */ + + {0} +}; +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); + +#ifdef CONFIG_ACPI +#define SPL_METHOD "SPLC" +#define SPL_DOMAINTYPE_MODULE BIT(0) +#define SPL_DOMAINTYPE_WIFI BIT(1) +#define SPL_DOMAINTYPE_WIGIG BIT(2) +#define SPL_DOMAINTYPE_RFEM BIT(3) + +static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) +{ + union acpi_object *limits, *domain_type, *power_limit; + + if (splx->type != ACPI_TYPE_PACKAGE || + splx->package.count != 2 || + splx->package.elements[0].type != ACPI_TYPE_INTEGER || + splx->package.elements[0].integer.value != 0) { + IWL_ERR(trans, "Unsupported splx structure\n"); + return 0; + } + + limits = &splx->package.elements[1]; + if (limits->type != ACPI_TYPE_PACKAGE || + limits->package.count < 2 || 
+ limits->package.elements[0].type != ACPI_TYPE_INTEGER || + limits->package.elements[1].type != ACPI_TYPE_INTEGER) { + IWL_ERR(trans, "Invalid limits element\n"); + return 0; + } + + domain_type = &limits->package.elements[0]; + power_limit = &limits->package.elements[1]; + if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { + IWL_DEBUG_INFO(trans, "WiFi power is not limited\n"); + return 0; + } + + return power_limit->integer.value; +} + +static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) +{ + acpi_handle pxsx_handle; + acpi_handle handle; + struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + + pxsx_handle = ACPI_HANDLE(&pdev->dev); + if (!pxsx_handle) { + IWL_DEBUG_INFO(trans, + "Could not retrieve root port ACPI handle\n"); + return; + } + + /* Get the method's handle */ + status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_INFO(trans, "SPL method not found\n"); + return; + } + + /* Call SPLC with no arguments */ + status = acpi_evaluate_object(handle, NULL, NULL, &splx); + if (ACPI_FAILURE(status)) { + IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); + return; + } + + trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); + IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", + trans->dflt_pwr_limit); + kfree(splx.pointer); +} + +#else /* CONFIG_ACPI */ +static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {} +#endif + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; + struct iwl_trans *iwl_trans; + struct iwl_trans_pcie *trans_pcie; + int ret; + + iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); + if (IS_ERR(iwl_trans)) + return PTR_ERR(iwl_trans); + +#if IS_ENABLED(CONFIG_IWLMVM) + /* + * special-case 7265D, it has the same PCI IDs. + * + * Note that because we already pass the cfg to the transport above, + * all the parameters that the transport uses must, until that is + * changed, be identical to the ones in the 7265D configuration. 
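
/*
 * Illustrative stand-alone sketch (not part of this patch): because the
 * 7265 and 7265D parts share PCI IDs, the probe code right below has to
 * look at the hardware revision register instead of the device table.
 * The ex_* names and the mask/type constants are made up for the example;
 * the real ones are CSR_HW_REV_TYPE_MSK and CSR_HW_REV_TYPE_7265D from
 * iwl-csr.h.
 */
#include <assert.h>
#include <stdint.h>

#define EX_HW_REV_TYPE_MSK	0x000fff0	/* placeholder mask  */
#define EX_HW_REV_TYPE_7265D	0x0000210	/* placeholder value */

static int ex_is_7265d(uint32_t hw_rev)
{
	return (hw_rev & EX_HW_REV_TYPE_MSK) == EX_HW_REV_TYPE_7265D;
}

int main(void)
{
	/* bits outside the type mask (stepping, dash) do not matter */
	assert(ex_is_7265d(0x0000213));
	assert(!ex_is_7265d(0x0000110));
	return 0;
}
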
+ */ + if (cfg == &iwl7265_2ac_cfg) + cfg_7265d = &iwl7265d_2ac_cfg; + else if (cfg == &iwl7265_2n_cfg) + cfg_7265d = &iwl7265d_2n_cfg; + else if (cfg == &iwl7265_n_cfg) + cfg_7265d = &iwl7265d_n_cfg; + if (cfg_7265d && + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { + cfg = cfg_7265d; + iwl_trans->cfg = cfg_7265d; + } +#endif + + pci_set_drvdata(pdev, iwl_trans); + + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); + trans_pcie->drv = iwl_drv_start(iwl_trans, cfg); + + if (IS_ERR(trans_pcie->drv)) { + ret = PTR_ERR(trans_pcie->drv); + goto out_free_trans; + } + + set_dflt_pwr_limit(iwl_trans, pdev); + + /* register transport layer debugfs here */ + ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir); + if (ret) + goto out_free_drv; + + return 0; + +out_free_drv: + iwl_drv_stop(trans_pcie->drv); +out_free_trans: + iwl_trans_pcie_free(iwl_trans); + return ret; +} + +static void iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_trans *trans = pci_get_drvdata(pdev); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_drv_stop(trans_pcie->drv); + iwl_trans_pcie_free(trans); +} + +#ifdef CONFIG_PM_SLEEP + +static int iwl_pci_suspend(struct device *device) +{ + /* Before you put code here, think about WoWLAN. You cannot check here + * whether WoWLAN is enabled or not, and your code will run even if + * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. + */ + + return 0; +} + +static int iwl_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill; + + /* Before you put code here, think about WoWLAN. You cannot check here + * whether WoWLAN is enabled or not, and your code will run even if + * WoWLAN is enabled - the NIC may be alive. + */ + + /* + * We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + if (!trans->op_mode) + return 0; + + /* + * Enable rfkill interrupt (in order to keep track of + * the rfkill status) + */ + iwl_enable_rfkill_int(trans); + + hw_rfkill = iwl_is_rfkill_set(trans); + + mutex_lock(&trans_pcie->mutex); + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); + +#define IWL_PM_OPS (&iwl_dev_pm_ops) + +#else + +#define IWL_PM_OPS NULL + +#endif + +static struct pci_driver iwl_pci_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = iwl_pci_remove, + .driver.pm = IWL_PM_OPS, +}; + +int __must_check iwl_pci_register_driver(void) +{ + int ret; + ret = pci_register_driver(&iwl_pci_driver); + if (ret) + pr_err("Unable to initialize PCI module\n"); + + return ret; +} + +void iwl_pci_unregister_driver(void) +{ + pci_unregister_driver(&iwl_pci_driver); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h new file mode 100644 index 000000000000..feb2f7e81134 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -0,0 +1,569 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_trans_int_pcie_h__ +#define __iwl_trans_int_pcie_h__ + +#include +#include +#include +#include +#include +#include + +#include "iwl-fh.h" +#include "iwl-csr.h" +#include "iwl-trans.h" +#include "iwl-debug.h" +#include "iwl-io.h" +#include "iwl-op-mode.h" + +/* We need 2 entries for the TX command and header, and another one might + * be needed for potential data in the SKB's head. The remaining ones can + * be used for frags. + */ +#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3) + +/* + * RX related structures and functions + */ +#define RX_NUM_QUEUES 1 +#define RX_POST_REQ_ALLOC 2 +#define RX_CLAIM_REQ_ALLOC 8 +#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) +#define RX_LOW_WATERMARK 8 + +struct iwl_host_cmd; + +/*This file includes the declaration that are internal to the + * trans_pcie layer */ + +struct iwl_rx_mem_buffer { + dma_addr_t page_dma; + struct page *page; + struct list_head list; +}; + +/** + * struct isr_statistics - interrupt statistics + * + */ +struct isr_statistics { + u32 hw; + u32 sw; + u32 err_code; + u32 sch; + u32 alive; + u32 rfkill; + u32 ctkill; + u32 wakeup; + u32 rx; + u32 tx; + u32 unhandled; +}; + +/** + * struct iwl_rxq - Rx queue + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @read: Shared index to newest available Rx buffer + * @write: Shared index to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @used_count: Number of RBDs handled to allocator to use for allocation + * @write_actual: + * @rx_free: list of RBDs with allocated RB ready for use + * @rx_used: list of RBDs with no RB attached + * @need_update: flag to indicate we need to update read/write index + * @rb_stts: driver's pointer to receive buffer status + * @rb_stts_dma: bus address of receive buffer status + * @lock: + * @pool: initial pool of iwl_rx_mem_buffer for the queue + * @queue: actual rx queue + * + * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers + */ +struct iwl_rxq { + __le32 *bd; + dma_addr_t bd_dma; + u32 read; + u32 write; + u32 free_count; + u32 used_count; + u32 write_actual; + struct list_head rx_free; + struct list_head rx_used; + bool need_update; + struct iwl_rb_status *rb_stts; + dma_addr_t rb_stts_dma; 
+ spinlock_t lock; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; +}; + +/** + * struct iwl_rb_allocator - Rx allocator + * @pool: initial pool of allocator + * @req_pending: number of requests the allcator had not processed yet + * @req_ready: number of requests honored and ready for claiming + * @rbd_allocated: RBDs with pages allocated and ready to be handled to + * the queue. This is a list of &struct iwl_rx_mem_buffer + * @rbd_empty: RBDs with no page attached for allocator use. This is a list + * of &struct iwl_rx_mem_buffer + * @lock: protects the rbd_allocated and rbd_empty lists + * @alloc_wq: work queue for background calls + * @rx_alloc: work struct for background calls + */ +struct iwl_rb_allocator { + struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; + atomic_t req_pending; + atomic_t req_ready; + struct list_head rbd_allocated; + struct list_head rbd_empty; + spinlock_t lock; + struct workqueue_struct *alloc_wq; + struct work_struct rx_alloc; +}; + +struct iwl_dma_ptr { + dma_addr_t dma; + void *addr; + size_t size; +}; + +/** + * iwl_queue_inc_wrap - increment queue index, wrap back to beginning + * @index -- current index + */ +static inline int iwl_queue_inc_wrap(int index) +{ + return ++index & (TFD_QUEUE_SIZE_MAX - 1); +} + +/** + * iwl_queue_dec_wrap - decrement queue index, wrap back to end + * @index -- current index + */ +static inline int iwl_queue_dec_wrap(int index) +{ + return --index & (TFD_QUEUE_SIZE_MAX - 1); +} + +struct iwl_cmd_meta { + /* only for SYNC commands, iff the reply skb is wanted */ + struct iwl_host_cmd *source; + u32 flags; +}; + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues. + * + * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware + * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless + * there might be HW changes in the future). For the normal TX + * queues, n_window, which is the size of the software queue data + * is also 256; however, for the command queue, n_window is only + * 32 since we don't need so many commands pending. Since the HW + * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result, + * the software buffers (in the variables @meta, @txb in struct + * iwl_txq) only have 32 entries, while the HW buffers (@tfds in + * the same struct) have 256. + * This means that we end up with the following: + * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | + * SW entries: | 0 | ... | 31 | + * where N is a number between 0 and 7. This means that the SW + * data is a window overlayed over the HW queue. + */ +struct iwl_queue { + int write_ptr; /* 1-st empty entry (index) host_w*/ + int read_ptr; /* last used entry (index) host_r*/ + /* use for monitoring and recovering the stuck queue */ + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; + int low_mark; /* low watermark, resume queue if free + * space more than this */ + int high_mark; /* high watermark, stop queue if free + * space less than this */ +}; + +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +/* + * The FH will write back to the first TB only, so we need + * to copy some data into the buffer regardless of whether + * it should be mapped or not. This indicates how big the + * first TB must be to include the scratch buffer. Since + * the scratch is 4 bytes at offset 12, it's 16 now. 
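
/*
 * Illustrative stand-alone check (not part of this patch) of the
 * arithmetic above: a 4-byte command header, 8 bytes of copied payload
 * and a 4-byte scratch word give the 16-byte first TB, with the scratch
 * word at offset 12.  The ex_* types are stand-ins for the real
 * iwl_cmd_header / iwl_pcie_txq_scratch_buf declared further down.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct ex_cmd_header {			/* stand-in for struct iwl_cmd_header */
	uint8_t  cmd;
	uint8_t  flags;
	uint16_t sequence;
};

struct ex_txq_scratch_buf {
	struct ex_cmd_header hdr;	/* offset  0, 4 bytes */
	uint8_t  buf[8];		/* offset  4, 8 bytes */
	uint32_t scratch;		/* offset 12, 4 bytes */
};

int main(void)
{
	assert(offsetof(struct ex_txq_scratch_buf, scratch) == 12);
	assert(sizeof(struct ex_txq_scratch_buf) == 16);  /* IWL_HCMD_SCRATCHBUF_SIZE */
	return 0;
}
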
If we + * make it bigger then allocations will be bigger and copy + * slower, so that's probably not useful. + */ +#define IWL_HCMD_SCRATCHBUF_SIZE 16 + +struct iwl_pcie_txq_entry { + struct iwl_device_cmd *cmd; + struct sk_buff *skb; + /* buffer to free after command completes */ + const void *free_buf; + struct iwl_cmd_meta meta; +}; + +struct iwl_pcie_txq_scratch_buf { + struct iwl_cmd_header hdr; + u8 buf[8]; + __le32 scratch; +}; + +/** + * struct iwl_txq - Tx Queue for DMA + * @q: generic Rx/Tx queue descriptor + * @tfds: transmit frame descriptors (DMA memory) + * @scratchbufs: start of command headers, including scratch buffers, for + * the writeback -- this is DMA memory and an array holding one buffer + * for each command on the queue + * @scratchbufs_dma: DMA address for the scratchbufs start + * @entries: transmit entries (driver state) + * @lock: queue lock + * @stuck_timer: timer that fires if queue gets stuck + * @trans_pcie: pointer back to transport (for timer) + * @need_update: indicates need to update read/write index + * @active: stores if queue is active + * @ampdu: true if this queue is an ampdu queue for an specific RA/TID + * @wd_timeout: queue watchdog timeout (jiffies) - per queue + * @frozen: tx stuck queue timer is frozen + * @frozen_expiry_remainder: remember how long until the timer fires + * + * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame + * descriptors) and required locking structures. + */ +struct iwl_txq { + struct iwl_queue q; + struct iwl_tfd *tfds; + struct iwl_pcie_txq_scratch_buf *scratchbufs; + dma_addr_t scratchbufs_dma; + struct iwl_pcie_txq_entry *entries; + spinlock_t lock; + unsigned long frozen_expiry_remainder; + struct timer_list stuck_timer; + struct iwl_trans_pcie *trans_pcie; + bool need_update; + bool frozen; + u8 active; + bool ampdu; + unsigned long wd_timeout; +}; + +static inline dma_addr_t +iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) +{ + return txq->scratchbufs_dma + + sizeof(struct iwl_pcie_txq_scratch_buf) * idx; +} + +/** + * struct iwl_trans_pcie - PCIe transport specific data + * @rxq: all the RX queue data + * @rba: allocator for RX replenishing + * @drv - pointer to iwl_drv + * @trans: pointer to the generic transport area + * @scd_base_addr: scheduler sram base address in SRAM + * @scd_bc_tbls: pointer to the byte count table of the scheduler + * @kw: keep warm address + * @pci_dev: basic pci-network driver stuff + * @hw_base: pci hardware address support + * @ucode_write_complete: indicates that the ucode has been copied. 
+ * @ucode_write_waitq: wait queue for uCode load + * @cmd_queue - command queue number + * @rx_buf_size_8k: 8 kB RX buffer size + * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) + * @scd_set_active: should the transport configure the SCD for HCMD queue + * @wide_cmd_header: true when ucode supports wide command header format + * @rx_page_order: page order for receive buffer size + * @reg_lock: protect hw register access + * @mutex: to protect stop_device / start_fw / start_hw + * @cmd_in_flight: true when we have a host command in flight + * @fw_mon_phys: physical address of the buffer for the firmware monitor + * @fw_mon_page: points to the first page of the buffer for the firmware monitor + * @fw_mon_size: size of the buffer for the firmware monitor + */ +struct iwl_trans_pcie { + struct iwl_rxq rxq; + struct iwl_rb_allocator rba; + struct iwl_trans *trans; + struct iwl_drv *drv; + + struct net_device napi_dev; + struct napi_struct napi; + + /* INT ICT Table */ + __le32 *ict_tbl; + dma_addr_t ict_tbl_dma; + int ict_index; + bool use_ict; + bool is_down; + struct isr_statistics isr_stats; + + spinlock_t irq_lock; + struct mutex mutex; + u32 inta_mask; + u32 scd_base_addr; + struct iwl_dma_ptr scd_bc_tbls; + struct iwl_dma_ptr kw; + + struct iwl_txq *txq; + unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + + /* PCI bus related data */ + struct pci_dev *pci_dev; + void __iomem *hw_base; + + bool ucode_write_complete; + wait_queue_head_t ucode_write_waitq; + wait_queue_head_t wait_command_queue; + + u8 cmd_queue; + u8 cmd_fifo; + unsigned int cmd_q_wdg_timeout; + u8 n_no_reclaim_cmds; + u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; + + bool rx_buf_size_8k; + bool bc_table_dword; + bool scd_set_active; + bool wide_cmd_header; + u32 rx_page_order; + + const char *const *command_names; + + /*protect hw register */ + spinlock_t reg_lock; + bool cmd_hold_nic_awake; + bool ref_cmd_in_flight; + + /* protect ref counter */ + spinlock_t ref_lock; + u32 ref_count; + + dma_addr_t fw_mon_phys; + struct page *fw_mon_page; + u32 fw_mon_size; +}; + +#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ + ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific)) + +static inline struct iwl_trans * +iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) +{ + return container_of((void *)trans_pcie, struct iwl_trans, + trans_specific); +} + +/* + * Convention: trans API functions: iwl_trans_pcie_XXX + * Other functions: iwl_pcie_XXX + */ +struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + const struct iwl_cfg *cfg); +void iwl_trans_pcie_free(struct iwl_trans *trans); + +/***************************************************** +* RX +******************************************************/ +int iwl_pcie_rx_init(struct iwl_trans *trans); +irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); +int iwl_pcie_rx_stop(struct iwl_trans *trans); +void iwl_pcie_rx_free(struct iwl_trans *trans); + +/***************************************************** +* ICT - interrupt handling +******************************************************/ +irqreturn_t iwl_pcie_isr(int irq, void *data); +int iwl_pcie_alloc_ict(struct iwl_trans *trans); +void iwl_pcie_free_ict(struct iwl_trans *trans); +void iwl_pcie_reset_ict(struct iwl_trans *trans); +void iwl_pcie_disable_ict(struct iwl_trans *trans); + +/***************************************************** +* TX / HCMD 
+******************************************************/ +int iwl_pcie_tx_init(struct iwl_trans *trans); +void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); +int iwl_pcie_tx_stop(struct iwl_trans *trans); +void iwl_pcie_tx_free(struct iwl_trans *trans); +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout); +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd); +int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_cmd *dev_cmd, int txq_id); +void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); +int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); +void iwl_pcie_hcmd_complete(struct iwl_trans *trans, + struct iwl_rx_cmd_buffer *rxb); +void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs); +void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); + +void iwl_trans_pcie_ref(struct iwl_trans *trans); +void iwl_trans_pcie_unref(struct iwl_trans *trans); + +static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +/***************************************************** +* Error handling +******************************************************/ +void iwl_pcie_dump_csr(struct iwl_trans *trans); + +/***************************************************** +* Helpers +******************************************************/ +static inline void iwl_disable_interrupts(struct iwl_trans *trans) +{ + clear_bit(STATUS_INT_ENABLED, &trans->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(trans, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(trans, CSR_INT, 0xffffffff); + iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); +} + +static inline void iwl_enable_interrupts(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &trans->status); + trans_pcie->inta_mask = CSR_INI_SET_MASK; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); +} + +static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); + trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); +} + +static inline void iwl_wake_queue(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { + IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); + iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); + } +} + +static inline void iwl_stop_queue(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { + iwl_op_mode_queue_full(trans->op_mode, txq->q.id); + IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id); + } else + IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", + txq->q.id); +} + +static inline bool iwl_queue_used(const struct 
iwl_queue *q, int i) +{ + return q->write_ptr >= q->read_ptr ? + (i >= q->read_ptr && i < q->write_ptr) : + !(i < q->read_ptr && i >= q->write_ptr); +} + +static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) +{ + return index & (q->n_window - 1); +} + +static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie, + u8 cmd) +{ + if (!trans_pcie->command_names || !trans_pcie->command_names[cmd]) + return "UNKNOWN"; + return trans_pcie->command_names[cmd]; +} + +static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) +{ + return !(iwl_read32(trans, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); +} + +static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, + u32 reg, u32 mask, u32 value) +{ + u32 v; + +#ifdef CONFIG_IWLWIFI_DEBUG + WARN_ON_ONCE(value & ~mask); +#endif + + v = iwl_read32(trans, reg); + v &= ~mask; + v |= value; + iwl_write32(trans, reg, v); +} + +static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, + u32 reg, u32 mask) +{ + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); +} + +static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, + u32 reg, u32 mask) +{ + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); +} + +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); + +#endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c new file mode 100644 index 000000000000..e06591f625c4 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -0,0 +1,1548 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include + +#include "iwl-prph.h" +#include "iwl-io.h" +#include "internal.h" +#include "iwl-op-mode.h" + +/****************************************************************************** + * + * RX path functions + * + ******************************************************************************/ + +/* + * Rx theory of operation + * + * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), + * each of which point to Receive Buffers to be filled by the NIC. These get + * used not only for Rx frames, but for any command response or notification + * from the NIC. 
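
/*
 * Illustrative stand-alone demonstration (not part of this patch) of the
 * two index helpers defined at the end of internal.h above.  With a
 * 256-entry hardware ring and a 32-entry command window, hardware index
 * 200 lands in software slot 200 & 31 == 8, and occupancy is evaluated
 * correctly across the wrap point.  The ex_* names are made up for the
 * example.
 */
#include <assert.h>

struct ex_queue { int write_ptr, read_ptr, n_window; };

static int ex_queue_used(const struct ex_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static int ex_get_cmd_index(const struct ex_queue *q, int index)
{
	return index & (q->n_window - 1);
}

int main(void)
{
	struct ex_queue q = { .write_ptr = 5, .read_ptr = 250, .n_window = 32 };

	assert(ex_queue_used(&q, 252));		/* between read and the wrap  */
	assert(ex_queue_used(&q, 2));		/* between the wrap and write */
	assert(!ex_queue_used(&q, 100));	/* outside the used region    */
	assert(ex_get_cmd_index(&q, 200) == 8);
	return 0;
}
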
The driver and NIC manage the Rx buffers by means + * of indexes into the circular buffer. + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. + * When the interrupt handler is called, the request is processed. + * The page is either stolen - transferred to the upper layer + * or reused - added immediately to the iwl->rxq->rx_free list. + * + When the page is stolen - the driver updates the matching queue's used + * count, detaches the RBD and transfers it to the queue used list. + * When there are two used RBDs - they are transferred to the allocator empty + * list. Work is then scheduled for the allocator to start allocating + * eight buffers. + * When there are another 6 used RBDs - they are transferred to the allocator + * empty list and the driver tries to claim the pre-allocated buffers and + * add them to iwl->rxq->rx_free. If it fails - it continues to claim them + * until ready. + * When there are 8+ buffers in the free list - either from allocation or from + * 8 reused unstolen pages - restock is called to update the FW and indexes. + * + In order to make sure the allocator always has RBDs to use for allocation + * the allocator has initial pool in the size of num_queues*(8-2) - the + * maximum missing RBDs per allocation request (request posted with 2 + * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). + * The queues supplies the recycle of the rest of the RBDs. + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + If there are no allocated buffers in iwl->rxq->rx_free, + * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. + * If there were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_rxq_alloc() Allocates rx_free + * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_pcie_rxq_restock. + * Used only during initialization. + * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. + * iwl_pcie_rx_allocator() Background work for allocating pages. + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Posts and claims requests to the allocator. 
+ * Calls iwl_pcie_rxq_restock to refill any empty + * slots. + * + * RBD life-cycle: + * + * Init: + * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue + * + * Regular Receive interrupt: + * Page Stolen: + * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> + * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue + * Page not Stolen: + * rxq.queue -> rxq.rx_free -> rxq.queue + * ... + * + */ + +/* + * iwl_rxq_space - Return number of free slots available in queue. + */ +static int iwl_rxq_space(const struct iwl_rxq *rxq) +{ + /* Make sure RX_QUEUE_SIZE is a power of 2 */ + BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1)); + + /* + * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity + * between empty and completely full queues. + * The following is equivalent to modulo by RX_QUEUE_SIZE and is well + * defined for negative dividends. + */ + return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); +} + +/* + * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + +/* + * iwl_pcie_rx_stop - stops the Rx DMA + */ +int iwl_pcie_rx_stop(struct iwl_trans *trans) +{ + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); +} + +/* + * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue + */ +static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + u32 reg; + + lockdep_assert_held(&rxq->lock); + + /* + * explicitly wake up the NIC if: + * 1. shadow registers aren't enabled + * 2. there is a chance that the NIC is asleep + */ + if (!trans->cfg->base_params->shadow_reg_enable && + test_bit(STATUS_TPOWER_PMI, &trans->status)) { + reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", + reg); + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + rxq->need_update = true; + return; + } + } + + rxq->write_actual = round_down(rxq->write, 8); + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); +} + +static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + + spin_lock(&rxq->lock); + + if (!rxq->need_update) + goto exit_unlock; + + iwl_pcie_rxq_inc_wr_ptr(trans); + rxq->need_update = false; + + exit_unlock: + spin_unlock(&rxq->lock); +} + +/* + * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +static void iwl_pcie_rxq_restock(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rx_mem_buffer *rxb; + + /* + * If the device isn't enabled - not need to try to add buffers... + * This can happen when we stop the device and still have an interrupt + * pending. 
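
/*
 * Illustrative numeric check (not part of this patch) of the free-slot
 * computation in iwl_rxq_space() above: masking with RX_QUEUE_SIZE - 1
 * turns the possibly negative difference into the proper modulo result,
 * and one slot is always kept back so the result never exceeds
 * RX_QUEUE_SIZE - 1.  The ex_* names are made up for the example.
 */
#include <assert.h>
#include <stdint.h>

#define EX_RX_QUEUE_SIZE 256

static uint32_t ex_rxq_space(uint32_t read, uint32_t write)
{
	return (read - write - 1) & (EX_RX_QUEUE_SIZE - 1);
}

int main(void)
{
	assert(ex_rxq_space(10, 10) == 255);	/* read == write: maximum space      */
	assert(ex_rxq_space(10, 9) == 0);	/* write one behind read: no space   */
	assert(ex_rxq_space(3, 250) == 8);	/* wrapped subtraction still correct */
	return 0;
}
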
We stop the APM before we sync the interrupts because we + * have to (see comment there). On the other hand, since the APM is + * stopped, we cannot access the HW (in particular not prph). + * So don't try to restock if the APM has been already stopped. + */ + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return; + + spin_lock(&rxq->lock); + while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { + /* The overwritten rxb must be a used one */ + rxb = rxq->queue[rxq->write]; + BUG_ON(rxb && rxb->page); + + /* Get next free Rx buffer, remove from free list */ + rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, + list); + list_del(&rxb->list); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock(&rxq->lock); + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans); + spin_unlock(&rxq->lock); + } +} + +/* + * iwl_pcie_rx_alloc_page - allocates and returns a page. + * + */ +static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, + gfp_t priority) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct page *page; + gfp_t gfp_mask = priority; + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", + trans_pcie->rx_page_order); + /* Issue an error if the hardware has consumed more than half + * of its free buffer list and we don't have enough + * pre-allocated buffers. +` */ + if (rxq->free_count <= RX_LOW_WATERMARK && + iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && + net_ratelimit()) + IWL_CRIT(trans, + "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", + rxq->free_count); + return NULL; + } + return page; +} + +/* + * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD + * + * A used RBD is an Rx buffer that has been given to the stack. To use it again + * a page must be allocated and the RBD must point to the page. This function + * doesn't change the HW pointer but handles the list of pages that is used by + * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly + * allocated buffers. 
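
/*
 * Illustrative stand-alone check (not part of this patch): the RBD
 * written by iwl_pcie_dma_addr2rbd_ptr() above holds the page address
 * shifted right by 8, i.e. bits [35:8] of the DMA address.  That is why
 * the BUG_ON()s in the function below insist on an address that fits in
 * 36 bits and is 256-byte aligned (DMA_BIT_MASK(8) is 0xff, so a zero
 * low byte is exactly 256-byte alignment).  The ex_* names are made up
 * for the example.
 */
#include <assert.h>
#include <stdint.h>

#define EX_BIT_MASK(n)	((1ULL << (n)) - 1)

static uint32_t ex_dma_addr2rbd_ptr(uint64_t dma_addr)
{
	return (uint32_t)(dma_addr >> 8);
}

int main(void)
{
	uint64_t dma = 0x0000000123456700ULL;	/* 36-bit, 256-byte aligned */

	assert(!(dma & ~EX_BIT_MASK(36)));	/* fits in 36 bits     */
	assert(!(dma & EX_BIT_MASK(8)));	/* low 8 bits are zero */
	assert(ex_dma_addr2rbd_ptr(dma) == 0x01234567);
	return 0;
}
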
+ */ +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + while (1) { + spin_lock(&rxq->lock); + if (list_empty(&rxq->rx_used)) { + spin_unlock(&rxq->lock); + return; + } + spin_unlock(&rxq->lock); + + /* Alloc a new receive buffer */ + page = iwl_pcie_rx_alloc_page(trans, priority); + if (!page) + return; + + spin_lock(&rxq->lock); + + if (list_empty(&rxq->rx_used)) { + spin_unlock(&rxq->lock); + __free_pages(page, trans_pcie->rx_page_order); + return; + } + rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, + list); + list_del(&rxb->list); + spin_unlock(&rxq->lock); + + BUG_ON(rxb->page); + rxb->page = page; + /* Get physical address of the RB */ + rxb->page_dma = + dma_map_page(trans->dev, page, 0, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + spin_lock(&rxq->lock); + list_add(&rxb->list, &rxq->rx_used); + spin_unlock(&rxq->lock); + __free_pages(page, trans_pcie->rx_page_order); + return; + } + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + spin_lock(&rxq->lock); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + + spin_unlock(&rxq->lock); + } +} + +static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + int i; + + lockdep_assert_held(&rxq->lock); + + for (i = 0; i < RX_QUEUE_SIZE; i++) { + if (!rxq->pool[i].page) + continue; + dma_unmap_page(trans->dev, rxq->pool[i].page_dma, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); + rxq->pool[i].page = NULL; + } +} + +/* + * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free + * + * When moving to rx_free an page is allocated for the slot. + * + * Also restock the Rx queue via iwl_pcie_rxq_restock. + * This is called only during initialization + */ +static void iwl_pcie_rx_replenish(struct iwl_trans *trans) +{ + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); + + iwl_pcie_rxq_restock(trans); +} + +/* + * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues + * + * Allocates for each received request 8 pages + * Called as a scheduled work item. 
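
/*
 * Illustrative single-threaded model (not part of this patch) of the
 * request/claim hand-shake between the Rx queue and this allocator:
 * every RX_CLAIM_REQ_ALLOC (8) reused RBDs, the RX_POST_REQ_ALLOC-th (2nd)
 * one posts a request; the allocator turns each pending request into a
 * batch of 8 ready pages, which the queue later claims in one go.  The
 * real code uses atomics and a workqueue; the ex_* names are made up for
 * the example.
 */
#include <assert.h>

#define EX_POST_REQ_ALLOC	2
#define EX_CLAIM_REQ_ALLOC	8

static int req_pending, req_ready;

static void ex_queue_reuse_rbd(int *used_count)
{
	/* mirrors iwl_pcie_rx_reuse_rbd(): post a request every 8 RBDs */
	if ((++(*used_count) % EX_CLAIM_REQ_ALLOC) == EX_POST_REQ_ALLOC)
		req_pending++;
}

static void ex_allocator_work(void)
{
	/* mirrors iwl_pcie_rx_allocator(): fill 8 pages per request */
	while (req_pending) {
		req_pending--;
		req_ready++;
	}
}

static int ex_allocator_get(void)
{
	/* mirrors iwl_pcie_rx_allocator_get(): claim a ready batch */
	if (!req_ready)
		return -1;
	req_ready--;
	return EX_CLAIM_REQ_ALLOC;
}

int main(void)
{
	int used = 0, i;

	for (i = 0; i < 9; i++)			/* nine reused RBDs ...         */
		ex_queue_reuse_rbd(&used);	/* ... post exactly one request */
	ex_allocator_work();
	assert(ex_allocator_get() == EX_CLAIM_REQ_ALLOC);
	assert(ex_allocator_get() == -1);	/* only one batch was ready     */
	return 0;
}
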
+ */ +static void iwl_pcie_rx_allocator(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct list_head local_empty; + int pending = atomic_xchg(&rba->req_pending, 0); + + IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); + + /* If we were scheduled - there is at least one request */ + spin_lock(&rba->lock); + /* swap out the rba->rbd_empty to a local list */ + list_replace_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + while (pending) { + int i; + struct list_head local_allocated; + + INIT_LIST_HEAD(&local_allocated); + + for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + /* List should never be empty - each reused RBD is + * returned to the list, and initial pool covers any + * possible gap between the time the page is allocated + * to the time the RBD is added. + */ + BUG_ON(list_empty(&local_empty)); + /* Get the first rxb from the rbd list */ + rxb = list_first_entry(&local_empty, + struct iwl_rx_mem_buffer, list); + BUG_ON(rxb->page); + + /* Alloc a new receive buffer */ + page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); + if (!page) + continue; + rxb->page = page; + + /* Get physical address of the RB */ + rxb->page_dma = dma_map_page(trans->dev, page, 0, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + __free_pages(page, trans_pcie->rx_page_order); + continue; + } + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + /* move the allocated entry to the out list */ + list_move(&rxb->list, &local_allocated); + i++; + } + + pending--; + if (!pending) { + pending = atomic_xchg(&rba->req_pending, 0); + IWL_DEBUG_RX(trans, + "Pending allocation requests = %d\n", + pending); + } + + spin_lock(&rba->lock); + /* add the allocated rbds to the allocator allocated list */ + list_splice_tail(&local_allocated, &rba->rbd_allocated); + /* get more empty RBDs for current pending requests */ + list_splice_tail_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_ready); + } + + spin_lock(&rba->lock); + /* return unused rbds to the allocator empty list */ + list_splice_tail(&local_empty, &rba->rbd_empty); + spin_unlock(&rba->lock); +} + +/* + * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages +.* +.* Called by queue when the queue posted allocation request and + * has freed 8 RBDs in order to restock itself. + */ +static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rx_mem_buffer + *out[RX_CLAIM_REQ_ALLOC]) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + /* + * atomic_dec_if_positive returns req_ready - 1 for any scenario. + * If req_ready is 0 atomic_dec_if_positive will return -1 and this + * function will return -ENOMEM, as there are no ready requests. + * atomic_dec_if_positive will perofrm the *actual* decrement only if + * req_ready > 0, i.e. - there are ready requests and the function + * hands one request to the caller. 
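
/*
 * Illustrative userspace model (not part of this patch) of the
 * atomic_dec_if_positive() behaviour relied on above: the return value
 * is always (old - 1), but the stored counter only changes when that
 * result is not negative.  The ex_* name is made up for the example.
 */
#include <assert.h>

static int ex_dec_if_positive(int *v)
{
	int dec = *v - 1;

	if (dec >= 0)		/* only commit a non-negative result */
		*v = dec;
	return dec;
}

int main(void)
{
	int req_ready = 2;

	assert(ex_dec_if_positive(&req_ready) == 1 && req_ready == 1);
	assert(ex_dec_if_positive(&req_ready) == 0 && req_ready == 0);
	/* nothing ready: the caller sees -1 and the counter stays at 0 */
	assert(ex_dec_if_positive(&req_ready) == -1 && req_ready == 0);
	return 0;
}
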
+ */ + if (atomic_dec_if_positive(&rba->req_ready) < 0) + return -ENOMEM; + + spin_lock(&rba->lock); + for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { + /* Get next free Rx buffer, remove it from free list */ + out[i] = list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + list_del(&out[i]->list); + } + spin_unlock(&rba->lock); + + return 0; +} + +static void iwl_pcie_rx_allocator_work(struct work_struct *data) +{ + struct iwl_rb_allocator *rba_p = + container_of(data, struct iwl_rb_allocator, rx_alloc); + struct iwl_trans_pcie *trans_pcie = + container_of(rba_p, struct iwl_trans_pcie, rba); + + iwl_pcie_rx_allocator(trans_pcie->trans); +} + +static int iwl_pcie_rx_alloc(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct device *dev = trans->dev; + + memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); + + spin_lock_init(&rxq->lock); + spin_lock_init(&rba->lock); + + if (WARN_ON(rxq->bd || rxq->rb_stts)) + return -EINVAL; + + /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ + rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + &rxq->bd_dma, GFP_KERNEL); + if (!rxq->bd) + goto err_bd; + + /*Allocate the driver's pointer to receive buffer status */ + rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), + &rxq->rb_stts_dma, GFP_KERNEL); + if (!rxq->rb_stts) + goto err_rb_stts; + + return 0; + +err_rb_stts: + dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; +err_bd: + return -ENOMEM; +} + +static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 rb_size; + const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ + + if (trans_pcie->rx_buf_size_8k) + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; + else + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + + /* Stop Rx DMA */ + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + /* reset and flush pointers */ + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); + iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0); + + /* Reset driver's Rx queue write index */ + iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + + /* Tell device where to find RBD circular buffer in DRAM */ + iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + (u32)(rxq->bd_dma >> 8)); + + /* Tell device where in DRAM to update its Rx status */ + iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, + rxq->rb_stts_dma >> 4); + + /* Enable Rx DMA + * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in + * the credit mechanism in 5000 HW RX FIFO + * Direct rx interrupts to hosts + * Rx buffer size 4 or 8k + * RB timeout 0x10 + * 256 RBDs + */ + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + rb_size| + (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| + (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + /* W/A for interrupt coalescing bug in 7260 and 3160 */ + if (trans->cfg->host_interrupt_operation_mode) + iwl_set_bit(trans, 
CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); +} + +static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) +{ + int i; + + lockdep_assert_held(&rxq->lock); + + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->free_count = 0; + rxq->used_count = 0; + + for (i = 0; i < RX_QUEUE_SIZE; i++) + list_add(&rxq->pool[i].list, &rxq->rx_used); +} + +static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) +{ + int i; + + lockdep_assert_held(&rba->lock); + + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); + + for (i = 0; i < RX_POOL_SIZE; i++) + list_add(&rba->pool[i].list, &rba->rbd_empty); +} + +static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + lockdep_assert_held(&rba->lock); + + for (i = 0; i < RX_POOL_SIZE; i++) { + if (!rba->pool[i].page) + continue; + dma_unmap_page(trans->dev, rba->pool[i].page_dma, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); + rba->pool[i].page = NULL; + } +} + +int iwl_pcie_rx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i, err; + + if (!rxq->bd) { + err = iwl_pcie_rx_alloc(trans); + if (err) + return err; + } + if (!rba->alloc_wq) + rba->alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); + + spin_lock(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_rx_free_rba(trans); + iwl_pcie_rx_init_rba(rba); + spin_unlock(&rba->lock); + + spin_lock(&rxq->lock); + + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_rxq_free_rbs(trans); + iwl_pcie_rx_init_rxb_lists(rxq); + + for (i = 0; i < RX_QUEUE_SIZE; i++) + rxq->queue[i] = NULL; + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); + spin_unlock(&rxq->lock); + + iwl_pcie_rx_replenish(trans); + + iwl_pcie_rx_hw_init(trans, rxq); + + spin_lock(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans); + spin_unlock(&rxq->lock); + + return 0; +} + +void iwl_pcie_rx_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + /*if rxq->bd is NULL, it means that nothing has been allocated, + * exit now */ + if (!rxq->bd) { + IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); + return; + } + + cancel_work_sync(&rba->rx_alloc); + if (rba->alloc_wq) { + destroy_workqueue(rba->alloc_wq); + rba->alloc_wq = NULL; + } + + spin_lock(&rba->lock); + iwl_pcie_rx_free_rba(trans); + spin_unlock(&rba->lock); + + spin_lock(&rxq->lock); + iwl_pcie_rxq_free_rbs(trans); + spin_unlock(&rxq->lock); + + dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + else + IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is 
NULL\n"); + rxq->rb_stts_dma = 0; + rxq->rb_stts = NULL; +} + +/* + * iwl_pcie_rx_reuse_rbd - Recycle used RBDs + * + * Called when a RBD can be reused. The RBD is transferred to the allocator. + * When there are 2 empty RBDs - a request for allocation is posted + */ +static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, + struct iwl_rx_mem_buffer *rxb, + struct iwl_rxq *rxq, bool emergency) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + /* Move the RBD to the used list, will be moved to allocator in batches + * before claiming or posting a request*/ + list_add_tail(&rxb->list, &rxq->rx_used); + + if (unlikely(emergency)) + return; + + /* Count the allocator owned RBDs */ + rxq->used_count++; + + /* If we have RX_POST_REQ_ALLOC new released rx buffers - + * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is + * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, + * after but we still need to post another request. + */ + if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { + /* Move the 2 RBDs to the allocator ownership. + Allocator has another 6 from pool for the request completion*/ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_pending); + queue_work(rba->alloc_wq, &rba->rx_alloc); + } +} + +static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, + struct iwl_rx_mem_buffer *rxb, + bool emergency) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + bool page_stolen = false; + int max_len = PAGE_SIZE << trans_pcie->rx_page_order; + u32 offset = 0; + + if (WARN_ON(!rxb)) + return; + + dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); + + while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { + struct iwl_rx_packet *pkt; + u16 sequence; + bool reclaim; + int index, cmd_index, len; + struct iwl_rx_cmd_buffer rxcb = { + ._offset = offset, + ._rx_page_order = trans_pcie->rx_page_order, + ._page = rxb->page, + ._page_stolen = false, + .truesize = max_len, + }; + + pkt = rxb_addr(&rxcb); + + if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) + break; + + IWL_DEBUG_RX(trans, + "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", + rxcb._offset, + get_cmd_string(trans_pcie, pkt->hdr.cmd), + pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); + + len = iwl_rx_packet_len(pkt); + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); + trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. 
*/ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); + if (reclaim) { + int i; + + for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { + if (trans_pcie->no_reclaim_cmds[i] == + pkt->hdr.cmd) { + reclaim = false; + break; + } + } + } + + sequence = le16_to_cpu(pkt->hdr.sequence); + index = SEQ_TO_INDEX(sequence); + cmd_index = get_cmd_index(&txq->q, index); + + iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); + + if (reclaim) { + kzfree(txq->entries[cmd_index].free_buf); + txq->entries[cmd_index].free_buf = NULL; + } + + /* + * After here, we should always check rxcb._page_stolen, + * if it is true then one of the handlers took the page. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking + * iwl_trans_send_cmd() + * as we reclaim the driver command queue */ + if (!rxcb._page_stolen) + iwl_pcie_hcmd_complete(trans, &rxcb); + else + IWL_WARN(trans, "Claim null rxb?\n"); + } + + page_stolen |= rxcb._page_stolen; + offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); + } + + /* page was stolen from us -- free our reference */ + if (page_stolen) { + __free_pages(rxb->page, trans_pcie->rx_page_order); + rxb->page = NULL; + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + if (rxb->page != NULL) { + rxb->page_dma = + dma_map_page(trans->dev, rxb->page, 0, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + /* + * free the page(s) as well to not break + * the invariant that the items on the used + * list have no page(s) + */ + __free_pages(rxb->page, trans_pcie->rx_page_order); + rxb->page = NULL; + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); + } else { + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } + } else + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); +} + +/* + * iwl_pcie_rx_handle - Main entry function for receiving responses from fw + */ +static void iwl_pcie_rx_handle(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + u32 r, i, j, count = 0; + bool emergency = false; + +restart: + spin_lock(&rxq->lock); + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). 
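/*
 * Illustrative sketch (not part of the patch): the loop below walks the
 * Rx ring the same way iwl_pcie_rx_handle() does - from the driver's read
 * index up to the index last written by the device, with power-of-two
 * wrap-around. QUEUE_SIZE and handle_entry() are placeholders.
 */
#define QUEUE_SIZE 256
#define QUEUE_MASK (QUEUE_SIZE - 1)

static void drain_ring(void *entries[QUEUE_SIZE], unsigned int *read,
		       unsigned int hw_write, void (*handle_entry)(void *))
{
	unsigned int i = *read;

	while (i != hw_write) {
		handle_entry(entries[i]);
		entries[i] = NULL;		/* slot is now free */
		i = (i + 1) & QUEUE_MASK;	/* wrap around the ring */
	}
	*read = i;				/* publish how far we got */
}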
*/ + r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); + + while (i != r) { + struct iwl_rx_mem_buffer *rxb; + + if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) + emergency = true; + + rxb = rxq->queue[i]; + rxq->queue[i] = NULL; + + IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", + r, i, rxb); + iwl_pcie_rx_handle_rb(trans, rxb, emergency); + + i = (i + 1) & RX_QUEUE_MASK; + + /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; + + if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && + !emergency) { + /* Add the remaining 6 empty RBDs + * for allocator use + */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, + &rba->rbd_empty); + spin_unlock(&rba->lock); + } + + /* If not ready - continue, will try to reclaim later. + * No need to reschedule work - allocator exits only on + * success */ + if (!iwl_pcie_rx_allocator_get(trans, out)) { + /* If success - then RX_CLAIM_REQ_ALLOC + * buffers were retrieved and should be added + * to free list */ + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { + list_add_tail(&out[j]->list, + &rxq->rx_free); + rxq->free_count++; + } + } + } + if (emergency) { + count++; + if (count == 8) { + count = 0; + if (rxq->used_count < RX_QUEUE_SIZE / 3) + emergency = false; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + spin_lock(&rxq->lock); + } + } + /* handle restock for three cases, can be all of them at once: + * - we just pulled buffers from the allocator + * - we have 8+ unstolen pages accumulated + * - we are in emergency and allocated buffers + */ + if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_restock(trans); + goto restart; + } + } + + /* Backtrack one entry */ + rxq->read = i; + spin_unlock(&rxq->lock); + + /* + * handle a case where in emergency there are some unallocated RBDs. + * those RBDs are in the used list, but are not tracked by the queue's + * used_count which counts allocator owned RBDs. + * unallocated emergency RBDs must be allocated on exit, otherwise + * when called again the function may not be in emergency mode and + * they will be handed to the allocator with no tracking in the RBD + * allocator counters, which will lead to them never being claimed back + * by the queue. + * by allocating them here, they are now in the queue free list, and + * will be restocked by the next call of iwl_pcie_rxq_restock. 
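/*
 * Illustrative sketch (not part of the patch): the emergency flag used in
 * the loop above behaves roughly like a hysteresis - it is raised when
 * half of the queue sits in the used list and dropped only once the
 * backlog has shrunk below a third, so it does not flap around a single
 * threshold. QUEUE_SIZE stands in for RX_QUEUE_SIZE.
 */
#include <stdbool.h>

#define QUEUE_SIZE 256

static bool update_emergency(bool emergency, unsigned int used_count)
{
	if (used_count >= QUEUE_SIZE / 2)
		return true;		/* running out of Rx buffers */
	if (used_count < QUEUE_SIZE / 3)
		return false;		/* backlog drained enough */
	return emergency;		/* in between: keep current state */
}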
+ */ + if (unlikely(emergency && count)) + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + + if (trans_pcie->napi.poll) + napi_gro_flush(&trans_pcie->napi, false); +} + +/* + * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card + */ +static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ + if (trans->cfg->internal_wimax_coex && + !trans->cfg->apmg_not_supported && + (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & + APMS_CLK_VAL_MRB_FUNC_MODE) || + (iwl_read_prph(trans, APMG_PS_CTRL_REG) & + APMG_PS_CTRL_VAL_RESET_REQ))) { + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + iwl_op_mode_wimax_active(trans->op_mode); + wake_up(&trans_pcie->wait_command_queue); + return; + } + + iwl_pcie_dump_csr(trans); + iwl_dump_fh(trans, NULL); + + local_bh_disable(); + /* The STATUS_FW_ERROR bit is set in this function. This must happen + * before we wake up the command caller, to ensure a proper cleanup. */ + iwl_trans_fw_error(trans); + local_bh_enable(); + + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) + del_timer(&trans_pcie->txq[i].stuck_timer); + + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + wake_up(&trans_pcie->wait_command_queue); +} + +static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) +{ + u32 inta; + + lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); + + trace_iwlwifi_dev_irq(trans->dev); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(trans, CSR_INT); + + /* the thread will service interrupts and re-enable them */ + return inta; +} + +/* a device (PCI-E) page is 4096 bytes long */ +#define ICT_SHIFT 12 +#define ICT_SIZE (1 << ICT_SHIFT) +#define ICT_COUNT (ICT_SIZE / sizeof(u32)) + +/* interrupt handler using ict table, with this interrupt driver will + * stop using INTA register to get device's interrupt, reading this register + * is expensive, device will write interrupts in ICT dram table, increment + * index then will fire interrupt to driver, driver will OR all ICT table + * entries from current index up to table entry with 0 value. the result is + * the interrupt we need to service, driver will set the entries back to 0 and + * set index. + */ +static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 inta; + u32 val = 0; + u32 read; + + trace_iwlwifi_dev_irq(trans->dev); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); + trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); + if (!read) + return 0; + + /* + * Collect all entries up to the first 0, starting from ict_index; + * note we already read at ict_index. + */ + do { + val |= read; + IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", + trans_pcie->ict_index, read); + trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; + trans_pcie->ict_index = + ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); + + read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); + trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, + read); + } while (read); + + /* We should not get this value, just ignore it. */ + if (val == 0xffffffff) + val = 0; + + /* + * this is a w/a for a h/w bug. 
the h/w bug may cause the Rx bit + * (bit 15 before shifting it to 31) to clear when using interrupt + * coalescing. fortunately, bits 18 and 19 stay set when this happens + * so we use them to decide on the real state of the Rx bit. + * In order words, bit 15 is set if bit 18 or bit 19 are set. + */ + if (val & 0xC0000) + val |= 0x8000; + + inta = (0xff & val) | ((0xff00 & val) << 16); + return inta; +} + +irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) +{ + struct iwl_trans *trans = dev_id; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + u32 inta = 0; + u32 handled = 0; + + lock_map_acquire(&trans->sync_cmd_lockdep_map); + + spin_lock(&trans_pcie->irq_lock); + + /* dram interrupt table not set yet, + * use legacy interrupt. + */ + if (likely(trans_pcie->use_ict)) + inta = iwl_pcie_int_cause_ict(trans); + else + inta = iwl_pcie_int_cause_non_ict(trans); + + if (iwl_have_debug_level(IWL_DL_ISR)) { + IWL_DEBUG_ISR(trans, + "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", + inta, trans_pcie->inta_mask, + iwl_read32(trans, CSR_INT_MASK), + iwl_read32(trans, CSR_FH_INT_STATUS)); + if (inta & (~trans_pcie->inta_mask)) + IWL_DEBUG_ISR(trans, + "We got a masked interrupt (0x%08x)\n", + inta & (~trans_pcie->inta_mask)); + } + + inta &= trans_pcie->inta_mask; + + /* + * Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. + */ + if (unlikely(!inta)) { + IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); + /* + * Re-enable interrupts here since we don't + * have anything to service + */ + if (test_bit(STATUS_INT_ENABLED, &trans->status)) + iwl_enable_interrupts(trans); + spin_unlock(&trans_pcie->irq_lock); + lock_map_release(&trans->sync_cmd_lockdep_map); + return IRQ_NONE; + } + + if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* + * Hardware disappeared. It might have + * already raised an interrupt. + */ + IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); + spin_unlock(&trans_pcie->irq_lock); + goto out; + } + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + */ + /* There is a hardware bug in the interrupt mask function that some + * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if + * they are disabled in the CSR_INT_MASK register. Furthermore the + * ICT interrupt handling mechanism has another bug that might cause + * these unmasked interrupts fail to be detected. We workaround the + * hardware bugs here by ACKing all the possible interrupts so that + * interrupt coalescing can still be achieved. + */ + iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); + + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", + inta, iwl_read32(trans, CSR_INT_MASK)); + + spin_unlock(&trans_pcie->irq_lock); + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(trans, "Hardware error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(trans); + + isr_stats->hw++; + iwl_pcie_irq_handle_error(trans); + + handled |= CSR_INT_BIT_HW_ERR; + + goto out; + } + + if (iwl_have_debug_level(IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(trans, + "Scheduler finished to transmit the frame/frames.\n"); + isr_stats->sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(trans, "Alive interrupt\n"); + isr_stats->alive++; + } + } + + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + bool hw_rfkill; + + hw_rfkill = iwl_is_rfkill_set(trans); + IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", + hw_rfkill ? "disable radio" : "enable radio"); + + isr_stats->rfkill++; + + mutex_lock(&trans_pcie->mutex); + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); + if (hw_rfkill) { + set_bit(STATUS_RFKILL, &trans->status); + if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status)) + IWL_DEBUG_RF_KILL(trans, + "Rfkill while SYNC HCMD in flight\n"); + wake_up(&trans_pcie->wait_command_queue); + } else { + clear_bit(STATUS_RFKILL, &trans->status); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(trans, "Microcode CT kill error detected.\n"); + isr_stats->ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(trans, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + isr_stats->sw++; + iwl_pcie_irq_handle_error(trans); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); + iwl_pcie_rxq_check_wrptr(trans); + iwl_pcie_txq_check_wrptrs(trans); + + isr_stats->wakeup++; + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | + CSR_INT_BIT_RX_PERIODIC)) { + IWL_DEBUG_ISR(trans, "Rx interrupt\n"); + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + iwl_write32(trans, CSR_FH_INT_STATUS, + CSR_FH_INT_RX_MASK); + } + if (inta & CSR_INT_BIT_RX_PERIODIC) { + handled |= CSR_INT_BIT_RX_PERIODIC; + iwl_write32(trans, + CSR_INT, CSR_INT_BIT_RX_PERIODIC); + } + /* Sending RX interrupt require many steps to be done in the + * the device: + * 1- write interrupt to current index in ICT table. + * 2- dma RX frame. + * 3- update RX shared data to indicate last write index. + * 4- send interrupt. + * This could lead to RX race, driver could receive RX interrupt + * but the shared data changes does not reflect this; + * periodic interrupt will detect any dangling Rx activity. + */ + + /* Disable periodic interrupt; we use it as just a one-shot. */ + iwl_write8(trans, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_DIS); + + /* + * Enable periodic interrupt in 8 msec only if we received + * real RX interrupt (instead of just periodic int), to catch + * any dangling Rx interrupt. 
If it was just the periodic + * interrupt, there was no dangling Rx activity, and no need + * to extend the periodic interrupt; one-shot is enough. + */ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) + iwl_write8(trans, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_ENA); + + isr_stats->rx++; + + local_bh_disable(); + iwl_pcie_rx_handle(trans); + local_bh_enable(); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); + IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); + isr_stats->tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + trans_pcie->ucode_write_complete = true; + wake_up(&trans_pcie->ucode_write_waitq); + } + + if (inta & ~handled) { + IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + isr_stats->unhandled++; + } + + if (inta & ~(trans_pcie->inta_mask)) { + IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", + inta & ~trans_pcie->inta_mask); + } + + /* Re-enable all interrupts */ + /* only Re-enable if disabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &trans->status)) + iwl_enable_interrupts(trans); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(trans); + +out: + lock_map_release(&trans->sync_cmd_lockdep_map); + return IRQ_HANDLED; +} + +/****************************************************************************** + * + * ICT functions + * + ******************************************************************************/ + +/* Free dram table */ +void iwl_pcie_free_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans_pcie->ict_tbl) { + dma_free_coherent(trans->dev, ICT_SIZE, + trans_pcie->ict_tbl, + trans_pcie->ict_tbl_dma); + trans_pcie->ict_tbl = NULL; + trans_pcie->ict_tbl_dma = 0; + } +} + +/* + * allocate dram shared table, it is an aligned memory + * block of ICT_SIZE. + * also reset all data related to ICT table interrupt. + */ +int iwl_pcie_alloc_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + trans_pcie->ict_tbl = + dma_zalloc_coherent(trans->dev, ICT_SIZE, + &trans_pcie->ict_tbl_dma, + GFP_KERNEL); + if (!trans_pcie->ict_tbl) + return -ENOMEM; + + /* just an API sanity check ... it is guaranteed to be aligned */ + if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { + iwl_pcie_free_ict(trans); + return -EINVAL; + } + + IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n", + (unsigned long long)trans_pcie->ict_tbl_dma, + trans_pcie->ict_tbl); + + return 0; +} + +/* Device is going up inform it about using ICT interrupt table, + * also we need to tell the driver to start using ICT interrupt. 
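/*
 * Illustrative sketch (not part of the patch): the ICT readout described
 * earlier for iwl_pcie_int_cause_ict() boils down to "OR non-zero entries
 * from the current index until a zero is hit, clearing them as you go".
 * The real driver works on little-endian entries and masks with
 * (ICT_COUNT - 1); this standalone version keeps only the shape of it.
 */
#include <stdint.h>

#define ICT_COUNT (4096 / sizeof(uint32_t))	/* one 4 KiB page of 32-bit slots */

static uint32_t ict_collect(volatile uint32_t *tbl, unsigned int *index)
{
	uint32_t val = 0;

	while (tbl[*index]) {
		val |= tbl[*index];
		tbl[*index] = 0;			/* hand the slot back */
		*index = (*index + 1) % ICT_COUNT;	/* wrap around the table */
	}
	return val;					/* 0 means nothing pending */
}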
+ */ +void iwl_pcie_reset_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 val; + + if (!trans_pcie->ict_tbl) + return; + + spin_lock(&trans_pcie->irq_lock); + iwl_disable_interrupts(trans); + + memset(trans_pcie->ict_tbl, 0, ICT_SIZE); + + val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; + + val |= CSR_DRAM_INT_TBL_ENABLE | + CSR_DRAM_INIT_TBL_WRAP_CHECK | + CSR_DRAM_INIT_TBL_WRITE_POINTER; + + IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); + + iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); + trans_pcie->use_ict = true; + trans_pcie->ict_index = 0; + iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); + iwl_enable_interrupts(trans); + spin_unlock(&trans_pcie->irq_lock); +} + +/* Device is going down disable ict interrupt usage */ +void iwl_pcie_disable_ict(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + spin_lock(&trans_pcie->irq_lock); + trans_pcie->use_ict = false; + spin_unlock(&trans_pcie->irq_lock); +} + +irqreturn_t iwl_pcie_isr(int irq, void *data) +{ + struct iwl_trans *trans = data; + + if (!trans) + return IRQ_NONE; + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. + */ + iwl_write32(trans, CSR_INT_MASK, 0x00000000); + + return IRQ_WAKE_THREAD; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c new file mode 100644 index 000000000000..90283453073c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -0,0 +1,2825 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-drv.h" +#include "iwl-trans.h" +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-scd.h" +#include "iwl-agn-hw.h" +#include "iwl-fw-error-dump.h" +#include "internal.h" +#include "iwl-fh.h" + +/* extended range in FW SRAM */ +#define IWL_FW_MEM_EXTENDED_START 0x40000 +#define IWL_FW_MEM_EXTENDED_END 0x57FFF + +static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans_pcie->fw_mon_page) + return; + + dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys, + trans_pcie->fw_mon_size, DMA_FROM_DEVICE); + __free_pages(trans_pcie->fw_mon_page, + get_order(trans_pcie->fw_mon_size)); + trans_pcie->fw_mon_page = NULL; + trans_pcie->fw_mon_phys = 0; + trans_pcie->fw_mon_size = 0; +} + +static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct page *page = NULL; + dma_addr_t phys; + u32 size = 0; + u8 power; + + if (!max_power) { + /* default max_power is maximum */ + max_power = 26; + } else { + max_power += 11; + } + + if (WARN(max_power > 26, + "External buffer size for monitor is too big %d, check the FW TLV\n", + max_power)) + return; + + if (trans_pcie->fw_mon_page) { + dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys, + trans_pcie->fw_mon_size, + DMA_FROM_DEVICE); + return; + } + + phys = 0; + for (power = max_power; power >= 11; power--) { + int order; + + size = BIT(power); + order = get_order(size); + page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO, + order); + if (!page) + continue; + + phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, phys)) { + __free_pages(page, order); + page = NULL; + continue; + } + IWL_INFO(trans, + "Allocated 0x%08x bytes (order %d) for firmware monitor.\n", + size, order); + break; + } + + if (WARN_ON_ONCE(!page)) + return; + + if (power != max_power) + IWL_ERR(trans, + "Sorry - debug buffer is only %luK while you requested %luK\n", + (unsigned long)BIT(power - 10), + (unsigned long)BIT(max_power - 10)); 
+ + trans_pcie->fw_mon_page = page; + trans_pcie->fw_mon_phys = phys; + trans_pcie->fw_mon_size = size; +} + +static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) +{ + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, + ((reg & 0x0000ffff) | (2 << 28))); + return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG); +} + +static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) +{ + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, + ((reg & 0x0000ffff) | (3 << 28))); +} + +static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) +{ + if (trans->cfg->apmg_not_supported) + return; + + if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) + iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + else + iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); +} + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +static void iwl_pcie_apm_config(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u16 lctl; + u16 cap; + + /* + * HW bug W/A for instability in PCIe bus L0S->L1 transition. + * Check if BIOS (or OS) enabled L1-ASPM on this device. + * If so (likely), disable L0S, so device moves directly L0->L1; + * costs negligible amount of power savings. + * If not (unlikely), enable L0S, so there is at least some + * power savings, even without L1. + */ + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); + if (lctl & PCI_EXP_LNKCTL_ASPM_L1) + iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); + else + iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); + trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); + + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); + trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; + dev_info(trans->dev, "L1 %sabled - LTR %sabled\n", + (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", + trans->ltr_enabled ? "En" : "Dis"); +} + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +static int iwl_pcie_apm_init(struct iwl_trans *trans) +{ + int ret = 0; + IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. 
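/*
 * Illustrative sketch (not part of the patch): the point of the "set_bit
 * rather than write" note above. A read-modify-write keeps whatever the
 * hardware latched at reset, a plain write clobbers it. read32()/write32()
 * are placeholders for the driver's MMIO accessors.
 */
#include <stdint.h>

extern uint32_t read32(uint32_t ofs);
extern void write32(uint32_t ofs, uint32_t val);

static void set_bits_preserving(uint32_t ofs, uint32_t mask)
{
	/* add our bits, keep everything already set by the hardware */
	write32(ofs, read32(ofs) | mask);
}

static void write_clobbering(uint32_t ofs, uint32_t mask)
{
	/* what the comment warns against: every other bit is lost */
	write32(ofs, mask);
}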
+ */ + + /* Disable L0S exit timer (platform NMI Work/Around) */ + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + iwl_pcie_apm_config(trans); + + /* Configure analog phase-lock-loop before activating to D0A */ + if (trans->cfg->base_params->pll_cfg_val) + iwl_set_bit(trans, CSR_ANA_PLL_CFG, + trans->cfg->base_params->pll_cfg_val); + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is supported, e.g. iwl_write_prph() + * and accesses to uCode SRAM. + */ + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (ret < 0) { + IWL_DEBUG_INFO(trans, "Failed to init the card\n"); + goto out; + } + + if (trans->cfg->host_interrupt_operation_mode) { + /* + * This is a bit of an abuse - This is needed for 7260 / 3160 + * only check host_interrupt_operation_mode even if this is + * not related to host_interrupt_operation_mode. + * + * Enable the oscillator to count wake up time for L1 exit. This + * consumes slightly more power (100uA) - but allows to be sure + * that we wake up from L1 on time. + * + * This looks weird: read twice the same register, discard the + * value, set a bit, and yet again, read that same register + * just to discard the value. But that's the way the hardware + * seems to like it. + */ + iwl_read_prph(trans, OSC_CLK); + iwl_read_prph(trans, OSC_CLK); + iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); + iwl_read_prph(trans, OSC_CLK); + iwl_read_prph(trans, OSC_CLK); + } + + /* + * Enable DMA clock and wait for it to stabilize. + * + * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" + * bits do not disable clocks. This preserves any hardware + * bits already set by default in "CLK_CTRL_REG" after reset. + */ + if (!trans->cfg->apmg_not_supported) { + iwl_write_prph(trans, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(20); + + /* Disable L1-Active */ + iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + + /* Clear the interrupt in APMG if the NIC is in RFKILL */ + iwl_write_prph(trans, APMG_RTC_INT_STT_REG, + APMG_RTC_INT_STT_RFKILL); + } + + set_bit(STATUS_DEVICE_ENABLED, &trans->status); + +out: + return ret; +} + +/* + * Enable LP XTAL to avoid HW bug where device may consume much power if + * FW is not loaded after device reset. LP XTAL is disabled by default + * after device HW reset. Do it only if XTAL is fed by internal source. + * Configure device's "persistence" mode to avoid resetting XTAL again when + * SHRD_HW_RST occurs in S3. 
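/*
 * Illustrative sketch (not part of the patch): the "wait for clock
 * stabilization" step above is a poll-with-timeout. poll_bit() below shows
 * the shape of it; read32() and udelay() are placeholders, and a caller
 * would pass a budget such as the 25000 us used above.
 */
#include <stdint.h>

extern uint32_t read32(uint32_t ofs);
extern void udelay(unsigned long usecs);

static int poll_bit(uint32_t ofs, uint32_t bits, uint32_t mask, int timeout_us)
{
	int t = 0;

	do {
		if ((read32(ofs) & mask) == (bits & mask))
			return t;	/* success: microseconds waited */
		udelay(10);
		t += 10;
	} while (t < timeout_us);

	return -1;			/* timed out */
}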
+ */ +static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) +{ + int ret; + u32 apmg_gp1_reg; + u32 apmg_xtal_cfg_reg; + u32 dl_cfg_reg; + + /* Force XTAL ON */ + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_XTAL_ON); + + /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is possible. + */ + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + 25000); + if (WARN_ON(ret < 0)) { + IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n"); + /* Release XTAL ON request */ + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_XTAL_ON); + return; + } + + /* + * Clear "disable persistence" to avoid LP XTAL resetting when + * SHRD_HW_RST is applied in S3. + */ + iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_PERSIST_DIS); + + /* + * Force APMG XTAL to be active to prevent its disabling by HW + * caused by APMG idle state. + */ + apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, + SHR_APMG_XTAL_CFG_REG); + iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, + apmg_xtal_cfg_reg | + SHR_APMG_XTAL_CFG_XTAL_ON_REQ); + + /* + * Reset entire device again - do controller reset (results in + * SHRD_HW_RST). Turn MAC off before proceeding. + */ + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + /* Enable LP XTAL by indirect access through CSR */ + apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); + iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | + SHR_APMG_GP1_WF_XTAL_LP_EN | + SHR_APMG_GP1_CHICKEN_BIT_SELECT); + + /* Clear delay line clock power up */ + dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); + iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & + ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); + + /* + * Enable persistence mode to avoid LP XTAL resetting when + * SHRD_HW_RST is applied in S3. + */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
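/*
 * Illustrative sketch (not part of the patch): the SHR helpers used by the
 * XTAL workaround above follow an indirect-access pattern - address plus
 * opcode go into a control register, the payload moves through a data
 * register. ctrl_write()/data_read()/data_write() and the opcode values
 * are placeholders mirroring iwl_trans_pcie_read_shr()/_write_shr().
 */
#include <stdint.h>

extern void ctrl_write(uint32_t val);
extern uint32_t data_read(void);
extern void data_write(uint32_t val);

#define SHR_OP_READ	2u
#define SHR_OP_WRITE	3u

static uint32_t shr_read(uint32_t reg)
{
	ctrl_write((reg & 0xffff) | (SHR_OP_READ << 28));
	return data_read();
}

static void shr_write(uint32_t reg, uint32_t val)
{
	data_write(val);					/* payload first ... */
	ctrl_write((reg & 0xffff) | (SHR_OP_WRITE << 28));	/* ... then the latch */
}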
+ */ + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* Activates XTAL resources monitor */ + __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, + CSR_MONITOR_XTAL_RESOURCES); + + /* Release XTAL ON request */ + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_XTAL_ON); + udelay(10); + + /* Release APMG XTAL */ + iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, + apmg_xtal_cfg_reg & + ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); +} + +static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) +{ + int ret = 0; + + /* stop device's busmaster DMA activity */ + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = iwl_poll_bit(trans, CSR_RESET, + CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (ret < 0) + IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); + + IWL_DEBUG_INFO(trans, "stop master\n"); + + return ret; +} + +static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) +{ + IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); + + if (op_mode_leave) { + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + iwl_pcie_apm_init(trans); + + /* inform ME that we are leaving */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) + iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_WAKE_ME); + else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE | + CSR_HW_IF_CONFIG_REG_ENABLE_PME); + mdelay(1); + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + } + mdelay(5); + } + + clear_bit(STATUS_DEVICE_ENABLED, &trans->status); + + /* Stop device's DMA activity */ + iwl_pcie_apm_stop_master(trans); + + if (trans->cfg->lp_xtal_workaround) { + iwl_pcie_apm_lp_xtal_enable(trans); + return; + } + + /* Reset the entire device */ + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
+ */ + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); +} + +static int iwl_pcie_nic_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* nic_init */ + spin_lock(&trans_pcie->irq_lock); + iwl_pcie_apm_init(trans); + + spin_unlock(&trans_pcie->irq_lock); + + iwl_pcie_set_pwr(trans, false); + + iwl_op_mode_nic_config(trans->op_mode); + + /* Allocate the RX queue, or reset if it is already allocated */ + iwl_pcie_rx_init(trans); + + /* Allocate or reset and init all Tx and Command queues */ + if (iwl_pcie_tx_init(trans)) + return -ENOMEM; + + if (trans->cfg->base_params->shadow_reg_enable) { + /* enable shadow regs in HW */ + iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); + IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); + } + + return 0; +} + +#define HW_READY_TIMEOUT (50) + +/* Note: returns poll_bit return value, which is >= 0 if success */ +static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) +{ + int ret; + + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + HW_READY_TIMEOUT); + + if (ret >= 0) + iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); + + IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); + return ret; +} + +/* Note: returns standard 0/-ERROR code */ +static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) +{ + int ret; + int t = 0; + int iter; + + IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); + + ret = iwl_pcie_set_hw_ready(trans); + /* If the card is ready, exit 0 */ + if (ret >= 0) + return 0; + + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + msleep(1); + + for (iter = 0; iter < 10; iter++) { + /* If HW is not ready, prepare the conditions to check again */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + do { + ret = iwl_pcie_set_hw_ready(trans); + if (ret >= 0) + return 0; + + usleep_range(200, 1000); + t += 200; + } while (t < 150000); + msleep(25); + } + + IWL_ERR(trans, "Couldn't prepare the card\n"); + + return ret; +} + +/* + * ucode + */ +static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, + dma_addr_t phy_addr, u32 byte_cnt) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + trans_pcie->ucode_write_complete = false; + + iwl_write_direct32(trans, + FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); + + iwl_write_direct32(trans, + FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), + dst_addr); + + iwl_write_direct32(trans, + FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), + phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); + + iwl_write_direct32(trans, + FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), + (iwl_get_dma_hi_addr(phy_addr) + << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); + + iwl_write_direct32(trans, + FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), + 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | + 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | + FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); + + iwl_write_direct32(trans, + FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | + FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); + + ret = wait_event_timeout(trans_pcie->ucode_write_waitq, + 
trans_pcie->ucode_write_complete, 5 * HZ); + if (!ret) { + IWL_ERR(trans, "Failed to load firmware chunk!\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, + const struct fw_desc *section) +{ + u8 *v_addr; + dma_addr_t p_addr; + u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); + int ret = 0; + + IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", + section_num); + + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!v_addr) { + IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); + chunk_sz = PAGE_SIZE; + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, + &p_addr, GFP_KERNEL); + if (!v_addr) + return -ENOMEM; + } + + for (offset = 0; offset < section->len; offset += chunk_sz) { + u32 copy_size, dst_addr; + bool extended_addr = false; + + copy_size = min_t(u32, chunk_sz, section->len - offset); + dst_addr = section->offset + offset; + + if (dst_addr >= IWL_FW_MEM_EXTENDED_START && + dst_addr <= IWL_FW_MEM_EXTENDED_END) + extended_addr = true; + + if (extended_addr) + iwl_set_bits_prph(trans, LMPM_CHICK, + LMPM_CHICK_EXTENDED_ADDR_SPACE); + + memcpy(v_addr, (u8 *)section->data + offset, copy_size); + ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, + copy_size); + + if (extended_addr) + iwl_clear_bits_prph(trans, LMPM_CHICK, + LMPM_CHICK_EXTENDED_ADDR_SPACE); + + if (ret) { + IWL_ERR(trans, + "Could not load the [%d] uCode section\n", + section_num); + break; + } + } + + dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); + return ret; +} + +/* + * Driver Takes the ownership on secure machine before FW load + * and prevent race with the BT load. + * W/A for ROM bug. (should be remove in the next Si step) + */ +static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans) +{ + u32 val, loop = 1000; + + /* + * Check the RSA semaphore is accessible. + * If the HW isn't locked and the rsa semaphore isn't accessible, + * we are in trouble. + */ + val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); + if (val & (BIT(1) | BIT(17))) { + IWL_INFO(trans, + "can't access the RSA semaphore it is write protected\n"); + return 0; + } + + /* take ownership on the AUX IF */ + iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK); + iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK); + + do { + iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1); + val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS); + if (val == 0x1) { + iwl_write_prph(trans, RSA_ENABLE, 0); + return 0; + } + + udelay(10); + loop--; + } while (loop > 0); + + IWL_ERR(trans, "Failed to take ownership on secure machine\n"); + return -EIO; +} + +static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, + const struct fw_img *image, + int cpu, + int *first_ucode_section) +{ + int shift_param; + int i, ret = 0, sec_num = 0x1; + u32 val, last_read_idx = 0; + + if (cpu == 1) { + shift_param = 0; + *first_ucode_section = 0; + } else { + shift_param = 16; + (*first_ucode_section)++; + } + + for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { + last_read_idx = i; + + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between + * CPU1 to CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separate between + * CPU2 non paged to CPU2 paging sec. 
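/*
 * Illustrative sketch (not part of the patch): the loop that follows walks
 * the firmware image section by section and stops at a separator entry.
 * struct fw_section and the SEPARATOR sentinel are stand-ins for the
 * driver's fw_desc and CPU1_CPU2_SEPARATOR_SECTION definitions.
 */
#include <stdint.h>

#define SEPARATOR 0xFFFFCCCC	/* assumed sentinel offset */

struct fw_section {
	const void *data;
	uint32_t offset;
	uint32_t len;
};

static int load_until_separator(const struct fw_section *sec, int max,
				int (*load_one)(const struct fw_section *))
{
	int i, ret;

	for (i = 0; i < max; i++) {
		if (!sec[i].data || sec[i].offset == SEPARATOR)
			break;		/* end of this CPU's sections */
		ret = load_one(&sec[i]);
		if (ret)
			return ret;	/* propagate the load error */
	}
	return i;			/* index where the next CPU starts */
}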
+ */ + if (!image->sec[i].data || + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { + IWL_DEBUG_FW(trans, + "Break since Data not valid or Empty section, sec = %d\n", + i); + break; + } + + ret = iwl_pcie_load_section(trans, i, &image->sec[i]); + if (ret) + return ret; + + /* Notify the ucode of the loaded section number and status */ + val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); + val = val | (sec_num << shift_param); + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); + sec_num = (sec_num << 1) | 0x1; + } + + *first_ucode_section = last_read_idx; + + if (cpu == 1) + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF); + else + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); + + return 0; +} + +static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, + const struct fw_img *image, + int cpu, + int *first_ucode_section) +{ + int shift_param; + int i, ret = 0; + u32 last_read_idx = 0; + + if (cpu == 1) { + shift_param = 0; + *first_ucode_section = 0; + } else { + shift_param = 16; + (*first_ucode_section)++; + } + + for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { + last_read_idx = i; + + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between + * CPU1 to CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separate between + * CPU2 non paged to CPU2 paging sec. + */ + if (!image->sec[i].data || + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { + IWL_DEBUG_FW(trans, + "Break since Data not valid or Empty section, sec = %d\n", + i); + break; + } + + ret = iwl_pcie_load_section(trans, i, &image->sec[i]); + if (ret) + return ret; + } + + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + iwl_set_bits_prph(trans, + CSR_UCODE_LOAD_STATUS_ADDR, + (LMPM_CPU_UCODE_LOADING_COMPLETED | + LMPM_CPU_HDRS_LOADING_COMPLETED | + LMPM_CPU_UCODE_LOADING_STARTED) << + shift_param); + + *first_ucode_section = last_read_idx; + + return 0; +} + +static void iwl_pcie_apply_destination(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; + int i; + + if (dest->version) + IWL_ERR(trans, + "DBG DEST version is %d - expect issues\n", + dest->version); + + IWL_INFO(trans, "Applying debug destination %s\n", + get_fw_dbg_mode_string(dest->monitor_mode)); + + if (dest->monitor_mode == EXTERNAL_MODE) + iwl_pcie_alloc_fw_monitor(trans, dest->size_power); + else + IWL_WARN(trans, "PCI should have external buffer debug\n"); + + for (i = 0; i < trans->dbg_dest_reg_num; i++) { + u32 addr = le32_to_cpu(dest->reg_ops[i].addr); + u32 val = le32_to_cpu(dest->reg_ops[i].val); + + switch (dest->reg_ops[i].op) { + case CSR_ASSIGN: + iwl_write32(trans, addr, val); + break; + case CSR_SETBIT: + iwl_set_bit(trans, addr, BIT(val)); + break; + case CSR_CLEARBIT: + iwl_clear_bit(trans, addr, BIT(val)); + break; + case PRPH_ASSIGN: + iwl_write_prph(trans, addr, val); + break; + case PRPH_SETBIT: + iwl_set_bits_prph(trans, addr, BIT(val)); + break; + case PRPH_CLEARBIT: + iwl_clear_bits_prph(trans, addr, BIT(val)); + break; + case PRPH_BLOCKBIT: + if (iwl_read_prph(trans, addr) & BIT(val)) { + IWL_ERR(trans, + "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", + val, addr); + goto monitor; + } + break; + default: + IWL_ERR(trans, "FW debug - unknown OP %d\n", + dest->reg_ops[i].op); + break; + } + } + +monitor: + if (dest->monitor_mode == 
EXTERNAL_MODE && trans_pcie->fw_mon_size) { + iwl_write_prph(trans, le32_to_cpu(dest->base_reg), + trans_pcie->fw_mon_phys >> dest->base_shift); + iwl_write_prph(trans, le32_to_cpu(dest->end_reg), + (trans_pcie->fw_mon_phys + + trans_pcie->fw_mon_size) >> dest->end_shift); + } +} + +static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, + const struct fw_img *image) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret = 0; + int first_ucode_section; + + IWL_DEBUG_FW(trans, "working with %s CPU\n", + image->is_dual_cpus ? "Dual" : "Single"); + + /* load to FW the binary non secured sections of CPU1 */ + ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); + if (ret) + return ret; + + if (image->is_dual_cpus) { + /* set CPU2 header address */ + iwl_write_prph(trans, + LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, + LMPM_SECURE_CPU2_HDR_MEM_SPACE); + + /* load to FW the binary sections of CPU2 */ + ret = iwl_pcie_load_cpu_sections(trans, image, 2, + &first_ucode_section); + if (ret) + return ret; + } + + /* supported for 7000 only for the moment */ + if (iwlwifi_mod_params.fw_monitor && + trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_pcie_alloc_fw_monitor(trans, 0); + + if (trans_pcie->fw_mon_size) { + iwl_write_prph(trans, MON_BUFF_BASE_ADDR, + trans_pcie->fw_mon_phys >> 4); + iwl_write_prph(trans, MON_BUFF_END_ADDR, + (trans_pcie->fw_mon_phys + + trans_pcie->fw_mon_size) >> 4); + } + } else if (trans->dbg_dest_tlv) { + iwl_pcie_apply_destination(trans); + } + + /* release CPU reset */ + iwl_write32(trans, CSR_RESET, 0); + + return 0; +} + +static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, + const struct fw_img *image) +{ + int ret = 0; + int first_ucode_section; + + IWL_DEBUG_FW(trans, "working with %s CPU\n", + image->is_dual_cpus ? 
"Dual" : "Single"); + + if (trans->dbg_dest_tlv) + iwl_pcie_apply_destination(trans); + + /* TODO: remove in the next Si step */ + ret = iwl_pcie_rsa_race_bug_wa(trans); + if (ret) + return ret; + + /* configure the ucode to be ready to get the secured image */ + /* release CPU reset */ + iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); + + /* load to FW the binary Secured sections of CPU1 */ + ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1, + &first_ucode_section); + if (ret) + return ret; + + /* load to FW the binary sections of CPU2 */ + return iwl_pcie_load_cpu_sections_8000(trans, image, 2, + &first_ucode_section); +} + +static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill; + int ret; + + mutex_lock(&trans_pcie->mutex); + + /* Someone called stop_device, don't try to start_fw */ + if (trans_pcie->is_down) { + IWL_WARN(trans, + "Can't start_fw since the HW hasn't been started\n"); + ret = EIO; + goto out; + } + + /* This may fail if AMT took ownership of the device */ + if (iwl_pcie_prepare_card_hw(trans)) { + IWL_WARN(trans, "Exit HW not ready\n"); + ret = -EIO; + goto out; + } + + iwl_enable_rfkill_int(trans); + + /* If platform's RF_KILL switch is NOT set to KILL */ + hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans->status); + else + clear_bit(STATUS_RFKILL, &trans->status); + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + if (hw_rfkill && !run_in_rfkill) { + ret = -ERFKILL; + goto out; + } + + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + + ret = iwl_pcie_nic_init(trans); + if (ret) { + IWL_ERR(trans, "Unable to init nic\n"); + goto out; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(trans); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Load the given image to the HW */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + ret = iwl_pcie_load_given_ucode_8000(trans, fw); + else + ret = iwl_pcie_load_given_ucode(trans, fw); + +out: + mutex_unlock(&trans_pcie->mutex); + return ret; +} + +static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) +{ + iwl_pcie_reset_ict(trans); + iwl_pcie_tx_start(trans, scd_addr); +} + +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill, was_hw_rfkill; + + lockdep_assert_held(&trans_pcie->mutex); + + if (trans_pcie->is_down) + return; + + trans_pcie->is_down = true; + + was_hw_rfkill = iwl_is_rfkill_set(trans); + + /* tell the device to stop sending interrupts */ + spin_lock(&trans_pcie->irq_lock); + iwl_disable_interrupts(trans); + spin_unlock(&trans_pcie->irq_lock); + + /* device going down, Stop using ICT table */ + iwl_pcie_disable_ict(trans); + + /* + * If a HW restart happens during firmware loading, + * then the firmware loading might call this function + * and later it might be called again due to the + * restart. 
So don't process again if the device is
+	 * already dead.
+	 */
+	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
+		iwl_pcie_tx_stop(trans);
+		iwl_pcie_rx_stop(trans);
+
+		/* Power-down device's busmaster DMA clocks */
+		if (!trans->cfg->apmg_not_supported) {
+			iwl_write_prph(trans, APMG_CLK_DIS_REG,
+				       APMG_CLK_VAL_DMA_CLK_RQT);
+			udelay(5);
+		}
+	}
+
+	/* Make sure (redundant) we've released our request to stay awake */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/* Stop the device, and put it in low power state */
+	iwl_pcie_apm_stop(trans, false);
+
+	/* stop and reset the on-board processor */
+	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+	udelay(20);
+
+	/*
+	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
+	 * This is a bug in certain versions of the hardware.
+	 * Certain devices also keep sending HW RF kill interrupt all
+	 * the time, unless the interrupt is ACKed even if the interrupt
+	 * should be masked. Re-ACK all the interrupts here.
+	 */
+	spin_lock(&trans_pcie->irq_lock);
+	iwl_disable_interrupts(trans);
+	spin_unlock(&trans_pcie->irq_lock);
+
+
+	/* clear all status bits */
+	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+	clear_bit(STATUS_INT_ENABLED, &trans->status);
+	clear_bit(STATUS_TPOWER_PMI, &trans->status);
+	clear_bit(STATUS_RFKILL, &trans->status);
+
+	/*
+	 * Even if we stop the HW, we still want the RF kill
+	 * interrupt
+	 */
+	iwl_enable_rfkill_int(trans);
+
+	/*
+	 * Check again since the RF kill state may have changed while
+	 * all the interrupts were disabled, in this case we couldn't
+	 * receive the RF kill interrupt and update the state in the
+	 * op_mode.
+	 * Don't call the op_mode if the rfkill state hasn't changed.
+	 * This allows the op_mode to call stop_device from the rfkill
+	 * notification without endless recursion. Under very rare
+	 * circumstances, we might have a small recursion if the rfkill
+	 * state changed exactly now while we were called from stop_device.
+	 * This is very unlikely but can happen and is supported.
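/*
 * Illustrative sketch (not part of the patch): the rfkill handling just
 * below only notifies the op_mode on an actual transition, which is what
 * keeps stop_device from recursing endlessly through the rfkill
 * notification. notify_rfkill() is a placeholder for iwl_trans_pcie_rf_kill().
 */
#include <stdbool.h>

static void report_rfkill(bool was_set, bool now_set,
			  void (*notify_rfkill)(bool))
{
	if (now_set != was_set)
		notify_rfkill(now_set);	/* only report real state changes */
}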
+ */ + hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans->status); + else + clear_bit(STATUS_RFKILL, &trans->status); + if (hw_rfkill != was_hw_rfkill) + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + + /* re-take ownership to prevent other users from stealing the deivce */ + iwl_pcie_prepare_card_hw(trans); +} + +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + mutex_lock(&trans_pcie->mutex); + _iwl_trans_pcie_stop_device(trans, low_power); + mutex_unlock(&trans_pcie->mutex); +} + +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) +{ + struct iwl_trans_pcie __maybe_unused *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->mutex); + + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) + _iwl_trans_pcie_stop_device(trans, true); +} + +static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->wowlan_d0i3) { + /* Enable persistence mode to avoid reset */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + } + + iwl_disable_interrupts(trans); + + /* + * in testing mode, the host stays awake and the + * hardware won't be reset (not even partially) + */ + if (test) + return; + + iwl_pcie_disable_ict(trans); + + synchronize_irq(trans_pcie->pci_dev->irq); + + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + if (!trans->wowlan_d0i3) { + /* + * reset TX queues -- some of their registers reset during S3 + * so if we don't reset everything here the D3 image would try + * to execute some invalid memory upon resume + */ + iwl_trans_pcie_tx_reset(trans); + } + + iwl_pcie_set_pwr(trans, true); +} + +static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status, + bool test) +{ + u32 val; + int ret; + + if (test) { + iwl_enable_interrupts(trans); + *status = IWL_D3_STATUS_ALIVE; + return 0; + } + + /* + * Also enables interrupts - none will happen as the device doesn't + * know we're waking it up, only when the opmode actually tells it + * after this call. 
+ */ + iwl_pcie_reset_ict(trans); + + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + udelay(2); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + 25000); + if (ret < 0) { + IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); + return ret; + } + + iwl_pcie_set_pwr(trans, false); + + if (trans->wowlan_d0i3) { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } else { + iwl_trans_pcie_tx_reset(trans); + + ret = iwl_pcie_rx_init(trans); + if (ret) { + IWL_ERR(trans, + "Failed to resume the device (RX reset)\n"); + return ret; + } + } + + val = iwl_read32(trans, CSR_RESET); + if (val & CSR_RESET_REG_FLAG_NEVO_RESET) + *status = IWL_D3_STATUS_RESET; + else + *status = IWL_D3_STATUS_ALIVE; + + return 0; +} + +static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool hw_rfkill; + int err; + + lockdep_assert_held(&trans_pcie->mutex); + + err = iwl_pcie_prepare_card_hw(trans); + if (err) { + IWL_ERR(trans, "Error while preparing HW: %d\n", err); + return err; + } + + /* Reset the entire device */ + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + usleep_range(10, 15); + + iwl_pcie_apm_init(trans); + + /* From now on, the op_mode will be kept updated about RF kill state */ + iwl_enable_rfkill_int(trans); + + /* Set is_down to false here so that...*/ + trans_pcie->is_down = false; + + hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans->status); + else + clear_bit(STATUS_RFKILL, &trans->status); + /* ... 
rfkill can call stop_device and set it false if needed */ + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + + return 0; +} + +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + mutex_lock(&trans_pcie->mutex); + ret = _iwl_trans_pcie_start_hw(trans, low_power); + mutex_unlock(&trans_pcie->mutex); + + return ret; +} + +static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + mutex_lock(&trans_pcie->mutex); + + /* disable interrupts - don't enable HW RF kill interrupt */ + spin_lock(&trans_pcie->irq_lock); + iwl_disable_interrupts(trans); + spin_unlock(&trans_pcie->irq_lock); + + iwl_pcie_apm_stop(trans, true); + + spin_lock(&trans_pcie->irq_lock); + iwl_disable_interrupts(trans); + spin_unlock(&trans_pcie->irq_lock); + + iwl_pcie_disable_ict(trans); + + mutex_unlock(&trans_pcie->mutex); + + synchronize_irq(trans_pcie->pci_dev->irq); +} + +static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) +{ + writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); +} + +static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) +{ + writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); +} + +static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) +{ + return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); +} + +static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) +{ + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, + ((reg & 0x000FFFFF) | (3 << 24))); + return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); +} + +static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, + u32 val) +{ + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, + ((addr & 0x000FFFFF) | (3 << 24))); + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); +} + +static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) +{ + WARN_ON(1); + return 0; +} + +static void iwl_trans_pcie_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + trans_pcie->cmd_queue = trans_cfg->cmd_queue; + trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; + trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout; + if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) + trans_pcie->n_no_reclaim_cmds = 0; + else + trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; + if (trans_pcie->n_no_reclaim_cmds) + memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, + trans_pcie->n_no_reclaim_cmds * sizeof(u8)); + + trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k; + if (trans_pcie->rx_buf_size_8k) + trans_pcie->rx_page_order = get_order(8 * 1024); + else + trans_pcie->rx_page_order = get_order(4 * 1024); + + trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header; + trans_pcie->command_names = trans_cfg->command_names; + trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; + trans_pcie->scd_set_active = trans_cfg->scd_set_active; + + /* init ref_count to 1 (should be cleared when ucode is loaded) */ + trans_pcie->ref_count = 1; + + /* Initialize NAPI here - it should be before registering to mac80211 + * in the opmode but after the HW struct is allocated. + * As this function may be called again in some corner cases don't + * do anything if NAPI was already initialized. 
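+	 * The dummy poll callback registered below is never scheduled (it
+	 * only warns if it ever runs); the NAPI context is only needed so
+	 * the RX path can feed received frames through GRO.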
+ */ + if (!trans_pcie->napi.poll) { + init_dummy_netdev(&trans_pcie->napi_dev); + netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, + iwl_pcie_dummy_napi_poll, 64); + } +} + +void iwl_trans_pcie_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + synchronize_irq(trans_pcie->pci_dev->irq); + + iwl_pcie_tx_free(trans); + iwl_pcie_rx_free(trans); + + free_irq(trans_pcie->pci_dev->irq, trans); + iwl_pcie_free_ict(trans); + + pci_disable_msi(trans_pcie->pci_dev); + iounmap(trans_pcie->hw_base); + pci_release_regions(trans_pcie->pci_dev); + pci_disable_device(trans_pcie->pci_dev); + + if (trans_pcie->napi.poll) + netif_napi_del(&trans_pcie->napi); + + iwl_pcie_free_fw_monitor(trans); + + iwl_trans_free(trans); +} + +static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) +{ + if (state) + set_bit(STATUS_TPOWER_PMI, &trans->status); + else + clear_bit(STATUS_TPOWER_PMI, &trans->status); +} + +static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, + unsigned long *flags) +{ + int ret; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + spin_lock_irqsave(&trans_pcie->reg_lock, *flags); + + if (trans_pcie->cmd_hold_nic_awake) + goto out; + + /* this bit wakes up the NIC */ + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + udelay(2); + + /* + * These bits say the device is running, and should keep running for + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), + * but they do not indicate that embedded SRAM is restored yet; + * 3945 and 4965 have volatile SRAM, and must save/restore contents + * to/from host DRAM when sleeping/waking for power-saving. + * Each direction takes approximately 1/4 millisecond; with this + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a + * series of register accesses are expected (e.g. reading Event Log), + * to keep device from sleeping. + * + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that + * SRAM is okay/restored. We don't check that here because this call + * is just for hardware register access; but GP1 MAC_SLEEP check is a + * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). + * + * 5000 series and later (including 1000 series) have non-volatile SRAM, + * and do not save/restore SRAM when power cycling. + */ + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + if (unlikely(ret < 0)) { + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); + if (!silent) { + u32 val = iwl_read32(trans, CSR_GP_CNTRL); + WARN_ONCE(1, + "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", + val); + spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); + return false; + } + } + +out: + /* + * Fool sparse by faking we release the lock - sparse will + * track nic_access anyway. + */ + __release(&trans_pcie->reg_lock); + return true; +} + +static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, + unsigned long *flags) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->reg_lock); + + /* + * Fool sparse by faking we acquiring the lock - sparse will + * track nic_access anyway. 
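+	 * grab_nic_access() returned with reg_lock still held but annotated
+	 * as released (__release), so the __acquire() below rebalances the
+	 * annotation before the real spin_unlock_irqrestore().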
+ */ + __acquire(&trans_pcie->reg_lock); + + if (trans_pcie->cmd_hold_nic_awake) + goto out; + + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + /* + * Above we read the CSR_GP_CNTRL register, which will flush + * any previous writes, but we need the write that clears the + * MAC_ACCESS_REQ bit to be performed before any other writes + * scheduled on different CPUs (after we drop reg_lock). + */ + mmiowb(); +out: + spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); +} + +static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + unsigned long flags; + int offs, ret = 0; + u32 *vals = buf; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); + for (offs = 0; offs < dwords; offs++) + vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + iwl_trans_release_nic_access(trans, &flags); + } else { + ret = -EBUSY; + } + return ret; +} + +static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords) +{ + unsigned long flags; + int offs, ret = 0; + const u32 *vals = buf; + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); + for (offs = 0; offs < dwords; offs++) + iwl_write32(trans, HBUS_TARG_MEM_WDAT, + vals ? vals[offs] : 0); + iwl_trans_release_nic_access(trans, &flags); + } else { + ret = -EBUSY; + } + return ret; +} + +static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, + bool freeze) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int queue; + + for_each_set_bit(queue, &txqs, BITS_PER_LONG) { + struct iwl_txq *txq = &trans_pcie->txq[queue]; + unsigned long now; + + spin_lock_bh(&txq->lock); + + now = jiffies; + + if (txq->frozen == freeze) + goto next_queue; + + IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", + freeze ? "Freezing" : "Waking", queue); + + txq->frozen = freeze; + + if (txq->q.read_ptr == txq->q.write_ptr) + goto next_queue; + + if (freeze) { + if (unlikely(time_after(now, + txq->stuck_timer.expires))) { + /* + * The timer should have fired, maybe it is + * spinning right now on the lock. 
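+				 * In that case leave the timer alone and just
+				 * skip this queue instead of recording a
+				 * stale expiry remainder.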
+ */ + goto next_queue; + } + /* remember how long until the timer fires */ + txq->frozen_expiry_remainder = + txq->stuck_timer.expires - now; + del_timer(&txq->stuck_timer); + goto next_queue; + } + + /* + * Wake a non-empty queue -> arm timer with the + * remainder before it froze + */ + mod_timer(&txq->stuck_timer, + now + txq->frozen_expiry_remainder); + +next_queue: + spin_unlock_bh(&txq->lock); + } +} + +#define IWL_FLUSH_WAIT_MS 2000 + +static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq; + struct iwl_queue *q; + int cnt; + unsigned long now = jiffies; + u32 scd_sram_addr; + u8 buf[16]; + int ret = 0; + + /* waiting for all the tx frames complete might take a while */ + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { + u8 wr_ptr; + + if (cnt == trans_pcie->cmd_queue) + continue; + if (!test_bit(cnt, trans_pcie->queue_used)) + continue; + if (!(BIT(cnt) & txq_bm)) + continue; + + IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt); + txq = &trans_pcie->txq[cnt]; + q = &txq->q; + wr_ptr = ACCESS_ONCE(q->write_ptr); + + while (q->read_ptr != ACCESS_ONCE(q->write_ptr) && + !time_after(jiffies, + now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { + u8 write_ptr = ACCESS_ONCE(q->write_ptr); + + if (WARN_ONCE(wr_ptr != write_ptr, + "WR pointer moved while flushing %d -> %d\n", + wr_ptr, write_ptr)) + return -ETIMEDOUT; + msleep(1); + } + + if (q->read_ptr != q->write_ptr) { + IWL_ERR(trans, + "fail to flush all tx fifo queues Q %d\n", cnt); + ret = -ETIMEDOUT; + break; + } + IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt); + } + + if (!ret) + return 0; + + IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", + txq->q.read_ptr, txq->q.write_ptr); + + scd_sram_addr = trans_pcie->scd_base_addr + + SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); + iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); + + iwl_print_hex_error(trans, buf, sizeof(buf)); + + for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++) + IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt, + iwl_read_direct32(trans, FH_TX_TRB_REG(cnt))); + + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { + u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt)); + u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); + u32 tbl_dw = + iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(cnt)); + + if (cnt & 0x1) + tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; + else + tbl_dw = tbl_dw & 0x0000FFFF; + + IWL_ERR(trans, + "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", + cnt, active ? 
"" : "in", fifo, tbl_dw, + iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) & + (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); + } + + return ret; +} + +static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); +} + +void iwl_trans_pcie_ref(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + + if (iwlwifi_mod_params.d0i3_disable) + return; + + spin_lock_irqsave(&trans_pcie->ref_lock, flags); + IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); + trans_pcie->ref_count++; + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); +} + +void iwl_trans_pcie_unref(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + + if (iwlwifi_mod_params.d0i3_disable) + return; + + spin_lock_irqsave(&trans_pcie->ref_lock, flags); + IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); + if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) { + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); + return; + } + trans_pcie->ref_count--; + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); +} + +static const char *get_csr_string(int cmd) +{ +#define IWL_CMD(x) case x: return #x + switch (cmd) { + IWL_CMD(CSR_HW_IF_CONFIG_REG); + IWL_CMD(CSR_INT_COALESCING); + IWL_CMD(CSR_INT); + IWL_CMD(CSR_INT_MASK); + IWL_CMD(CSR_FH_INT_STATUS); + IWL_CMD(CSR_GPIO_IN); + IWL_CMD(CSR_RESET); + IWL_CMD(CSR_GP_CNTRL); + IWL_CMD(CSR_HW_REV); + IWL_CMD(CSR_EEPROM_REG); + IWL_CMD(CSR_EEPROM_GP); + IWL_CMD(CSR_OTP_GP_REG); + IWL_CMD(CSR_GIO_REG); + IWL_CMD(CSR_GP_UCODE_REG); + IWL_CMD(CSR_GP_DRIVER_REG); + IWL_CMD(CSR_UCODE_DRV_GP1); + IWL_CMD(CSR_UCODE_DRV_GP2); + IWL_CMD(CSR_LED_REG); + IWL_CMD(CSR_DRAM_INT_TBL_REG); + IWL_CMD(CSR_GIO_CHICKEN_BITS); + IWL_CMD(CSR_ANA_PLL_CFG); + IWL_CMD(CSR_HW_REV_WA_REG); + IWL_CMD(CSR_MONITOR_STATUS_REG); + IWL_CMD(CSR_DBG_HPET_MEM_REG); + default: + return "UNKNOWN"; + } +#undef IWL_CMD +} + +void iwl_pcie_dump_csr(struct iwl_trans *trans) +{ + int i; + static const u32 csr_tbl[] = { + CSR_HW_IF_CONFIG_REG, + CSR_INT_COALESCING, + CSR_INT, + CSR_INT_MASK, + CSR_FH_INT_STATUS, + CSR_GPIO_IN, + CSR_RESET, + CSR_GP_CNTRL, + CSR_HW_REV, + CSR_EEPROM_REG, + CSR_EEPROM_GP, + CSR_OTP_GP_REG, + CSR_GIO_REG, + CSR_GP_UCODE_REG, + CSR_GP_DRIVER_REG, + CSR_UCODE_DRV_GP1, + CSR_UCODE_DRV_GP2, + CSR_LED_REG, + CSR_DRAM_INT_TBL_REG, + CSR_GIO_CHICKEN_BITS, + CSR_ANA_PLL_CFG, + CSR_MONITOR_STATUS_REG, + CSR_HW_REV_WA_REG, + CSR_DBG_HPET_MEM_REG + }; + IWL_ERR(trans, "CSR values:\n"); + IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " + "CSR_INT_PERIODIC_REG)\n"); + for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { + IWL_ERR(trans, " %25s: 0X%08x\n", + get_csr_string(csr_tbl[i]), + iwl_read32(trans, csr_tbl[i])); + } +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, trans, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = iwl_dbgfs_##name##_read, \ + .open = 
simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq; + struct iwl_queue *q; + char *buf; + int pos = 0; + int cnt; + int ret; + size_t bufsz; + + bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; + + if (!trans_pcie->txq) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { + txq = &trans_pcie->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", + cnt, q->read_ptr, q->write_ptr, + !!test_bit(cnt, trans_pcie->queue_used), + !!test_bit(cnt, trans_pcie->queue_stopped), + txq->need_update, txq->frozen, + (cnt == trans_pcie->cmd_queue ? " HCMD" : "")); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n", + rxq->write_actual); + pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n", + rxq->need_update); + pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "closed_rb_num: Not Allocated\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_interrupt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + + int pos = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, + "Interrupt Statistics Report:\n"); + + pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", + isr_stats->hw); + pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", + isr_stats->sw); + if (isr_stats->sw || isr_stats->hw) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tLast Restarting Code: 0x%X\n", + isr_stats->err_code); + } +#ifdef CONFIG_IWLWIFI_DEBUG + 
pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", + isr_stats->sch); + pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", + isr_stats->alive); +#endif + pos += scnprintf(buf + pos, bufsz - pos, + "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); + + pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", + isr_stats->ctkill); + + pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", + isr_stats->wakeup); + + pos += scnprintf(buf + pos, bufsz - pos, + "Rx command responses:\t\t %u\n", isr_stats->rx); + + pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", + isr_stats->tx); + + pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", + isr_stats->unhandled); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_interrupt_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + + char buf[8]; + int buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &reset_flag) != 1) + return -EFAULT; + if (reset_flag == 0) + memset(isr_stats, 0, sizeof(*isr_stats)); + + return count; +} + +static ssize_t iwl_dbgfs_csr_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + char buf[8]; + int buf_size; + int csr; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &csr) != 1) + return -EFAULT; + + iwl_pcie_dump_csr(trans); + + return count; +} + +static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + char *buf = NULL; + ssize_t ret; + + ret = iwl_dump_fh(trans, &buf); + if (ret < 0) + return ret; + if (!buf) + return -EINVAL; + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + kfree(buf); + return ret; +} + +DEBUGFS_READ_WRITE_FILE_OPS(interrupt); +DEBUGFS_READ_FILE_OPS(fh_reg); +DEBUGFS_READ_FILE_OPS(rx_queue); +DEBUGFS_READ_FILE_OPS(tx_queue); +DEBUGFS_WRITE_FILE_OPS(csr); + +/* + * Create the debugfs files and directories + * + */ +static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, + struct dentry *dir) +{ + DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); + DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); + DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); + DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); + return 0; + +err: + IWL_ERR(trans, "failed to create the trans debugfs entry\n"); + return -ENOMEM; +} +#else +static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, + struct dentry *dir) +{ + return 0; +} +#endif /*CONFIG_IWLWIFI_DEBUGFS */ + +static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd) +{ + u32 cmdlen = 0; + int i; + + for (i = 0; i < IWL_NUM_OF_TBS; i++) + cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i); + + return cmdlen; +} + +static const struct { + u32 start, end; +} iwl_prph_dump_addr[] = { + { .start = 0x00a00000, .end = 0x00a00000 }, + { .start = 0x00a0000c, .end = 0x00a00024 }, + { .start = 0x00a0002c, 
.end = 0x00a0003c }, + { .start = 0x00a00410, .end = 0x00a00418 }, + { .start = 0x00a00420, .end = 0x00a00420 }, + { .start = 0x00a00428, .end = 0x00a00428 }, + { .start = 0x00a00430, .end = 0x00a0043c }, + { .start = 0x00a00444, .end = 0x00a00444 }, + { .start = 0x00a004c0, .end = 0x00a004cc }, + { .start = 0x00a004d8, .end = 0x00a004d8 }, + { .start = 0x00a004e0, .end = 0x00a004f0 }, + { .start = 0x00a00840, .end = 0x00a00840 }, + { .start = 0x00a00850, .end = 0x00a00858 }, + { .start = 0x00a01004, .end = 0x00a01008 }, + { .start = 0x00a01010, .end = 0x00a01010 }, + { .start = 0x00a01018, .end = 0x00a01018 }, + { .start = 0x00a01024, .end = 0x00a01024 }, + { .start = 0x00a0102c, .end = 0x00a01034 }, + { .start = 0x00a0103c, .end = 0x00a01040 }, + { .start = 0x00a01048, .end = 0x00a01094 }, + { .start = 0x00a01c00, .end = 0x00a01c20 }, + { .start = 0x00a01c58, .end = 0x00a01c58 }, + { .start = 0x00a01c7c, .end = 0x00a01c7c }, + { .start = 0x00a01c28, .end = 0x00a01c54 }, + { .start = 0x00a01c5c, .end = 0x00a01c5c }, + { .start = 0x00a01c60, .end = 0x00a01cdc }, + { .start = 0x00a01ce0, .end = 0x00a01d0c }, + { .start = 0x00a01d18, .end = 0x00a01d20 }, + { .start = 0x00a01d2c, .end = 0x00a01d30 }, + { .start = 0x00a01d40, .end = 0x00a01d5c }, + { .start = 0x00a01d80, .end = 0x00a01d80 }, + { .start = 0x00a01d98, .end = 0x00a01d9c }, + { .start = 0x00a01da8, .end = 0x00a01da8 }, + { .start = 0x00a01db8, .end = 0x00a01df4 }, + { .start = 0x00a01dc0, .end = 0x00a01dfc }, + { .start = 0x00a01e00, .end = 0x00a01e2c }, + { .start = 0x00a01e40, .end = 0x00a01e60 }, + { .start = 0x00a01e68, .end = 0x00a01e6c }, + { .start = 0x00a01e74, .end = 0x00a01e74 }, + { .start = 0x00a01e84, .end = 0x00a01e90 }, + { .start = 0x00a01e9c, .end = 0x00a01ec4 }, + { .start = 0x00a01ed0, .end = 0x00a01ee0 }, + { .start = 0x00a01f00, .end = 0x00a01f1c }, + { .start = 0x00a01f44, .end = 0x00a01ffc }, + { .start = 0x00a02000, .end = 0x00a02048 }, + { .start = 0x00a02068, .end = 0x00a020f0 }, + { .start = 0x00a02100, .end = 0x00a02118 }, + { .start = 0x00a02140, .end = 0x00a0214c }, + { .start = 0x00a02168, .end = 0x00a0218c }, + { .start = 0x00a021c0, .end = 0x00a021c0 }, + { .start = 0x00a02400, .end = 0x00a02410 }, + { .start = 0x00a02418, .end = 0x00a02420 }, + { .start = 0x00a02428, .end = 0x00a0242c }, + { .start = 0x00a02434, .end = 0x00a02434 }, + { .start = 0x00a02440, .end = 0x00a02460 }, + { .start = 0x00a02468, .end = 0x00a024b0 }, + { .start = 0x00a024c8, .end = 0x00a024cc }, + { .start = 0x00a02500, .end = 0x00a02504 }, + { .start = 0x00a0250c, .end = 0x00a02510 }, + { .start = 0x00a02540, .end = 0x00a02554 }, + { .start = 0x00a02580, .end = 0x00a025f4 }, + { .start = 0x00a02600, .end = 0x00a0260c }, + { .start = 0x00a02648, .end = 0x00a02650 }, + { .start = 0x00a02680, .end = 0x00a02680 }, + { .start = 0x00a026c0, .end = 0x00a026d0 }, + { .start = 0x00a02700, .end = 0x00a0270c }, + { .start = 0x00a02804, .end = 0x00a02804 }, + { .start = 0x00a02818, .end = 0x00a0281c }, + { .start = 0x00a02c00, .end = 0x00a02db4 }, + { .start = 0x00a02df4, .end = 0x00a02fb0 }, + { .start = 0x00a03000, .end = 0x00a03014 }, + { .start = 0x00a0301c, .end = 0x00a0302c }, + { .start = 0x00a03034, .end = 0x00a03038 }, + { .start = 0x00a03040, .end = 0x00a03048 }, + { .start = 0x00a03060, .end = 0x00a03068 }, + { .start = 0x00a03070, .end = 0x00a03074 }, + { .start = 0x00a0307c, .end = 0x00a0307c }, + { .start = 0x00a03080, .end = 0x00a03084 }, + { .start = 0x00a0308c, .end = 0x00a03090 }, + { .start = 0x00a03098, .end = 
0x00a03098 }, + { .start = 0x00a030a0, .end = 0x00a030a0 }, + { .start = 0x00a030a8, .end = 0x00a030b4 }, + { .start = 0x00a030bc, .end = 0x00a030bc }, + { .start = 0x00a030c0, .end = 0x00a0312c }, + { .start = 0x00a03c00, .end = 0x00a03c5c }, + { .start = 0x00a04400, .end = 0x00a04454 }, + { .start = 0x00a04460, .end = 0x00a04474 }, + { .start = 0x00a044c0, .end = 0x00a044ec }, + { .start = 0x00a04500, .end = 0x00a04504 }, + { .start = 0x00a04510, .end = 0x00a04538 }, + { .start = 0x00a04540, .end = 0x00a04548 }, + { .start = 0x00a04560, .end = 0x00a0457c }, + { .start = 0x00a04590, .end = 0x00a04598 }, + { .start = 0x00a045c0, .end = 0x00a045f4 }, +}; + +static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data) +{ + struct iwl_fw_error_dump_prph *prph; + unsigned long flags; + u32 prph_len = 0, i; + + if (!iwl_trans_grab_nic_access(trans, false, &flags)) + return 0; + + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; + int reg; + __le32 *val; + + prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); + (*data)->len = cpu_to_le32(sizeof(*prph) + + num_bytes_in_chunk); + prph = (void *)(*data)->data; + prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); + val = (void *)prph->data; + + for (reg = iwl_prph_dump_addr[i].start; + reg <= iwl_prph_dump_addr[i].end; + reg += 4) + *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, + reg)); + *data = iwl_fw_error_next_data(*data); + } + + iwl_trans_release_nic_access(trans, &flags); + + return prph_len; +} + +static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + int allocated_rb_nums) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int max_len = PAGE_SIZE << trans_pcie->rx_page_order; + struct iwl_rxq *rxq = &trans_pcie->rxq; + u32 i, r, j, rb_len = 0; + + spin_lock(&rxq->lock); + + r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + + for (i = rxq->read, j = 0; + i != r && j < allocated_rb_nums; + i = (i + 1) & RX_QUEUE_MASK, j++) { + struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; + struct iwl_fw_error_dump_rb *rb; + + dma_unmap_page(trans->dev, rxb->page_dma, max_len, + DMA_FROM_DEVICE); + + rb_len += sizeof(**data) + sizeof(*rb) + max_len; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); + (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); + rb = (void *)(*data)->data; + rb->index = cpu_to_le32(i); + memcpy(rb->data, page_address(rxb->page), max_len); + /* remap the page for the free benefit */ + rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, + max_len, + DMA_FROM_DEVICE); + + *data = iwl_fw_error_next_data(*data); + } + + spin_unlock(&rxq->lock); + + return rb_len; +} +#define IWL_CSR_TO_DUMP (0x250) + +static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data) +{ + u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; + __le32 *val; + int i; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); + (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); + val = (void *)(*data)->data; + + for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) + *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); + + *data = iwl_fw_error_next_data(*data); + + return csr_len; +} + +static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data) +{ + 
u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; + unsigned long flags; + __le32 *val; + int i; + + if (!iwl_trans_grab_nic_access(trans, false, &flags)) + return 0; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); + (*data)->len = cpu_to_le32(fh_regs_len); + val = (void *)(*data)->data; + + for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) + *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); + + iwl_trans_release_nic_access(trans, &flags); + + *data = iwl_fw_error_next_data(*data); + + return sizeof(**data) + fh_regs_len; +} + +static u32 +iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, + struct iwl_fw_error_dump_fw_mon *fw_mon_data, + u32 monitor_len) +{ + u32 buf_size_in_dwords = (monitor_len >> 2); + u32 *buffer = (u32 *)fw_mon_data->data; + unsigned long flags; + u32 i; + + if (!iwl_trans_grab_nic_access(trans, false, &flags)) + return 0; + + __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1); + for (i = 0; i < buf_size_in_dwords; i++) + buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR); + __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0); + + iwl_trans_release_nic_access(trans, &flags); + + return monitor_len; +} + +static u32 +iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + u32 monitor_len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 len = 0; + + if ((trans_pcie->fw_mon_page && + trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || + trans->dbg_dest_tlv) { + struct iwl_fw_error_dump_fw_mon *fw_mon_data; + u32 base, write_ptr, wrap_cnt; + + /* If there was a dest TLV - use the values from there */ + if (trans->dbg_dest_tlv) { + write_ptr = + le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); + wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + } else { + base = MON_BUFF_BASE_ADDR; + write_ptr = MON_BUFF_WRPTR; + wrap_cnt = MON_BUFF_CYCLE_CNT; + } + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); + fw_mon_data = (void *)(*data)->data; + fw_mon_data->fw_mon_wr_ptr = + cpu_to_le32(iwl_read_prph(trans, write_ptr)); + fw_mon_data->fw_mon_cycle_cnt = + cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); + fw_mon_data->fw_mon_base_ptr = + cpu_to_le32(iwl_read_prph(trans, base)); + + len += sizeof(**data) + sizeof(*fw_mon_data); + if (trans_pcie->fw_mon_page) { + /* + * The firmware is now asserted, it won't write anything + * to the buffer. CPU can take ownership to fetch the + * data. The buffer will be handed back to the device + * before the firmware will be restarted. 
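+			 * The dma_sync_single_for_cpu() below hands the
+			 * monitor buffer over to the CPU so that the memcpy()
+			 * sees the data the device last DMA'd into it.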
+ */ + dma_sync_single_for_cpu(trans->dev, + trans_pcie->fw_mon_phys, + trans_pcie->fw_mon_size, + DMA_FROM_DEVICE); + memcpy(fw_mon_data->data, + page_address(trans_pcie->fw_mon_page), + trans_pcie->fw_mon_size); + + monitor_len = trans_pcie->fw_mon_size; + } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { + /* + * Update pointers to reflect actual values after + * shifting + */ + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + iwl_trans_read_mem(trans, base, fw_mon_data->data, + monitor_len / sizeof(u32)); + } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { + monitor_len = + iwl_trans_pci_dump_marbh_monitor(trans, + fw_mon_data, + monitor_len); + } else { + /* Didn't match anything - output no monitor data */ + monitor_len = 0; + } + + len += monitor_len; + (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); + } + + return len; +} + +static struct iwl_trans_dump_data +*iwl_trans_pcie_dump_data(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv *trigger) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_fw_error_dump_data *data; + struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_fw_error_dump_txcmd *txcmd; + struct iwl_trans_dump_data *dump_data; + u32 len, num_rbs; + u32 monitor_len; + int i, ptr; + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); + + /* transport dump header */ + len = sizeof(*dump_data); + + /* host commands */ + len += sizeof(*data) + + cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); + + /* FW monitor */ + if (trans_pcie->fw_mon_page) { + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + + trans_pcie->fw_mon_size; + monitor_len = trans_pcie->fw_mon_size; + } else if (trans->dbg_dest_tlv) { + u32 base, end; + + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + end = le32_to_cpu(trans->dbg_dest_tlv->end_reg); + + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + end = iwl_read_prph(trans, end) << + trans->dbg_dest_tlv->end_shift; + + /* Make "end" point to the actual end */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 || + trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) + end += (1 << trans->dbg_dest_tlv->end_shift); + monitor_len = end - base; + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + + monitor_len; + } else { + monitor_len = 0; + } + + if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { + dump_data = vzalloc(len); + if (!dump_data) + return NULL; + + data = (void *)dump_data->data; + len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); + dump_data->len = len; + + return dump_data; + } + + /* CSR registers */ + len += sizeof(*data) + IWL_CSR_TO_DUMP; + + /* PRPH registers */ + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; + + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + + /* FH registers */ + len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); + + if (dump_rbs) { + /* RBs */ + num_rbs = le16_to_cpu(ACCESS_ONCE( + trans_pcie->rxq.rb_stts->closed_rb_num)) + & 0x0FFF; + num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; + len += num_rbs * (sizeof(*data) + + sizeof(struct iwl_fw_error_dump_rb) + + (PAGE_SIZE << trans_pcie->rx_page_order)); + } + + dump_data = vzalloc(len); + if (!dump_data) + return NULL; + + 
len = 0; + data = (void *)dump_data->data; + data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); + txcmd = (void *)data->data; + spin_lock_bh(&cmdq->lock); + ptr = cmdq->q.write_ptr; + for (i = 0; i < cmdq->q.n_window; i++) { + u8 idx = get_cmd_index(&cmdq->q, ptr); + u32 caplen, cmdlen; + + cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]); + caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); + + if (cmdlen) { + len += sizeof(*txcmd) + caplen; + txcmd->cmdlen = cpu_to_le32(cmdlen); + txcmd->caplen = cpu_to_le32(caplen); + memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); + txcmd = (void *)((u8 *)txcmd->data + caplen); + } + + ptr = iwl_queue_dec_wrap(ptr); + } + spin_unlock_bh(&cmdq->lock); + + data->len = cpu_to_le32(len); + len += sizeof(*data); + data = iwl_fw_error_next_data(data); + + len += iwl_trans_pcie_dump_prph(trans, &data); + len += iwl_trans_pcie_dump_csr(trans, &data); + len += iwl_trans_pcie_fh_regs_dump(trans, &data); + if (dump_rbs) + len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); + + len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); + + dump_data->len = len; + + return dump_data; +} + +static const struct iwl_trans_ops trans_ops_pcie = { + .start_hw = iwl_trans_pcie_start_hw, + .op_mode_leave = iwl_trans_pcie_op_mode_leave, + .fw_alive = iwl_trans_pcie_fw_alive, + .start_fw = iwl_trans_pcie_start_fw, + .stop_device = iwl_trans_pcie_stop_device, + + .d3_suspend = iwl_trans_pcie_d3_suspend, + .d3_resume = iwl_trans_pcie_d3_resume, + + .send_cmd = iwl_trans_pcie_send_hcmd, + + .tx = iwl_trans_pcie_tx, + .reclaim = iwl_trans_pcie_reclaim, + + .txq_disable = iwl_trans_pcie_txq_disable, + .txq_enable = iwl_trans_pcie_txq_enable, + + .dbgfs_register = iwl_trans_pcie_dbgfs_register, + + .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, + .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, + + .write8 = iwl_trans_pcie_write8, + .write32 = iwl_trans_pcie_write32, + .read32 = iwl_trans_pcie_read32, + .read_prph = iwl_trans_pcie_read_prph, + .write_prph = iwl_trans_pcie_write_prph, + .read_mem = iwl_trans_pcie_read_mem, + .write_mem = iwl_trans_pcie_write_mem, + .configure = iwl_trans_pcie_configure, + .set_pmi = iwl_trans_pcie_set_pmi, + .grab_nic_access = iwl_trans_pcie_grab_nic_access, + .release_nic_access = iwl_trans_pcie_release_nic_access, + .set_bits_mask = iwl_trans_pcie_set_bits_mask, + + .ref = iwl_trans_pcie_ref, + .unref = iwl_trans_pcie_unref, + + .dump_data = iwl_trans_pcie_dump_data, +}; + +struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + const struct iwl_cfg *cfg) +{ + struct iwl_trans_pcie *trans_pcie; + struct iwl_trans *trans; + u16 pci_cmd; + int ret; + + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), + &pdev->dev, cfg, &trans_ops_pcie, 0); + if (!trans) + return ERR_PTR(-ENOMEM); + + trans->max_skb_frags = IWL_PCIE_MAX_FRAGS; + + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + trans_pcie->trans = trans; + spin_lock_init(&trans_pcie->irq_lock); + spin_lock_init(&trans_pcie->reg_lock); + spin_lock_init(&trans_pcie->ref_lock); + mutex_init(&trans_pcie->mutex); + init_waitqueue_head(&trans_pcie->ucode_write_waitq); + + ret = pci_enable_device(pdev); + if (ret) + goto out_no_pci; + + if (!cfg->base_params->pcie_l1_allowed) { + /* + * W/A - seems to solve weird behavior. We need to remove this + * if we don't want to stay in L1 all the time. This wastes a + * lot of power. 
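+		 * pci_disable_link_state() below turns off ASPM L0s/L1 and
+		 * clock PM for devices whose configuration does not allow
+		 * PCIe L1.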
+ */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + } + + pci_set_master(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (ret) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (ret) { + dev_err(&pdev->dev, "No suitable DMA available\n"); + goto out_pci_disable_device; + } + } + + ret = pci_request_regions(pdev, DRV_NAME); + if (ret) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto out_pci_disable_device; + } + + trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); + if (!trans_pcie->hw_base) { + dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); + ret = -ENODEV; + goto out_pci_release_regions; + } + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + trans->dev = &pdev->dev; + trans_pcie->pci_dev = pdev; + iwl_disable_interrupts(trans); + + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); + /* enable rfkill interrupt: hw bug w/a */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + } + } + + trans->hw_rev = iwl_read32(trans, CSR_HW_REV); + /* + * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have + * changed, and now the revision step also includes bit 0-1 (no more + * "dash" value). To keep hw_rev backwards compatible - we'll store it + * in the old format. + */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + unsigned long flags; + + trans->hw_rev = (trans->hw_rev & 0xfff0) | + (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); + + ret = iwl_pcie_prepare_card_hw(trans); + if (ret) { + IWL_WARN(trans, "Exit HW not ready\n"); + goto out_pci_disable_msi; + } + + /* + * in-order to recognize C step driver should read chip version + * id located at the AUX bus MISC address space. 
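+		 * The device has to be woken up first (INIT_DONE plus the
+		 * MAC clock ready poll below) before the AUX bus periphery
+		 * register AUX_MISC_REG can be read.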
+ */ + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + udelay(2); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + 25000); + if (ret < 0) { + IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); + goto out_pci_disable_msi; + } + + if (iwl_trans_grab_nic_access(trans, false, &flags)) { + u32 hw_step; + + hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG); + hw_step |= ENABLE_WFPM; + __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step); + hw_step = __iwl_read_prph(trans, AUX_MISC_REG); + hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; + if (hw_step == 0x3) + trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | + (SILICON_C_STEP << 2); + iwl_trans_release_nic_access(trans, &flags); + } + } + + trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; + snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), + "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); + + /* Initialize the wait queue for commands */ + init_waitqueue_head(&trans_pcie->wait_command_queue); + + ret = iwl_pcie_alloc_ict(trans); + if (ret) + goto out_pci_disable_msi; + + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, + iwl_pcie_irq_handler, + IRQF_SHARED, DRV_NAME, trans); + if (ret) { + IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); + goto out_free_ict; + } + + trans_pcie->inta_mask = CSR_INI_SET_MASK; + trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND; + + return trans; + +out_free_ict: + iwl_pcie_free_ict(trans); +out_pci_disable_msi: + pci_disable_msi(pdev); +out_pci_release_regions: + pci_release_regions(pdev); +out_pci_disable_device: + pci_disable_device(pdev); +out_no_pci: + iwl_trans_free(trans); + return ERR_PTR(ret); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c new file mode 100644 index 000000000000..a8c8a4a7420b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -0,0 +1,1988 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-scd.h"
+#include "iwl-op-mode.h"
+#include "internal.h"
+/* FIXME: need to abstract out TX command (once we know what it looks like) */
+#include "dvm/commands.h"
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For Tx queue, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
+ * When reclaiming packets (on 'tx done' IRQ), if free space becomes > high
+ * mark, the Tx queue is resumed.
+ *
+ ***************************************************/
+static int iwl_queue_space(const struct iwl_queue *q)
+{
+	unsigned int max;
+	unsigned int used;
+
+	/*
+	 * To avoid ambiguity between empty and completely full queues, there
+	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
+	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+	 * to reserve any queue entries for this purpose.
+	 */
+	if (q->n_window < TFD_QUEUE_SIZE_MAX)
+		max = q->n_window;
+	else
+		max = TFD_QUEUE_SIZE_MAX - 1;
+
+	/*
+	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
+	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+	 */
+	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+
+	if (WARN_ON(used > max))
+		return 0;
+
+	return max - used;
+}
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+{
+	q->n_window = slots_num;
+	q->id = id;
+
+	/* slots_num must be power-of-two size, otherwise
+	 * get_cmd_index is broken.
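+	 * (get_cmd_index() masks the index with n_window - 1, which only
+	 * wraps correctly when n_window is a power of two.)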
*/ + if (WARN_ON(!is_power_of_2(slots_num))) + return -EINVAL; + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->write_ptr = 0; + q->read_ptr = 0; + + return 0; +} + +static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, + struct iwl_dma_ptr *ptr, size_t size) +{ + if (WARN_ON(ptr->addr)) + return -EINVAL; + + ptr->addr = dma_alloc_coherent(trans->dev, size, + &ptr->dma, GFP_KERNEL); + if (!ptr->addr) + return -ENOMEM; + ptr->size = size; + return 0; +} + +static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, + struct iwl_dma_ptr *ptr) +{ + if (unlikely(!ptr->addr)) + return; + + dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); + memset(ptr, 0, sizeof(*ptr)); +} + +static void iwl_pcie_txq_stuck_timer(unsigned long data) +{ + struct iwl_txq *txq = (void *)data; + struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; + struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); + u32 scd_sram_addr = trans_pcie->scd_base_addr + + SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); + u8 buf[16]; + int i; + + spin_lock(&txq->lock); + /* check if triggered erroneously */ + if (txq->q.read_ptr == txq->q.write_ptr) { + spin_unlock(&txq->lock); + return; + } + spin_unlock(&txq->lock); + + IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, + jiffies_to_msecs(txq->wd_timeout)); + IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", + txq->q.read_ptr, txq->q.write_ptr); + + iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); + + iwl_print_hex_error(trans, buf, sizeof(buf)); + + for (i = 0; i < FH_TCSR_CHNL_NUM; i++) + IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, + iwl_read_direct32(trans, FH_TX_TRB_REG(i))); + + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); + u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); + u32 tbl_dw = + iwl_trans_read_mem32(trans, + trans_pcie->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(i)); + + if (i & 0x1) + tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; + else + tbl_dw = tbl_dw & 0x0000FFFF; + + IWL_ERR(trans, + "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", + i, active ? 
"" : "in", fifo, tbl_dw, + iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) & + (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); + } + + iwl_force_nmi(trans); +} + +/* + * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, u16 byte_cnt) +{ + struct iwlagn_scd_bc_tbl *scd_bc_tbl; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int write_ptr = txq->q.write_ptr; + int txq_id = txq->q.id; + u8 sec_ctl = 0; + u8 sta_id = 0; + u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + struct iwl_tx_cmd *tx_cmd = + (void *) txq->entries[txq->q.write_ptr].cmd->payload; + + scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; + + sta_id = tx_cmd->sta_id; + sec_ctl = tx_cmd->sec_ctl; + + switch (sec_ctl & TX_CMD_SEC_MSK) { + case TX_CMD_SEC_CCM: + len += IEEE80211_CCMP_MIC_LEN; + break; + case TX_CMD_SEC_TKIP: + len += IEEE80211_TKIP_ICV_LEN; + break; + case TX_CMD_SEC_WEP: + len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; + break; + } + + if (trans_pcie->bc_table_dword) + len = DIV_ROUND_UP(len, 4); + + if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + return; + + bc_ent = cpu_to_le16(len | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. + tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; +} + +static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; + int txq_id = txq->q.id; + int read_ptr = txq->q.read_ptr; + u8 sta_id = 0; + __le16 bc_ent; + struct iwl_tx_cmd *tx_cmd = + (void *)txq->entries[txq->q.read_ptr].cmd->payload; + + WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); + + if (txq_id != trans_pcie->cmd_queue) + sta_id = tx_cmd->sta_id; + + bc_ent = cpu_to_le16(1 | (sta_id << 12)); + scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; + + if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. + tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; +} + +/* + * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware + */ +static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 reg = 0; + int txq_id = txq->q.id; + + lockdep_assert_held(&txq->lock); + + /* + * explicitly wake up the NIC if: + * 1. shadow registers aren't enabled + * 2. NIC is woken up for CMD regardless of shadow outside this function + * 3. there is a chance that the NIC is asleep + */ + if (!trans->cfg->base_params->shadow_reg_enable && + txq_id != trans_pcie->cmd_queue && + test_bit(STATUS_TPOWER_PMI, &trans->status)) { + /* + * wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. + */ + reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", + txq_id, reg); + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + txq->need_update = true; + return; + } + } + + /* + * if not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
+ */ + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); + iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); +} + +void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + struct iwl_txq *txq = &trans_pcie->txq[i]; + + spin_lock_bh(&txq->lock); + if (trans_pcie->txq[i].need_update) { + iwl_pcie_txq_inc_wr_ptr(trans, txq); + trans_pcie->txq[i].need_update = false; + } + spin_unlock_bh(&txq->lock); + } +} + +static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + dma_addr_t addr = get_unaligned_le32(&tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + addr |= + ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; + + return addr; +} + +static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, + dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + hi_n_len |= ((addr >> 16) >> 16) & 0xF; + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + +static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_tfd *tfd) +{ + int i; + int num_tbs; + + /* Sanity check on number of chunks */ + num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); + + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* first TB is never freed - it's the scratchbuf data */ + + for (i = 1; i < num_tbs; i++) { + if (meta->flags & BIT(i + CMD_TB_BITMAP_POS)) + dma_unmap_page(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + } + tfd->num_tbs = 0; +} + +/* + * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @trans - transport private data + * @txq - tx queue + * @dma_dir - the direction of the DMA mapping + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +{ + struct iwl_tfd *tfd_tmp = txq->tfds; + + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int rd_ptr = txq->q.read_ptr; + int idx = get_cmd_index(&txq->q, rd_ptr); + + lockdep_assert_held(&txq->lock); + + /* We have only q->n_window txq->entries, but we use + * TFD_QUEUE_SIZE_MAX tfds + */ + iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); + + /* free SKB */ + if (txq->entries) { + struct sk_buff *skb; + + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } + } +} + +static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, + dma_addr_t addr, u16 len, bool reset) +{ + struct iwl_queue *q; + struct iwl_tfd *tfd, *tfd_tmp; + u32 
num_tbs; + + q = &txq->q; + tfd_tmp = txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); + + /* Each TFD can point to a maximum 20 Tx buffers */ + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + IWL_NUM_OF_TBS); + return -EINVAL; + } + + if (WARN(addr & ~IWL_TX_DMA_MASK, + "Unaligned address = %llx\n", (unsigned long long)addr)) + return -EINVAL; + + iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); + + return num_tbs; +} + +static int iwl_pcie_txq_alloc(struct iwl_trans *trans, + struct iwl_txq *txq, int slots_num, + u32 txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; + size_t scratchbuf_sz; + int i; + + if (WARN_ON(txq->entries || txq->tfds)) + return -EINVAL; + + setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, + (unsigned long)txq); + txq->trans_pcie = trans_pcie; + + txq->q.n_window = slots_num; + + txq->entries = kcalloc(slots_num, + sizeof(struct iwl_pcie_txq_entry), + GFP_KERNEL); + + if (!txq->entries) + goto error; + + if (txq_id == trans_pcie->cmd_queue) + for (i = 0; i < slots_num; i++) { + txq->entries[i].cmd = + kmalloc(sizeof(struct iwl_device_cmd), + GFP_KERNEL); + if (!txq->entries[i].cmd) + goto error; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device */ + txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, + &txq->q.dma_addr, GFP_KERNEL); + if (!txq->tfds) + goto error; + + BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); + BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch)); + + scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num; + + txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz, + &txq->scratchbufs_dma, + GFP_KERNEL); + if (!txq->scratchbufs) + goto err_free_tfds; + + txq->q.id = txq_id; + + return 0; +err_free_tfds: + dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr); +error: + if (txq->entries && txq_id == trans_pcie->cmd_queue) + for (i = 0; i < slots_num; i++) + kfree(txq->entries[i].cmd); + kfree(txq->entries); + txq->entries = NULL; + + return -ENOMEM; + +} + +static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, u32 txq_id) +{ + int ret; + + txq->need_update = false; + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + ret = iwl_queue_init(&txq->q, slots_num, txq_id); + if (ret) + return ret; + + spin_lock_init(&txq->lock); + + /* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. 
+ * Circular buffer (TFD queue in DRAM) physical base address */ + iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + + return 0; +} + +/* + * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's + */ +static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_queue *q = &txq->q; + + spin_lock_bh(&txq->lock); + while (q->write_ptr != q->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", + txq_id, q->read_ptr); + iwl_pcie_txq_free_tfd(trans, txq); + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); + } + txq->active = false; + spin_unlock_bh(&txq->lock); + + /* just in case - this queue may have been stopped */ + iwl_wake_queue(trans, txq); +} + +/* + * iwl_pcie_txq_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct device *dev = trans->dev; + int i; + + if (WARN_ON(!txq)) + return; + + iwl_pcie_txq_unmap(trans, txq_id); + + /* De-alloc array of command/tx buffers */ + if (txq_id == trans_pcie->cmd_queue) + for (i = 0; i < txq->q.n_window; i++) { + kzfree(txq->entries[i].cmd); + kzfree(txq->entries[i].free_buf); + } + + /* De-alloc circular buffer of TFDs */ + if (txq->tfds) { + dma_free_coherent(dev, + sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX, + txq->tfds, txq->q.dma_addr); + txq->q.dma_addr = 0; + txq->tfds = NULL; + + dma_free_coherent(dev, + sizeof(*txq->scratchbufs) * txq->q.n_window, + txq->scratchbufs, txq->scratchbufs_dma); + } + + kfree(txq->entries); + txq->entries = NULL; + + del_timer_sync(&txq->stuck_timer); + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} + +void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int nq = trans->cfg->base_params->num_of_queues; + int chan; + u32 reg_val; + int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - + SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); + + /* make sure all queue are not stopped/used */ + memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + + trans_pcie->scd_base_addr = + iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); + + WARN_ON(scd_base_addr != 0 && + scd_base_addr != trans_pcie->scd_base_addr); + + /* reset context data, TX status and translation data */ + iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_MEM_LOWER_BOUND, + NULL, clear_dwords); + + iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, + trans_pcie->scd_bc_tbls.dma >> 10); + + /* The chain extension of the SCD doesn't work well. This feature is + * enabled by default by the HW, so we need to disable it manually. 
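+ * The write below is gated on the scd_chain_ext_wa flag in the base params, so only affected hardware is touched.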
+ */ + if (trans->cfg->base_params->scd_chain_ext_wa) + iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); + + iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, + trans_pcie->cmd_fifo, + trans_pcie->cmd_q_wdg_timeout); + + /* Activate all Tx DMA/FIFO channels */ + iwl_scd_activate_fifos(trans); + + /* Enable DMA channel */ + for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) + iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); + iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, + reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + /* Enable L1-Active */ + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); +} + +void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int txq_id; + + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + txq_id++) { + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + + iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + iwl_pcie_txq_unmap(trans, txq_id); + txq->q.read_ptr = 0; + txq->q.write_ptr = 0; + } + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, + trans_pcie->kw.dma >> 4); + + /* + * Send 0 as the scd_base_addr since the device may have be reset + * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will + * contain garbage. + */ + iwl_pcie_tx_start(trans, 0); +} + +static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + int ch, ret; + u32 mask = 0; + + spin_lock(&trans_pcie->irq_lock); + + if (!iwl_trans_grab_nic_access(trans, false, &flags)) + goto out; + + /* Stop each Tx DMA channel */ + for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); + } + + /* Wait for DMA channels to be idle */ + ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); + if (ret < 0) + IWL_ERR(trans, + "Failing on timeout while stopping DMA channel %d [0x%08x]\n", + ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); + + iwl_trans_release_nic_access(trans, &flags); + +out: + spin_unlock(&trans_pcie->irq_lock); +} + +/* + * iwl_pcie_tx_stop - Stop all Tx DMA channels + */ +int iwl_pcie_tx_stop(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int txq_id; + + /* Turn off all Tx DMA fifos */ + iwl_scd_deactivate_fifos(trans); + + /* Turn off all Tx DMA channels */ + iwl_pcie_tx_stop_fh(trans); + + /* + * This function can be called before the op_mode disabled the + * queues. This happens when we have an rfkill interrupt. + * Since we stop Tx altogether - mark the queues as stopped. 
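+ * The queue_stopped/queue_used bitmaps are cleared unconditionally below; the per-queue unmapping only runs if the txq array was ever allocated (start_hw followed by stop_device can get here without it).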
+ */ + memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + + /* This can happen: start_hw, stop_device */ + if (!trans_pcie->txq) + return 0; + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + txq_id++) + iwl_pcie_txq_unmap(trans, txq_id); + + return 0; +} + +/* + * iwl_trans_tx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl_pcie_tx_free(struct iwl_trans *trans) +{ + int txq_id; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* Tx queues */ + if (trans_pcie->txq) { + for (txq_id = 0; + txq_id < trans->cfg->base_params->num_of_queues; txq_id++) + iwl_pcie_txq_free(trans, txq_id); + } + + kfree(trans_pcie->txq); + trans_pcie->txq = NULL; + + iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); + + iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); +} + +/* + * iwl_pcie_tx_alloc - allocate TX context + * Allocate all Tx DMA structures and initialize them + */ +static int iwl_pcie_tx_alloc(struct iwl_trans *trans) +{ + int ret; + int txq_id, slots_num; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * + sizeof(struct iwlagn_scd_bc_tbl); + + /*It is not allowed to alloc twice, so warn when this happens. + * We cannot rely on the previous allocation, so free and fail */ + if (WARN_ON(trans_pcie->txq)) { + ret = -EINVAL; + goto error; + } + + ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, + scd_bc_tbls_size); + if (ret) { + IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); + goto error; + } + + /* Alloc keep-warm buffer */ + ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); + if (ret) { + IWL_ERR(trans, "Keep Warm allocation failed\n"); + goto error; + } + + trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, + sizeof(struct iwl_txq), GFP_KERNEL); + if (!trans_pcie->txq) { + IWL_ERR(trans, "Not enough memory for txq\n"); + ret = -ENOMEM; + goto error; + } + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + txq_id++) { + slots_num = (txq_id == trans_pcie->cmd_queue) ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], + slots_num, txq_id); + if (ret) { + IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); + goto error; + } + } + + return 0; + +error: + iwl_pcie_tx_free(trans); + + return ret; +} +int iwl_pcie_tx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + int txq_id, slots_num; + bool alloc = false; + + if (!trans_pcie->txq) { + ret = iwl_pcie_tx_alloc(trans); + if (ret) + goto error; + alloc = true; + } + + spin_lock(&trans_pcie->irq_lock); + + /* Turn off all Tx DMA fifos */ + iwl_scd_deactivate_fifos(trans); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, + trans_pcie->kw.dma >> 4); + + spin_unlock(&trans_pcie->irq_lock); + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + txq_id++) { + slots_num = (txq_id == trans_pcie->cmd_queue) ? 
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], + slots_num, txq_id); + if (ret) { + IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); + if (trans->cfg->base_params->num_of_queues > 20) + iwl_set_bits_prph(trans, SCD_GP_CTRL, + SCD_GP_CTRL_ENABLE_31_QUEUES); + + return 0; +error: + /*Upon error, free only if we allocated something */ + if (alloc) + iwl_pcie_tx_free(trans); + return ret; +} + +static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) +{ + lockdep_assert_held(&txq->lock); + + if (!txq->wd_timeout) + return; + + /* + * station is asleep and we send data - that must + * be uAPSD or PS-Poll. Don't rearm the timer. + */ + if (txq->frozen) + return; + + /* + * if empty delete timer, otherwise move timer forward + * since we're making progress on this queue + */ + if (txq->q.read_ptr == txq->q.write_ptr) + del_timer(&txq->stuck_timer); + else + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); +} + +/* Frees buffers until index _not_ inclusive */ +void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); + struct iwl_queue *q = &txq->q; + int last_to_free; + + /* This function is not meant to release cmd queue*/ + if (WARN_ON(txq_id == trans_pcie->cmd_queue)) + return; + + spin_lock_bh(&txq->lock); + + if (!txq->active) { + IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", + txq_id, ssn); + goto out; + } + + if (txq->q.read_ptr == tfd_num) + goto out; + + IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", + txq_id, txq->q.read_ptr, tfd_num, ssn); + + /*Since we free until index _not_ inclusive, the one before index is + * the last we will free. 
This one must be used */ + last_to_free = iwl_queue_dec_wrap(tfd_num); + + if (!iwl_queue_used(q, last_to_free)) { + IWL_ERR(trans, + "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", + __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, + q->write_ptr, q->read_ptr); + goto out; + } + + if (WARN_ON(!skb_queue_empty(skbs))) + goto out; + + for (; + q->read_ptr != tfd_num; + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { + + if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) + continue; + + __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); + + txq->entries[txq->q.read_ptr].skb = NULL; + + iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); + + iwl_pcie_txq_free_tfd(trans, txq); + } + + iwl_pcie_txq_progress(txq); + + if (iwl_queue_space(&txq->q) > txq->q.low_mark) + iwl_wake_queue(trans, txq); + + if (q->read_ptr == q->write_ptr) { + IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); + iwl_trans_pcie_unref(trans); + } + +out: + spin_unlock_bh(&txq->lock); +} + +static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, + const struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + lockdep_assert_held(&trans_pcie->reg_lock); + + if (!(cmd->flags & CMD_SEND_IN_IDLE) && + !trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = true; + IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); + iwl_trans_pcie_ref(trans); + } + + /* + * wake up the NIC to make sure that the firmware will see the host + * command - we will let the NIC sleep once all the host commands + * returned. This needs to be done only on NICs that have + * apmg_wake_up_wa set. + */ + if (trans->cfg->base_params->apmg_wake_up_wa && + !trans_pcie->cmd_hold_nic_awake) { + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), + 15000); + if (ret < 0) { + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); + return -EIO; + } + trans_pcie->cmd_hold_nic_awake = true; + } + + return 0; +} + +static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->reg_lock); + + if (trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = false; + IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); + iwl_trans_pcie_unref(trans); + } + + if (trans->cfg->base_params->apmg_wake_up_wa) { + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) + return 0; + + trans_pcie->cmd_hold_nic_awake = false; + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } + return 0; +} + +/* + * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. 
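+ * The command queue normally advances one entry at a time, so if more than one entry has to be freed here the loop below logs an error and fires an NMI.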
+ */ +static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_queue *q = &txq->q; + unsigned long flags; + int nfreed = 0; + + lockdep_assert_held(&txq->lock); + + if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) { + IWL_ERR(trans, + "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", + __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, + q->write_ptr, q->read_ptr); + return; + } + + for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx; + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { + + if (nfreed++ > 0) { + IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", + idx, q->write_ptr, q->read_ptr); + iwl_force_nmi(trans); + } + } + + if (q->read_ptr == q->write_ptr) { + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + iwl_pcie_clear_cmd_in_flight(trans); + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + } + + iwl_pcie_txq_progress(txq); +} + +static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, + u16 txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = trans_pcie->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); + + return 0; +} + +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + int fifo = -1; + + if (test_and_set_bit(txq_id, trans_pcie->queue_used)) + WARN_ONCE(1, "queue %d already used - expect issues", txq_id); + + txq->wd_timeout = msecs_to_jiffies(wdg_timeout); + + if (cfg) { + fifo = cfg->fifo; + + /* Disable the scheduler prior configuring the cmd queue */ + if (txq_id == trans_pcie->cmd_queue && + trans_pcie->scd_set_active) + iwl_scd_enable_set_active(trans, 0); + + /* Stop this Tx queue before configuring it */ + iwl_scd_txq_set_inactive(trans, txq_id); + + /* Set this queue as a chain-building queue unless it is CMD */ + if (txq_id != trans_pcie->cmd_queue) + iwl_scd_txq_set_chain(trans, txq_id); + + if (cfg->aggregate) { + u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); + + /* Map receiver-address / traffic-ID to this queue */ + iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); + + /* enable aggregations for the queue */ + iwl_scd_txq_enable_agg(trans, txq_id); + txq->ampdu = true; + } else { + /* + * disable aggregations for the queue, this will also + * make the ra_tid mapping configuration irrelevant + * since it is now a non-AGG queue. + */ + iwl_scd_txq_disable_agg(trans, txq_id); + + ssn = txq->q.read_ptr; + } + } + + /* Place first TFD at index corresponding to start sequence number. 
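+ * Only the low byte of the SSN is used as the ring index, matching the & 0xff masking below.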
+ * Assumes that ssn_idx is valid (!= 0xFFF) */ + txq->q.read_ptr = (ssn & 0xff); + txq->q.write_ptr = (ssn & 0xff); + iwl_write_direct32(trans, HBUS_TARG_WRPTR, + (ssn & 0xff) | (txq_id << 8)); + + if (cfg) { + u8 frame_limit = cfg->frame_limit; + + iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); + + /* Set up Tx window size and frame limit for this queue */ + iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); + iwl_trans_write_mem32(trans, + trans_pcie->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & + SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + + /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << SCD_QUEUE_STTS_REG_POS_WSL) | + SCD_QUEUE_STTS_REG_MSK); + + /* enable the scheduler for this queue (only) */ + if (txq_id == trans_pcie->cmd_queue && + trans_pcie->scd_set_active) + iwl_scd_enable_set_active(trans, BIT(txq_id)); + + IWL_DEBUG_TX_QUEUES(trans, + "Activate queue %d on FIFO %d WrPtr: %d\n", + txq_id, fifo, ssn & 0xff); + } else { + IWL_DEBUG_TX_QUEUES(trans, + "Activate queue %d WrPtr: %d\n", + txq_id, ssn & 0xff); + } + + txq->active = true; +} + +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, + bool configure_scd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 stts_addr = trans_pcie->scd_base_addr + + SCD_TX_STTS_QUEUE_OFFSET(txq_id); + static const u32 zero_val[4] = {}; + + trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; + trans_pcie->txq[txq_id].frozen = false; + + /* + * Upon HW Rfkill - we stop the device, and then stop the queues + * in the op_mode. Just for the sake of the simplicity of the op_mode, + * allow the op_mode to call txq_disable after it already called + * stop_device. + */ + if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { + WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), + "queue %d not used", txq_id); + return; + } + + if (configure_scd) { + iwl_scd_txq_set_inactive(trans, txq_id); + + iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, + ARRAY_SIZE(zero_val)); + } + + iwl_pcie_txq_unmap(trans, txq_id); + trans_pcie->txq[txq_id].ampdu = false; + + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); +} + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/* + * iwl_pcie_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a pointer to the ucode command structure + * + * The function returns < 0 values to indicate the operation + * failed. On success, it returns the index (>= 0) of command in the + * command queue. 
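+ * The index is also what iwl_pcie_send_hcmd_sync() uses to clear the CMD_WANT_SKB flag on the queue entry if the command later has to be cancelled.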
+ */ +static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_queue *q = &txq->q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + unsigned long flags; + void *dup_buf = NULL; + dma_addr_t phys_addr; + int idx; + u16 copy_size, cmd_size, scratch_size; + bool had_nocopy = false; + u8 group_id = iwl_cmd_groupid(cmd->id); + int i, ret; + u32 cmd_pos; + const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; + u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; + + if (WARN(!trans_pcie->wide_cmd_header && + group_id > IWL_ALWAYS_LONG_GROUP, + "unsupported wide command %#x\n", cmd->id)) + return -EINVAL; + + if (group_id != 0) { + copy_size = sizeof(struct iwl_cmd_header_wide); + cmd_size = sizeof(struct iwl_cmd_header_wide); + } else { + copy_size = sizeof(struct iwl_cmd_header); + cmd_size = sizeof(struct iwl_cmd_header); + } + + /* need one for the header if the first is NOCOPY */ + BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); + + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + cmddata[i] = cmd->data[i]; + cmdlen[i] = cmd->len[i]; + + if (!cmd->len[i]) + continue; + + /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ + if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { + int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; + + if (copy > cmdlen[i]) + copy = cmdlen[i]; + cmdlen[i] -= copy; + cmddata[i] += copy; + copy_size += copy; + } + + if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { + had_nocopy = true; + if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { + idx = -EINVAL; + goto free_dup_buf; + } + } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { + /* + * This is also a chunk that isn't copied + * to the static buffer so set had_nocopy. + */ + had_nocopy = true; + + /* only allowed once */ + if (WARN_ON(dup_buf)) { + idx = -EINVAL; + goto free_dup_buf; + } + + dup_buf = kmemdup(cmddata[i], cmdlen[i], + GFP_ATOMIC); + if (!dup_buf) + return -ENOMEM; + } else { + /* NOCOPY must not be followed by normal! */ + if (WARN_ON(had_nocopy)) { + idx = -EINVAL; + goto free_dup_buf; + } + copy_size += cmdlen[i]; + } + cmd_size += cmd->len[i]; + } + + /* + * If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically + * allocated into separate TFDs, then we will need to + * increase the size of the buffers. + */ + if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, + "Command %s (%#x) is too large (%d bytes)\n", + get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) { + idx = -EINVAL; + goto free_dup_buf; + } + + spin_lock_bh(&txq->lock); + + if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { + spin_unlock_bh(&txq->lock); + + IWL_ERR(trans, "No space in command queue\n"); + iwl_op_mode_cmd_queue_full(trans->op_mode); + idx = -ENOSPC; + goto free_dup_buf; + } + + idx = get_cmd_index(q, q->write_ptr); + out_cmd = txq->entries[idx].cmd; + out_meta = &txq->entries[idx].meta; + + memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + + /* set up the header */ + if (group_id != 0) { + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - + sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + } else { + out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + out_cmd->hdr.group_id = 0; + + cmd_pos = sizeof(struct iwl_cmd_header); + copy_size = sizeof(struct iwl_cmd_header); + } + + /* and copy the data that needs to be copied */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + int copy; + + if (!cmd->len[i]) + continue; + + /* copy everything if not nocopy/dup */ + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) { + copy = cmd->len[i]; + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + copy_size += copy; + continue; + } + + /* + * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied + * in total (for the scratchbuf handling), but copy up to what + * we can fit into the payload for debug dump purposes. 
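+ * (here cmd_pos advances by the full amount copied for the debug dump, while copy_size is only topped up to IWL_HCMD_SCRATCHBUF_SIZE for the scratchbuf mapping)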
+ */ + copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + + /* However, treat copy_size the proper way, we need it below */ + if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { + copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; + + if (copy > cmd->len[i]) + copy = cmd->len[i]; + copy_size += copy; + } + } + + IWL_DEBUG_HC(trans, + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + get_cmd_string(trans_pcie, out_cmd->hdr.cmd), + group_id, out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), + cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); + + /* start the TFD with the scratchbuf */ + scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); + memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); + iwl_pcie_txq_build_tfd(trans, txq, + iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), + scratch_size, true); + + /* map first command fragment, if any remains */ + if (copy_size > scratch_size) { + phys_addr = dma_map_single(trans->dev, + ((u8 *)&out_cmd->hdr) + scratch_size, + copy_size - scratch_size, + DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + idx = -ENOMEM; + goto out; + } + + iwl_pcie_txq_build_tfd(trans, txq, phys_addr, + copy_size - scratch_size, false); + } + + /* map the remaining (adjusted) nocopy/dup fragments */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + const void *data = cmddata[i]; + + if (!cmdlen[i]) + continue; + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) + continue; + if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) + data = dup_buf; + phys_addr = dma_map_single(trans->dev, (void *)data, + cmdlen[i], DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + idx = -ENOMEM; + goto out; + } + + iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); + } + + BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS > + sizeof(out_meta->flags) * BITS_PER_BYTE); + out_meta->flags = cmd->flags; + if (WARN_ON_ONCE(txq->entries[idx].free_buf)) + kzfree(txq->entries[idx].free_buf); + txq->entries[idx].free_buf = dup_buf; + + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); + + /* start timer if queue currently empty */ + if (q->read_ptr == q->write_ptr && txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + ret = iwl_pcie_set_cmd_in_flight(trans, cmd); + if (ret < 0) { + idx = ret; + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + goto out; + } + + /* Increment and update queue's write index */ + q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); + iwl_pcie_txq_inc_wr_ptr(trans, txq); + + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + + out: + spin_unlock_bh(&txq->lock); + free_dup_buf: + if (idx < 0) + kfree(dup_buf); + return idx; +} + +/* + * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. 
The attached skb (if present) will only be freed + * if the callback returns 1 + */ +void iwl_pcie_hcmd_complete(struct iwl_trans *trans, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int cmd_index; + struct iwl_device_cmd *cmd; + struct iwl_cmd_meta *meta; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. */ + if (WARN(txq_id != trans_pcie->cmd_queue, + "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", + txq_id, trans_pcie->cmd_queue, sequence, + trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr, + trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) { + iwl_print_hex_error(trans, pkt, 32); + return; + } + + spin_lock_bh(&txq->lock); + + cmd_index = get_cmd_index(&txq->q, index); + cmd = txq->entries[cmd_index].cmd; + meta = &txq->entries[cmd_index].meta; + + iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]); + + /* Input error checking is done when commands are added to queue. */ + if (meta->flags & CMD_WANT_SKB) { + struct page *p = rxb_steal_page(rxb); + + meta->source->resp_pkt = pkt; + meta->source->_rx_page_addr = (unsigned long)page_address(p); + meta->source->_rx_page_order = trans_pcie->rx_page_order; + } + + iwl_pcie_cmdq_reclaim(trans, txq_id, index); + + if (!(meta->flags & CMD_ASYNC)) { + if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { + IWL_WARN(trans, + "HCMD_ACTIVE already clear for command %s\n", + get_cmd_string(trans_pcie, cmd->hdr.cmd)); + } + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", + get_cmd_string(trans_pcie, cmd->hdr.cmd)); + wake_up(&trans_pcie->wait_command_queue); + } + + meta->flags = 0; + + spin_unlock_bh(&txq->lock); +} + +#define HOST_COMPLETE_TIMEOUT (2 * HZ) + +static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + /* An asynchronous command can not expect an SKB to be set. 
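+ * (CMD_WANT_SKB only makes sense for synchronous commands, where the caller waits for the response, hence the WARN_ON below)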
*/ + if (WARN_ON(cmd->flags & CMD_WANT_SKB)) + return -EINVAL; + + ret = iwl_pcie_enqueue_hcmd(trans, cmd); + if (ret < 0) { + IWL_ERR(trans, + "Error sending %s: enqueue_hcmd failed: %d\n", + get_cmd_string(trans_pcie, cmd->id), ret); + return ret; + } + return 0; +} + +static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int cmd_idx; + int ret; + + IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", + get_cmd_string(trans_pcie, cmd->id)); + + if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + "Command %s: a command is already active!\n", + get_cmd_string(trans_pcie, cmd->id))) + return -EIO; + + IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", + get_cmd_string(trans_pcie, cmd->id)); + + cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_ERR(trans, + "Error sending %s: enqueue_hcmd failed: %d\n", + get_cmd_string(trans_pcie, cmd->id), ret); + return ret; + } + + ret = wait_event_timeout(trans_pcie->wait_command_queue, + !test_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_queue *q = &txq->q; + + IWL_ERR(trans, "Error sending %s: time out after %dms.\n", + get_cmd_string(trans_pcie, cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", + q->read_ptr, q->write_ptr); + + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", + get_cmd_string(trans_pcie, cmd->id)); + ret = -ETIMEDOUT; + + iwl_force_nmi(trans); + iwl_trans_fw_error(trans); + + goto cancel; + } + + if (test_bit(STATUS_FW_ERROR, &trans->status)) { + IWL_ERR(trans, "FW error in SYNC CMD %s\n", + get_cmd_string(trans_pcie, cmd->id)); + dump_stack(); + ret = -EIO; + goto cancel; + } + + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); + ret = -ERFKILL; + goto cancel; + } + + if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { + IWL_ERR(trans, "Error: Response NULL in '%s'\n", + get_cmd_string(trans_pcie, cmd->id)); + ret = -EIO; + goto cancel; + } + + return 0; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). + */ + trans_pcie->txq[trans_pcie->cmd_queue]. 
+ entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; + } + + if (cmd->resp_pkt) { + iwl_free_resp(cmd); + cmd->resp_pkt = NULL; + } + + return ret; +} + +int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) +{ + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", + cmd->id); + return -ERFKILL; + } + + if (cmd->flags & CMD_ASYNC) + return iwl_pcie_send_hcmd_async(trans, cmd); + + /* We still can fail on RFKILL that can be asserted while we wait */ + return iwl_pcie_send_hcmd_sync(trans, cmd); +} + +int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_cmd *dev_cmd, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct ieee80211_hdr *hdr; + struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + struct iwl_cmd_meta *out_meta; + struct iwl_txq *txq; + struct iwl_queue *q; + dma_addr_t tb0_phys, tb1_phys, scratch_phys; + void *tb1_addr; + u16 len, tb1_len, tb2_len; + bool wait_write_ptr; + __le16 fc; + u8 hdr_len; + u16 wifi_seq; + int i; + + txq = &trans_pcie->txq[txq_id]; + q = &txq->q; + + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), + "TX on unused queue %d\n", txq_id)) + return -EINVAL; + + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS && + __skb_linearize(skb)) + return -ENOMEM; + + /* mac80211 always puts the full header into the SKB's head, + * so there's no need to check if it's readable there + */ + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + hdr_len = ieee80211_hdrlen(fc); + + spin_lock(&txq->lock); + + /* In AGG mode, the index in the ring must correspond to the WiFi + * sequence number. This is a HW requirements to help the SCD to parse + * the BA. + * Check here that the packets are in the right place on the ring. 
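+ * The WARN_ONCE below only complains when the sequence number and the write pointer disagree; the frame is still queued.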
+ */ + wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + WARN_ONCE(txq->ampdu && + (wifi_seq & 0xff) != q->write_ptr, + "Q: %d WiFi Seq %d tfdNum %d", + txq_id, wifi_seq, q->write_ptr); + + /* Set up driver data for this TFD */ + txq->entries[q->write_ptr].skb = skb; + txq->entries[q->write_ptr].cmd = dev_cmd; + + dev_cmd->hdr.sequence = + cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + + tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr); + scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_meta = &txq->entries[q->write_ptr].meta; + out_meta->flags = 0; + + /* + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + + hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; + tb1_len = ALIGN(len, 4); + + /* Tell NIC about any 2-byte padding after MAC header */ + if (tb1_len != len) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + /* The first TB points to the scratchbuf data - min_copy bytes */ + memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, + IWL_HCMD_SCRATCHBUF_SIZE); + iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, + IWL_HCMD_SCRATCHBUF_SIZE, true); + + /* there must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE; + tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) + goto out_err; + iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); + + /* + * Set up TFD's third entry to point directly to remainder + * of skb's head, if any + */ + tb2_len = skb_headlen(skb) - hdr_len; + if (tb2_len > 0) { + dma_addr_t tb2_phys = dma_map_single(trans->dev, + skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + goto out_err; + } + iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); + } + + /* set up the remaining entries to point to the data */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t tb_phys; + int tb_idx; + + if (!skb_frag_size(frag)) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + goto out_err; + } + tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, + skb_frag_size(frag), false); + + out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS); + } + + /* Set up entry for this TFD in Tx byte-count array */ + iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); + + trace_iwlwifi_dev_tx(trans->dev, skb, + &txq->tfds[txq->q.write_ptr], + sizeof(struct iwl_tfd), + &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, + skb->data + hdr_len, tb2_len); + trace_iwlwifi_dev_tx_data(trans->dev, skb, + hdr_len, skb->len - hdr_len); + + wait_write_ptr = 
ieee80211_has_morefrags(fc); + + /* start timer if queue currently empty */ + if (q->read_ptr == q->write_ptr) { + if (txq->wd_timeout) { + /* + * If the TXQ is active, then set the timer, if not, + * set the timer in remainder so that the timer will + * be armed with the right value when the station will + * wake up. + */ + if (!txq->frozen) + mod_timer(&txq->stuck_timer, + jiffies + txq->wd_timeout); + else + txq->frozen_expiry_remainder = txq->wd_timeout; + } + IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); + iwl_trans_pcie_ref(trans); + } + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); + if (!wait_write_ptr) + iwl_pcie_txq_inc_wr_ptr(trans, txq); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually. + */ + if (iwl_queue_space(q) < q->high_mark) { + if (wait_write_ptr) + iwl_pcie_txq_inc_wr_ptr(trans, txq); + else + iwl_stop_queue(trans, txq); + } + spin_unlock(&txq->lock); + return 0; +out_err: + spin_unlock(&txq->lock); + return -1; +} diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig deleted file mode 100644 index 6e949df399d6..000000000000 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ /dev/null @@ -1,161 +0,0 @@ -config IWLWIFI - tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " - depends on PCI && MAC80211 && HAS_IOMEM - select FW_LOADER - ---help--- - Select to build the driver supporting the: - - Intel Wireless WiFi Link Next-Gen AGN - - This option enables support for use with the following hardware: - Intel Wireless WiFi Link 6250AGN Adapter - Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN) - Intel WiFi Link 1000BGN - Intel Wireless WiFi 5150AGN - Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN - Intel 6005 Series Wi-Fi Adapters - Intel 6030 Series Wi-Fi Adapters - Intel Wireless WiFi Link 6150BGN 2 Adapter - Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) - Intel 2000 Series Wi-Fi Adapters - Intel 7260 Wi-Fi Adapter - Intel 3160 Wi-Fi Adapter - Intel 7265 Wi-Fi Adapter - Intel 8260 Wi-Fi Adapter - Intel 3165 Wi-Fi Adapter - - - This driver uses the kernel's mac80211 subsystem. - - In order to use this driver, you will need a firmware - image for it. You can obtain the microcode from: - - . - - The firmware is typically installed in /lib/firmware. You can - look in the hotplug script /etc/hotplug/firmware.agent to - determine which directory FIRMWARE_DIR is set to when the script - runs. - - If you want to compile the driver as a module ( = code which can be - inserted in and removed from the running kernel whenever you want), - say M here and read . The - module will be called iwlwifi. - -if IWLWIFI - -config IWLWIFI_LEDS - bool - depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI - select LEDS_TRIGGERS - select MAC80211_LEDS - default y - -config IWLDVM - tristate "Intel Wireless WiFi DVM Firmware support" - default IWLWIFI - help - This is the driver that supports the DVM firmware. The list - of the devices that use this firmware is available here: - https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware - -config IWLMVM - tristate "Intel Wireless WiFi MVM Firmware support" - select WANT_DEV_COREDUMP - help - This is the driver that supports the MVM firmware. 
The list - of the devices that use this firmware is available here: - https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware - -# don't call it _MODULE -- will confuse Kconfig/fixdep/... -config IWLWIFI_OPMODE_MODULAR - bool - default y if IWLDVM=m - default y if IWLMVM=m - -comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM" - depends on IWLDVM=n && IWLMVM=n - -config IWLWIFI_BCAST_FILTERING - bool "Enable broadcast filtering" - depends on IWLMVM - help - Say Y here to enable default bcast filtering configuration. - - Enabling broadcast filtering will drop any incoming wireless - broadcast frames, except some very specific predefined - patterns (e.g. incoming arp requests). - - If unsure, don't enable this option, as some programs might - expect incoming broadcasts for their normal operations. - -config IWLWIFI_UAPSD - bool "enable U-APSD by default" - depends on IWLMVM - help - Say Y here to enable U-APSD by default. This may cause - interoperability problems with some APs, manifesting in lower than - expected throughput due to those APs not enabling aggregation - - If unsure, say N. - -menu "Debugging Options" - -config IWLWIFI_DEBUG - bool "Enable full debugging output in the iwlwifi driver" - ---help--- - This option will enable debug tracing output for the iwlwifi drivers - - This will result in the kernel module being ~100k larger. You can - control which debug output is sent to the kernel log by setting the - value in - - /sys/module/iwlwifi/parameters/debug - - This entry will only exist if this option is enabled. - - To set a value, simply echo an 8-byte hex value to the same file: - - % echo 0x43fff > /sys/module/iwlwifi/parameters/debug - - You can find the list of debug mask values in: - drivers/net/wireless/iwlwifi/iwl-debug.h - - If this is your first time using this driver, you should say Y here - as the debug information can assist others in helping you resolve - any problems you may encounter. - -config IWLWIFI_DEBUGFS - bool "iwlwifi debugfs support" - depends on MAC80211_DEBUGFS - ---help--- - Enable creation of debugfs files for the iwlwifi drivers. This - is a low-impact option that allows getting insight into the - driver's state at runtime. - -config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE - bool "Experimental uCode support" - depends on IWLWIFI_DEBUG - ---help--- - Enable use of experimental ucode for testing and debugging. - -config IWLWIFI_DEVICE_TRACING - bool "iwlwifi device access tracing" - depends on EVENT_TRACING - default y - help - Say Y here to trace all commands, including TX frames and IO - accesses, sent to the device. If you say yes, iwlwifi will - register with the ftrace framework for event tracing and dump - all this information to the ringbuffer, you may need to - increase the ringbuffer size. See the ftrace documentation - for more information. - - When tracing is not enabled, this option still has some - (though rather small) overhead. - - If unsure, say Y so we can help you better when problems - occur. 
-endmenu - -endif diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile deleted file mode 100644 index dbfc5b18bcb7..000000000000 --- a/drivers/net/wireless/iwlwifi/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# common -obj-$(CONFIG_IWLWIFI) += iwlwifi.o -iwlwifi-objs += iwl-io.o -iwlwifi-objs += iwl-drv.o -iwlwifi-objs += iwl-debug.o -iwlwifi-objs += iwl-notif-wait.o -iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o -iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o -iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o -iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o -iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o -iwlwifi-objs += iwl-trans.o - -iwlwifi-objs += $(iwlwifi-m) - -iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o - -ccflags-y += -D__CHECK_ENDIAN__ -I$(src) - -obj-$(CONFIG_IWLDVM) += dvm/ -obj-$(CONFIG_IWLMVM) += mvm/ - -CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile deleted file mode 100644 index 4d19685f31c3..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# DVM -obj-$(CONFIG_IWLDVM) += iwldvm.o -iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o -iwldvm-objs += lib.o calib.o tt.o sta.o rx.o - -iwldvm-objs += power.o -iwldvm-objs += scan.o -iwldvm-objs += rxon.o devices.o - -iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o - -ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h deleted file mode 100644 index 991def878881..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ /dev/null @@ -1,485 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __iwl_agn_h__ -#define __iwl_agn_h__ - -#include "iwl-config.h" - -#include "dev.h" - -/* The first 11 queues (0-10) are used otherwise */ -#define IWLAGN_FIRST_AMPDU_QUEUE 11 - -/* AUX (TX during scan dwell) queue */ -#define IWL_AUX_QUEUE 10 - -#define IWL_INVALID_STATION 255 - -/* device operations */ -extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_105_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg; -extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg; - - -#define TIME_UNIT 1024 - -/***************************************************** -* DRIVER STATUS FUNCTIONS -******************************************************/ -#define STATUS_RF_KILL_HW 0 -#define STATUS_CT_KILL 1 -#define STATUS_ALIVE 2 -#define STATUS_READY 3 -#define STATUS_EXIT_PENDING 5 -#define STATUS_STATISTICS 6 -#define STATUS_SCANNING 7 -#define STATUS_SCAN_ABORTING 8 -#define STATUS_SCAN_HW 9 -#define STATUS_FW_ERROR 10 -#define STATUS_CHANNEL_SWITCH_PENDING 11 -#define STATUS_SCAN_COMPLETE 12 -#define STATUS_POWER_PMI 13 - -struct iwl_ucode_capabilities; - -extern const struct ieee80211_ops iwlagn_hw_ops; - -static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) -{ - hdr->op_code = cmd; - hdr->first_group = 0; - hdr->groups_num = 1; - hdr->data_valid = 1; -} - -void iwl_down(struct iwl_priv *priv); -void iwl_cancel_deferred_work(struct iwl_priv *priv); -void iwlagn_prepare_restart(struct iwl_priv *priv); -void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb); - -bool iwl_check_for_ct_kill(struct iwl_priv *priv); - -void iwlagn_lift_passive_no_rx(struct iwl_priv *priv); - -/* MAC80211 */ -struct ieee80211_hw *iwl_alloc_all(void); -int iwlagn_mac_setup_register(struct iwl_priv *priv, - const struct iwl_ucode_capabilities *capa); -void iwlagn_mac_unregister(struct iwl_priv *priv); - -/* commands */ -int 
iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); -int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id, - u32 flags, u16 len, const void *data); - -/* RXON */ -void iwl_connection_init_rx_config(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -int iwlagn_set_pan_params(struct iwl_priv *priv); -int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); -void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx); -int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed); -void iwlagn_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes); -void iwlagn_config_ht40(struct ieee80211_conf *conf, - struct iwl_rxon_context *ctx); -void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf); -void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, - struct iwl_rxon_context *ctx); -void iwl_set_flags_for_band(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - enum ieee80211_band band, - struct ieee80211_vif *vif); - -/* uCode */ -int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); -void iwl_send_prio_tbl(struct iwl_priv *priv); -int iwl_init_alive_start(struct iwl_priv *priv); -int iwl_run_init_ucode(struct iwl_priv *priv); -int iwl_load_ucode_wait_alive(struct iwl_priv *priv, - enum iwl_ucode_type ucode_type); -int iwl_send_calib_results(struct iwl_priv *priv); -int iwl_calib_set(struct iwl_priv *priv, - const struct iwl_calib_hdr *cmd, int len); -void iwl_calib_free_results(struct iwl_priv *priv); -int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, - char **buf); -int iwlagn_hw_valid_rtc_data_addr(u32 addr); - -/* lib */ -int iwlagn_send_tx_power(struct iwl_priv *priv); -void iwlagn_temperature(struct iwl_priv *priv); -int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk); -void iwlagn_dev_txfifo_flush(struct iwl_priv *priv); -int iwlagn_send_beacon_cmd(struct iwl_priv *priv); -int iwl_send_statistics_request(struct iwl_priv *priv, - u8 flags, bool clear); - -static inline const struct ieee80211_supported_band *iwl_get_hw_mode( - struct iwl_priv *priv, enum ieee80211_band band) -{ - return priv->hw->wiphy->bands[band]; -} - -#ifdef CONFIG_PM_SLEEP -int iwlagn_send_patterns(struct iwl_priv *priv, - struct cfg80211_wowlan *wowlan); -int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan); -#endif - -/* rx */ -int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); -void iwl_setup_rx_handlers(struct iwl_priv *priv); -void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); - - -/* tx */ -int iwlagn_tx_skb(struct iwl_priv *priv, - struct ieee80211_sta *sta, - struct sk_buff *skb); -int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 *ssn); -int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size); -int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid); -int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid); -void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb); -void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); - -static inline u32 iwl_tx_status_to_mac80211(u32 status) -{ - status &= TX_STATUS_MSK; - - switch (status) { - case 
TX_STATUS_SUCCESS: - case TX_STATUS_DIRECT_DONE: - return IEEE80211_TX_STAT_ACK; - case TX_STATUS_FAIL_DEST_PS: - case TX_STATUS_FAIL_PASSIVE_NO_RX: - return IEEE80211_TX_STAT_TX_FILTERED; - default: - return 0; - } -} - -static inline bool iwl_is_tx_success(u32 status) -{ - status &= TX_STATUS_MSK; - return (status == TX_STATUS_SUCCESS) || - (status == TX_STATUS_DIRECT_DONE); -} - -u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); - -/* scan */ -void iwlagn_post_scan(struct iwl_priv *priv); -int iwl_force_rf_reset(struct iwl_priv *priv, bool external); -void iwl_init_scan_params(struct iwl_priv *priv); -int iwl_scan_cancel(struct iwl_priv *priv); -void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); -void iwl_force_scan_end(struct iwl_priv *priv); -void iwl_internal_short_hw_scan(struct iwl_priv *priv); -void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); -void iwl_setup_scan_deferred_work(struct iwl_priv *priv); -void iwl_cancel_scan_deferred_work(struct iwl_priv *priv); -int __must_check iwl_scan_initiate(struct iwl_priv *priv, - struct ieee80211_vif *vif, - enum iwl_scan_type scan_type, - enum ieee80211_band band); - -/* For faster active scanning, scan will move to the next channel if fewer than - * PLCP_QUIET_THRESH packets are heard on this channel within - * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell - * time if it's a quiet channel (nothing responded to our probe, and there's - * no other traffic). - * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ -#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ -#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ - -#define IWL_SCAN_CHECK_WATCHDOG (HZ * 15) - - -/* bt coex */ -void iwlagn_send_advance_bt_config(struct iwl_priv *priv); -void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv); -void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv); -void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv); -void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv); -void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena); - -static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) -{ - return priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist; -} - -#ifdef CONFIG_IWLWIFI_DEBUG -const char *iwl_get_tx_fail_reason(u32 status); -const char *iwl_get_agg_tx_fail_reason(u16 status); -#else -static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; } -static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; } -#endif - - -/* station management */ -int iwlagn_manage_ibss_station(struct iwl_priv *priv, - struct ieee80211_vif *vif, bool add); -#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ -#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ -#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of - being activated */ -#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211; - (this is for the IBSS BSSID stations) */ -#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */ - - -void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx); -void iwl_clear_ucode_stations(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -void iwl_dealloc_bcast_stations(struct iwl_priv *priv); -int iwl_get_free_ucode_key_offset(struct iwl_priv *priv); -int iwl_send_add_sta(struct iwl_priv *priv, - struct iwl_addsta_cmd *sta, u8 flags); -int iwl_add_station_common(struct iwl_priv *priv, struct 
iwl_rxon_context *ctx, - const u8 *addr, bool is_ap, - struct ieee80211_sta *sta, u8 *sta_id_r); -int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, - const u8 *addr); -void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, - const u8 *addr); -u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - const u8 *addr, bool is_ap, struct ieee80211_sta *sta); - -int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct iwl_link_quality_cmd *lq, u8 flags, bool init); -void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); -int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_sta *sta); - -bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_sta *sta); - -static inline int iwl_sta_id(struct ieee80211_sta *sta) -{ - if (WARN_ON(!sta)) - return IWL_INVALID_STATION; - - return ((struct iwl_station_priv *)sta->drv_priv)->sta_id; -} - -int iwlagn_alloc_bcast_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - const u8 *addr, u8 *sta_id_r); -int iwl_remove_default_wep_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key); -int iwl_set_default_wep_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key); -int iwl_restore_default_wep_keys(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key, - struct ieee80211_sta *sta); -int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key, - struct ieee80211_sta *sta); -void iwl_update_tkip_key(struct iwl_priv *priv, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); -int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); -int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, - int tid, u16 ssn); -int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, - int tid); -void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); -int iwl_update_bcast_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -int iwl_update_bcast_stations(struct iwl_priv *priv); - -/* rate */ -static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) -{ - return BIT(ant_idx) << RATE_MCS_ANT_POS; -} - -static inline u8 iwl_hw_get_rate(__le32 rate_n_flags) -{ - return le32_to_cpu(rate_n_flags) & RATE_MCS_RATE_MSK; -} - -static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) -{ - return cpu_to_le32(flags|(u32)rate); -} - -int iwl_alive_start(struct iwl_priv *priv); - -#ifdef CONFIG_IWLWIFI_DEBUG -void iwl_print_rx_config_cmd(struct iwl_priv *priv, - enum iwl_rxon_context_id ctxid); -#else -static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv, - enum iwl_rxon_context_id ctxid) -{ -} -#endif - -/* status checks */ - -static inline int iwl_is_ready(struct iwl_priv *priv) -{ - /* The adapter is 'ready' if READY EXIT_PENDING is not set */ - return test_bit(STATUS_READY, &priv->status) && - !test_bit(STATUS_EXIT_PENDING, &priv->status); -} - -static inline int iwl_is_alive(struct iwl_priv *priv) -{ - return test_bit(STATUS_ALIVE, &priv->status); -} - -static inline int iwl_is_rfkill(struct iwl_priv *priv) -{ - 
return test_bit(STATUS_RF_KILL_HW, &priv->status); -} - -static inline int iwl_is_ctkill(struct iwl_priv *priv) -{ - return test_bit(STATUS_CT_KILL, &priv->status); -} - -static inline int iwl_is_ready_rf(struct iwl_priv *priv) -{ - if (iwl_is_rfkill(priv)) - return 0; - - return iwl_is_ready(priv); -} - -static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state) -{ - if (state) - set_bit(STATUS_POWER_PMI, &priv->status); - else - clear_bit(STATUS_POWER_PMI, &priv->status); - iwl_trans_set_pmi(priv->trans, state); -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir); -#else -static inline int iwl_dbgfs_register(struct iwl_priv *priv, - struct dentry *dbgfs_dir) -{ - return 0; -} -#endif /* CONFIG_IWLWIFI_DEBUGFS */ - -#ifdef CONFIG_IWLWIFI_DEBUG -#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \ -do { \ - if (!iwl_is_rfkill((m))) \ - IWL_ERR(m, fmt, ##args); \ - else \ - __iwl_err((m)->dev, true, \ - !iwl_have_debug_level(IWL_DL_RADIO), \ - fmt, ##args); \ -} while (0) -#else -#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \ -do { \ - if (!iwl_is_rfkill((m))) \ - IWL_ERR(m, fmt, ##args); \ - else \ - __iwl_err((m)->dev, true, true, fmt, ##args); \ -} while (0) -#endif /* CONFIG_IWLWIFI_DEBUG */ - -extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1]; - -static inline const char *iwl_dvm_get_cmd_string(u8 cmd) -{ - const char *s = iwl_dvm_cmd_strings[cmd]; - if (s) - return s; - return "UNKNOWN"; -} -#endif /* __iwl_agn_h__ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c deleted file mode 100644 index 20e6aa910700..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/calib.c +++ /dev/null @@ -1,1113 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#include -#include - -#include "iwl-trans.h" - -#include "dev.h" -#include "calib.h" -#include "agn.h" - -/***************************************************************************** - * INIT calibrations framework - *****************************************************************************/ - -/* Opaque calibration results */ -struct iwl_calib_result { - struct list_head list; - size_t cmd_len; - struct iwl_calib_hdr hdr; - /* data follows */ -}; - -struct statistics_general_data { - u32 beacon_silence_rssi_a; - u32 beacon_silence_rssi_b; - u32 beacon_silence_rssi_c; - u32 beacon_energy_a; - u32 beacon_energy_b; - u32 beacon_energy_c; -}; - -int iwl_send_calib_results(struct iwl_priv *priv) -{ - struct iwl_host_cmd hcmd = { - .id = REPLY_PHY_CALIBRATION_CMD, - }; - struct iwl_calib_result *res; - - list_for_each_entry(res, &priv->calib_results, list) { - int ret; - - hcmd.len[0] = res->cmd_len; - hcmd.data[0] = &res->hdr; - hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - ret = iwl_dvm_send_cmd(priv, &hcmd); - if (ret) { - IWL_ERR(priv, "Error %d on calib cmd %d\n", - ret, res->hdr.op_code); - return ret; - } - } - - return 0; -} - -int iwl_calib_set(struct iwl_priv *priv, - const struct iwl_calib_hdr *cmd, int len) -{ - struct iwl_calib_result *res, *tmp; - - res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr), - GFP_ATOMIC); - if (!res) - return -ENOMEM; - memcpy(&res->hdr, cmd, len); - res->cmd_len = len; - - list_for_each_entry(tmp, &priv->calib_results, list) { - if (tmp->hdr.op_code == res->hdr.op_code) { - list_replace(&tmp->list, &res->list); - kfree(tmp); - return 0; - } - } - - /* wasn't in list already */ - list_add_tail(&res->list, &priv->calib_results); - - return 0; -} - -void iwl_calib_free_results(struct iwl_priv *priv) -{ - struct iwl_calib_result *res, *tmp; - - list_for_each_entry_safe(res, tmp, &priv->calib_results, list) { - list_del(&res->list); - kfree(res); - } -} - -/***************************************************************************** - * RUNTIME calibrations framework - *****************************************************************************/ - -/* "false alarms" are signals that our DSP tries to lock onto, - * but then determines that they are either noise, 
or transmissions - * from a distant wireless network (also "noise", really) that get - * "stepped on" by stronger transmissions within our own network. - * This algorithm attempts to set a sensitivity level that is high - * enough to receive all of our own network traffic, but not so - * high that our DSP gets too busy trying to lock onto non-network - * activity/noise. */ -static int iwl_sens_energy_cck(struct iwl_priv *priv, - u32 norm_fa, - u32 rx_enable_time, - struct statistics_general_data *rx_info) -{ - u32 max_nrg_cck = 0; - int i = 0; - u8 max_silence_rssi = 0; - u32 silence_ref = 0; - u8 silence_rssi_a = 0; - u8 silence_rssi_b = 0; - u8 silence_rssi_c = 0; - u32 val; - - /* "false_alarms" values below are cross-multiplications to assess the - * numbers of false alarms within the measured period of actual Rx - * (Rx is off when we're txing), vs the min/max expected false alarms - * (some should be expected if rx is sensitive enough) in a - * hypothetical listening period of 200 time units (TU), 204.8 msec: - * - * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time - * - * */ - u32 false_alarms = norm_fa * 200 * 1024; - u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; - u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; - struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; - - data = &(priv->sensitivity_data); - - data->nrg_auto_corr_silence_diff = 0; - - /* Find max silence rssi among all 3 receivers. - * This is background noise, which may include transmissions from other - * networks, measured during silence before our network's beacon */ - silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & - ALL_BAND_FILTER) >> 8); - silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & - ALL_BAND_FILTER) >> 8); - silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & - ALL_BAND_FILTER) >> 8); - - val = max(silence_rssi_b, silence_rssi_c); - max_silence_rssi = max(silence_rssi_a, (u8) val); - - /* Store silence rssi in 20-beacon history table */ - data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi; - data->nrg_silence_idx++; - if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L) - data->nrg_silence_idx = 0; - - /* Find max silence rssi across 20 beacon history */ - for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) { - val = data->nrg_silence_rssi[i]; - silence_ref = max(silence_ref, val); - } - IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", - silence_rssi_a, silence_rssi_b, silence_rssi_c, - silence_ref); - - /* Find max rx energy (min value!) among all 3 receivers, - * measured during beacon frame. - * Save it in 10-beacon history table. */ - i = data->nrg_energy_idx; - val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c); - data->nrg_value[i] = min(rx_info->beacon_energy_a, val); - - data->nrg_energy_idx++; - if (data->nrg_energy_idx >= 10) - data->nrg_energy_idx = 0; - - /* Find min rx energy (max value) across 10 beacon history. - * This is the minimum signal level that we want to receive well. - * Add backoff (margin so we don't miss slightly lower energy frames). - * This establishes an upper bound (min value) for energy threshold. 
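
The cross-multiplication used earlier in this function (comparing norm_fa scaled to a 200 TU window against the MIN_FA/MAX_FA limits scaled by the actual rx-on time) can be read in isolation as follows. This is only a standalone sketch, not driver code: the threshold constants are illustrative stand-ins for the driver's MIN_FA_CCK/MAX_FA_CCK definitions, and rx_enable_time is assumed to be in microseconds as reported by the statistics notification.

    #include <stdint.h>

    #define EX_MIN_FA_CCK 5         /* illustrative, not the driver's value */
    #define EX_MAX_FA_CCK 50        /* illustrative, not the driver's value */

    enum ex_fa_state { EX_FA_TOO_FEW, EX_FA_GOOD_RANGE, EX_FA_TOO_MANY };

    /*
     * Compare the measured false-alarm rate (norm_fa / rx_enable_time) against
     * the per-200-TU limits without dividing: scale the measured count up to a
     * 200 TU window (200 * 1024 usec = 204.8 ms) and scale the limits by the
     * actual rx-on time instead.
     */
    static enum ex_fa_state ex_classify_fa(uint32_t norm_fa, uint32_t rx_enable_time)
    {
        uint32_t false_alarms = norm_fa * 200 * 1024;
        uint32_t max_false_alarms = EX_MAX_FA_CCK * rx_enable_time;
        uint32_t min_false_alarms = EX_MIN_FA_CCK * rx_enable_time;

        if (false_alarms > max_false_alarms)
            return EX_FA_TOO_MANY;   /* too noisy: reduce sensitivity */
        if (false_alarms < min_false_alarms)
            return EX_FA_TOO_FEW;    /* too quiet: increase sensitivity */
        return EX_FA_GOOD_RANGE;     /* healthy range: keep status quo */
    }

For example, 30 normalized false alarms over 100 ms of rx time (100000 usec) scale to 30 * 204800 = 6,144,000 against a maximum of 50 * 100000 = 5,000,000, so sensitivity would be reduced.
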
*/ - max_nrg_cck = data->nrg_value[0]; - for (i = 1; i < 10; i++) - max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); - max_nrg_cck += 6; - - IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", - rx_info->beacon_energy_a, rx_info->beacon_energy_b, - rx_info->beacon_energy_c, max_nrg_cck - 6); - - /* Count number of consecutive beacons with fewer-than-desired - * false alarms. */ - if (false_alarms < min_false_alarms) - data->num_in_cck_no_fa++; - else - data->num_in_cck_no_fa = 0; - IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", - data->num_in_cck_no_fa); - - /* If we got too many false alarms this time, reduce sensitivity */ - if ((false_alarms > max_false_alarms) && - (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { - IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", - false_alarms, max_false_alarms); - IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n"); - data->nrg_curr_state = IWL_FA_TOO_MANY; - /* Store for "fewer than desired" on later beacon */ - data->nrg_silence_ref = silence_ref; - - /* increase energy threshold (reduce nrg value) - * to decrease sensitivity */ - data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; - /* Else if we got fewer than desired, increase sensitivity */ - } else if (false_alarms < min_false_alarms) { - data->nrg_curr_state = IWL_FA_TOO_FEW; - - /* Compare silence level with silence level for most recent - * healthy number or too many false alarms */ - data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - - (s32)silence_ref; - - IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u, silence diff %d\n", - false_alarms, min_false_alarms, - data->nrg_auto_corr_silence_diff); - - /* Increase value to increase sensitivity, but only if: - * 1a) previous beacon did *not* have *too many* false alarms - * 1b) AND there's a significant difference in Rx levels - * from a previous beacon with too many, or healthy # FAs - * OR 2) We've seen a lot of beacons (100) with too few - * false alarms */ - if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && - ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || - (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { - - IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); - /* Increase nrg value to increase sensitivity */ - val = data->nrg_th_cck + NRG_STEP_CCK; - data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); - } else { - IWL_DEBUG_CALIB(priv, "... but not changing sensitivity\n"); - } - - /* Else we got a healthy number of false alarms, keep status quo */ - } else { - IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); - data->nrg_curr_state = IWL_FA_GOOD_RANGE; - - /* Store for use in "fewer than desired" with later beacon */ - data->nrg_silence_ref = silence_ref; - - /* If previous beacon had too many false alarms, - * give it some extra margin by reducing sensitivity again - * (but don't go below measured energy of desired Rx) */ - if (IWL_FA_TOO_MANY == data->nrg_prev_state) { - IWL_DEBUG_CALIB(priv, "... increasing margin\n"); - if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) - data->nrg_th_cck -= NRG_MARGIN; - else - data->nrg_th_cck = max_nrg_cck; - } - } - - /* Make sure the energy threshold does not go above the measured - * energy of the desired Rx signals (reduced by backoff margin), - * or else we might start missing Rx frames. - * Lower value is higher energy, so we use max()! 
- */ - data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); - IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); - - data->nrg_prev_state = data->nrg_curr_state; - - /* Auto-correlation CCK algorithm */ - if (false_alarms > min_false_alarms) { - - /* increase auto_corr values to decrease sensitivity - * so the DSP won't be disturbed by the noise - */ - if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) - data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1; - else { - val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; - data->auto_corr_cck = - min((u32)ranges->auto_corr_max_cck, val); - } - val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; - data->auto_corr_cck_mrc = - min((u32)ranges->auto_corr_max_cck_mrc, val); - } else if ((false_alarms < min_false_alarms) && - ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || - (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { - - /* Decrease auto_corr values to increase sensitivity */ - val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; - data->auto_corr_cck = - max((u32)ranges->auto_corr_min_cck, val); - val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; - data->auto_corr_cck_mrc = - max((u32)ranges->auto_corr_min_cck_mrc, val); - } - - return 0; -} - - -static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv, - u32 norm_fa, - u32 rx_enable_time) -{ - u32 val; - u32 false_alarms = norm_fa * 200 * 1024; - u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; - u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; - struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; - - data = &(priv->sensitivity_data); - - /* If we got too many false alarms this time, reduce sensitivity */ - if (false_alarms > max_false_alarms) { - - IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", - false_alarms, max_false_alarms); - - val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; - data->auto_corr_ofdm = - min((u32)ranges->auto_corr_max_ofdm, val); - - val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; - data->auto_corr_ofdm_mrc = - min((u32)ranges->auto_corr_max_ofdm_mrc, val); - - val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; - data->auto_corr_ofdm_x1 = - min((u32)ranges->auto_corr_max_ofdm_x1, val); - - val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; - data->auto_corr_ofdm_mrc_x1 = - min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); - } - - /* Else if we got fewer than desired, increase sensitivity */ - else if (false_alarms < min_false_alarms) { - - IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", - false_alarms, min_false_alarms); - - val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; - data->auto_corr_ofdm = - max((u32)ranges->auto_corr_min_ofdm, val); - - val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; - data->auto_corr_ofdm_mrc = - max((u32)ranges->auto_corr_min_ofdm_mrc, val); - - val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; - data->auto_corr_ofdm_x1 = - max((u32)ranges->auto_corr_min_ofdm_x1, val); - - val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; - data->auto_corr_ofdm_mrc_x1 = - max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); - } else { - IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", - min_false_alarms, false_alarms, max_false_alarms); - } - return 0; -} - -static void iwl_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, - struct iwl_sensitivity_data *data, - __le16 *tbl) -{ - tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = - cpu_to_le16((u16)data->auto_corr_ofdm); - tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = - 
cpu_to_le16((u16)data->auto_corr_ofdm_mrc); - tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = - cpu_to_le16((u16)data->auto_corr_ofdm_x1); - tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = - cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); - - tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = - cpu_to_le16((u16)data->auto_corr_cck); - tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = - cpu_to_le16((u16)data->auto_corr_cck_mrc); - - tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = - cpu_to_le16((u16)data->nrg_th_cck); - tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = - cpu_to_le16((u16)data->nrg_th_ofdm); - - tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = - cpu_to_le16(data->barker_corr_th_min); - tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = - cpu_to_le16(data->barker_corr_th_min_mrc); - tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = - cpu_to_le16(data->nrg_th_cca); - - IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", - data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, - data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, - data->nrg_th_ofdm); - - IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", - data->auto_corr_cck, data->auto_corr_cck_mrc, - data->nrg_th_cck); -} - -/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ -static int iwl_sensitivity_write(struct iwl_priv *priv) -{ - struct iwl_sensitivity_cmd cmd; - struct iwl_sensitivity_data *data = NULL; - struct iwl_host_cmd cmd_out = { - .id = SENSITIVITY_CMD, - .len = { sizeof(struct iwl_sensitivity_cmd), }, - .flags = CMD_ASYNC, - .data = { &cmd, }, - }; - - data = &(priv->sensitivity_data); - - memset(&cmd, 0, sizeof(cmd)); - - iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); - - /* Update uCode's "work" table, and copy it to DSP */ - cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; - - /* Don't send command to uCode if nothing has changed */ - if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), - sizeof(u16)*HD_TABLE_SIZE)) { - IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); - return 0; - } - - /* Copy table for comparison next time */ - memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), - sizeof(u16)*HD_TABLE_SIZE); - - return iwl_dvm_send_cmd(priv, &cmd_out); -} - -/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ -static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) -{ - struct iwl_enhance_sensitivity_cmd cmd; - struct iwl_sensitivity_data *data = NULL; - struct iwl_host_cmd cmd_out = { - .id = SENSITIVITY_CMD, - .len = { sizeof(struct iwl_enhance_sensitivity_cmd), }, - .flags = CMD_ASYNC, - .data = { &cmd, }, - }; - - data = &(priv->sensitivity_data); - - memset(&cmd, 0, sizeof(cmd)); - - iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); - - if (priv->lib->hd_v2) { - cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = - HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; - cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = - HD_INA_NON_SQUARE_DET_CCK_DATA_V2; - cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] = - HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2; - cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] = - HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2; - cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = - HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2; - cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] = - HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2; - cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] = - HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2; - cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] = - HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2; - 
cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = - HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2; - cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] = - HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2; - cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] = - HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2; - } else { - cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = - HD_INA_NON_SQUARE_DET_OFDM_DATA_V1; - cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = - HD_INA_NON_SQUARE_DET_CCK_DATA_V1; - cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] = - HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1; - cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] = - HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1; - cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = - HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1; - cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] = - HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1; - cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] = - HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1; - cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] = - HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1; - cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = - HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1; - cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] = - HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1; - cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] = - HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1; - } - - /* Update uCode's "work" table, and copy it to DSP */ - cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; - - /* Don't send command to uCode if nothing has changed */ - if (!memcmp(&cmd.enhance_table[0], &(priv->sensitivity_tbl[0]), - sizeof(u16)*HD_TABLE_SIZE) && - !memcmp(&cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX], - &(priv->enhance_sensitivity_tbl[0]), - sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES)) { - IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); - return 0; - } - - /* Copy table for comparison next time */ - memcpy(&(priv->sensitivity_tbl[0]), &(cmd.enhance_table[0]), - sizeof(u16)*HD_TABLE_SIZE); - memcpy(&(priv->enhance_sensitivity_tbl[0]), - &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]), - sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES); - - return iwl_dvm_send_cmd(priv, &cmd_out); -} - -void iwl_init_sensitivity(struct iwl_priv *priv) -{ - int ret = 0; - int i; - struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; - - if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED) - return; - - IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n"); - - /* Clear driver's sensitivity algo data */ - data = &(priv->sensitivity_data); - - if (ranges == NULL) - return; - - memset(data, 0, sizeof(struct iwl_sensitivity_data)); - - data->num_in_cck_no_fa = 0; - data->nrg_curr_state = IWL_FA_TOO_MANY; - data->nrg_prev_state = IWL_FA_TOO_MANY; - data->nrg_silence_ref = 0; - data->nrg_silence_idx = 0; - data->nrg_energy_idx = 0; - - for (i = 0; i < 10; i++) - data->nrg_value[i] = 0; - - for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) - data->nrg_silence_rssi[i] = 0; - - data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; - data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; - data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; - data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; - data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; - data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; - data->nrg_th_cck = ranges->nrg_th_cck; - data->nrg_th_ofdm = ranges->nrg_th_ofdm; - 
data->barker_corr_th_min = ranges->barker_corr_th_min; - data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc; - data->nrg_th_cca = ranges->nrg_th_cca; - - data->last_bad_plcp_cnt_ofdm = 0; - data->last_fa_cnt_ofdm = 0; - data->last_bad_plcp_cnt_cck = 0; - data->last_fa_cnt_cck = 0; - - if (priv->fw->enhance_sensitivity_table) - ret |= iwl_enhance_sensitivity_write(priv); - else - ret |= iwl_sensitivity_write(priv); - IWL_DEBUG_CALIB(priv, "<calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED) - return; - - data = &(priv->sensitivity_data); - - if (!iwl_is_any_associated(priv)) { - IWL_DEBUG_CALIB(priv, "<< - not associated\n"); - return; - } - - spin_lock_bh(&priv->statistics.lock); - rx_info = &priv->statistics.rx_non_phy; - ofdm = &priv->statistics.rx_ofdm; - cck = &priv->statistics.rx_cck; - if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { - IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); - spin_unlock_bh(&priv->statistics.lock); - return; - } - - /* Extract Statistics: */ - rx_enable_time = le32_to_cpu(rx_info->channel_load); - fa_cck = le32_to_cpu(cck->false_alarm_cnt); - fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt); - bad_plcp_cck = le32_to_cpu(cck->plcp_err); - bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); - - statis.beacon_silence_rssi_a = - le32_to_cpu(rx_info->beacon_silence_rssi_a); - statis.beacon_silence_rssi_b = - le32_to_cpu(rx_info->beacon_silence_rssi_b); - statis.beacon_silence_rssi_c = - le32_to_cpu(rx_info->beacon_silence_rssi_c); - statis.beacon_energy_a = - le32_to_cpu(rx_info->beacon_energy_a); - statis.beacon_energy_b = - le32_to_cpu(rx_info->beacon_energy_b); - statis.beacon_energy_c = - le32_to_cpu(rx_info->beacon_energy_c); - - spin_unlock_bh(&priv->statistics.lock); - - IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); - - if (!rx_enable_time) { - IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); - return; - } - - /* These statistics increase monotonically, and do not reset - * at each beacon. Calculate difference from last value, or just - * use the new statistics value if it has reset or wrapped around. 
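
The pattern described in this comment (and applied to each counter just below) amounts to a small helper. A standalone sketch of it, not driver code:

    #include <stdint.h>

    /*
     * Return how much a monotonically increasing firmware counter advanced
     * since the previous reading. A reading smaller than the stored value is
     * treated as a reset/wrap: the new value is used as-is and becomes the
     * new baseline.
     */
    static uint32_t ex_counter_delta(uint32_t *last, uint32_t now)
    {
        uint32_t delta;

        if (*last > now) {      /* statistics were reset or wrapped */
            *last = now;
            return now;
        }

        delta = now - *last;
        *last += delta;         /* same as *last = now */
        return delta;
    }
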
*/ - if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) - data->last_bad_plcp_cnt_cck = bad_plcp_cck; - else { - bad_plcp_cck -= data->last_bad_plcp_cnt_cck; - data->last_bad_plcp_cnt_cck += bad_plcp_cck; - } - - if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) - data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; - else { - bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; - data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; - } - - if (data->last_fa_cnt_ofdm > fa_ofdm) - data->last_fa_cnt_ofdm = fa_ofdm; - else { - fa_ofdm -= data->last_fa_cnt_ofdm; - data->last_fa_cnt_ofdm += fa_ofdm; - } - - if (data->last_fa_cnt_cck > fa_cck) - data->last_fa_cnt_cck = fa_cck; - else { - fa_cck -= data->last_fa_cnt_cck; - data->last_fa_cnt_cck += fa_cck; - } - - /* Total aborted signal locks */ - norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; - norm_fa_cck = fa_cck + bad_plcp_cck; - - IWL_DEBUG_CALIB(priv, "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, - bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); - - iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); - iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); - if (priv->fw->enhance_sensitivity_table) - iwl_enhance_sensitivity_write(priv); - else - iwl_sensitivity_write(priv); -} - -static inline u8 find_first_chain(u8 mask) -{ - if (mask & ANT_A) - return CHAIN_A; - if (mask & ANT_B) - return CHAIN_B; - return CHAIN_C; -} - -/** - * Run disconnected antenna algorithm to find out which antennas are - * disconnected. - */ -static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, - struct iwl_chain_noise_data *data) -{ - u32 active_chains = 0; - u32 max_average_sig; - u16 max_average_sig_antenna_i; - u8 num_tx_chains; - u8 first_chain; - u16 i = 0; - - average_sig[0] = data->chain_signal_a / IWL_CAL_NUM_BEACONS; - average_sig[1] = data->chain_signal_b / IWL_CAL_NUM_BEACONS; - average_sig[2] = data->chain_signal_c / IWL_CAL_NUM_BEACONS; - - if (average_sig[0] >= average_sig[1]) { - max_average_sig = average_sig[0]; - max_average_sig_antenna_i = 0; - active_chains = (1 << max_average_sig_antenna_i); - } else { - max_average_sig = average_sig[1]; - max_average_sig_antenna_i = 1; - active_chains = (1 << max_average_sig_antenna_i); - } - - if (average_sig[2] >= max_average_sig) { - max_average_sig = average_sig[2]; - max_average_sig_antenna_i = 2; - active_chains = (1 << max_average_sig_antenna_i); - } - - IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", - average_sig[0], average_sig[1], average_sig[2]); - IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", - max_average_sig, max_average_sig_antenna_i); - - /* Compare signal strengths for all 3 receivers. */ - for (i = 0; i < NUM_RX_CHAINS; i++) { - if (i != max_average_sig_antenna_i) { - s32 rssi_delta = (max_average_sig - average_sig[i]); - - /* If signal is very weak, compared with - * strongest, mark it as disconnected. */ - if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS) - data->disconn_array[i] = 1; - else - active_chains |= (1 << i); - IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " - "disconn_array[i] = %d\n", - i, rssi_delta, data->disconn_array[i]); - } - } - - /* - * The above algorithm sometimes fails when the ucode - * reports 0 for all chains. It's not clear why that - * happens to start with, but it is then causing trouble - * because this can make us enable more chains than the - * hardware really has. - * - * To be safe, simply mask out any chains that we know - * are not on the device. 
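
Reduced to its essentials, the disconnected-antenna test in iwl_find_disconn_antenna() above compares each chain's averaged beacon signal against the strongest chain. A minimal sketch, with an illustrative threshold standing in for MAXIMUM_ALLOWED_PATHLOSS; the caller-side AND against the device's real antenna mask mirrors the workaround described in the comment above.

    #include <stdint.h>

    #define EX_NUM_CHAINS   3
    #define EX_MAX_PATHLOSS 15  /* illustrative stand-in for MAXIMUM_ALLOWED_PATHLOSS */

    /*
     * Return a bitmask of chains considered connected: the strongest chain is
     * always kept, and any other chain whose averaged signal falls within
     * EX_MAX_PATHLOSS of it is kept as well. The caller is expected to AND the
     * result with the set of antennas that actually exist on the device.
     */
    static uint32_t ex_connected_chains(const int32_t avg_sig[EX_NUM_CHAINS])
    {
        int strongest = 0;
        uint32_t active = 0;
        int i;

        for (i = 1; i < EX_NUM_CHAINS; i++)
            if (avg_sig[i] > avg_sig[strongest])
                strongest = i;

        for (i = 0; i < EX_NUM_CHAINS; i++)
            if (avg_sig[strongest] - avg_sig[i] <= EX_MAX_PATHLOSS)
                active |= 1u << i;

        return active;
    }
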
- */ - active_chains &= priv->nvm_data->valid_rx_ant; - - num_tx_chains = 0; - for (i = 0; i < NUM_RX_CHAINS; i++) { - /* loops on all the bits of - * priv->hw_setting.valid_tx_ant */ - u8 ant_msk = (1 << i); - if (!(priv->nvm_data->valid_tx_ant & ant_msk)) - continue; - - num_tx_chains++; - if (data->disconn_array[i] == 0) - /* there is a Tx antenna connected */ - break; - if (num_tx_chains == priv->hw_params.tx_chains_num && - data->disconn_array[i]) { - /* - * If all chains are disconnected - * connect the first valid tx chain - */ - first_chain = - find_first_chain(priv->nvm_data->valid_tx_ant); - data->disconn_array[first_chain] = 0; - active_chains |= BIT(first_chain); - IWL_DEBUG_CALIB(priv, - "All Tx chains are disconnected W/A - declare %d as connected\n", - first_chain); - break; - } - } - - if (active_chains != priv->nvm_data->valid_rx_ant && - active_chains != priv->chain_noise_data.active_chains) - IWL_DEBUG_CALIB(priv, - "Detected that not all antennas are connected! " - "Connected: %#x, valid: %#x.\n", - active_chains, - priv->nvm_data->valid_rx_ant); - - /* Save for use within RXON, TX, SCAN commands, etc. */ - data->active_chains = active_chains; - IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", - active_chains); -} - -static void iwlagn_gain_computation(struct iwl_priv *priv, - u32 average_noise[NUM_RX_CHAINS], - u8 default_chain) -{ - int i; - s32 delta_g; - struct iwl_chain_noise_data *data = &priv->chain_noise_data; - - /* - * Find Gain Code for the chains based on "default chain" - */ - for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) { - if ((data->disconn_array[i])) { - data->delta_gain_code[i] = 0; - continue; - } - - delta_g = (priv->lib->chain_noise_scale * - ((s32)average_noise[default_chain] - - (s32)average_noise[i])) / 1500; - - /* bound gain by 2 bits value max, 3rd bit is sign */ - data->delta_gain_code[i] = - min(abs(delta_g), - (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); - - if (delta_g < 0) - /* - * set negative sign ... - * note to Intel developers: This is uCode API format, - * not the format of any internal device registers. - * Do not change this format for e.g. 6050 or similar - * devices. Change format only if more resolution - * (i.e. more than 2 bits magnitude) is needed. - */ - data->delta_gain_code[i] |= (1 << 2); - } - - IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n", - data->delta_gain_code[1], data->delta_gain_code[2]); - - if (!data->radio_write) { - struct iwl_calib_chain_noise_gain_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - - iwl_set_calib_hdr(&cmd.hdr, - priv->phy_calib_chain_noise_gain_cmd); - cmd.delta_gain_1 = data->delta_gain_code[1]; - cmd.delta_gain_2 = data->delta_gain_code[2]; - iwl_dvm_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, - CMD_ASYNC, sizeof(cmd), &cmd); - - data->radio_write = 1; - data->state = IWL_CHAIN_NOISE_CALIBRATED; - } -} - -/* - * Accumulate 16 beacons of signal and noise statistics for each of - * 3 receivers/antennas/rx-chains, then figure out: - * 1) Which antennas are connected. - * 2) Differential rx gain settings to balance the 3 receivers. 
- */ -void iwl_chain_noise_calibration(struct iwl_priv *priv) -{ - struct iwl_chain_noise_data *data = NULL; - - u32 chain_noise_a; - u32 chain_noise_b; - u32 chain_noise_c; - u32 chain_sig_a; - u32 chain_sig_b; - u32 chain_sig_c; - u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; - u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; - u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; - u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; - u16 i = 0; - u16 rxon_chnum = INITIALIZATION_VALUE; - u16 stat_chnum = INITIALIZATION_VALUE; - u8 rxon_band24; - u8 stat_band24; - struct statistics_rx_non_phy *rx_info; - - /* - * MULTI-FIXME: - * When we support multiple interfaces on different channels, - * this must be modified/fixed. - */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) - return; - - data = &(priv->chain_noise_data); - - /* - * Accumulate just the first "chain_noise_num_beacons" after - * the first association, then we're done forever. - */ - if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { - if (data->state == IWL_CHAIN_NOISE_ALIVE) - IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); - return; - } - - spin_lock_bh(&priv->statistics.lock); - - rx_info = &priv->statistics.rx_non_phy; - - if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { - IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); - spin_unlock_bh(&priv->statistics.lock); - return; - } - - rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); - rxon_chnum = le16_to_cpu(ctx->staging.channel); - stat_band24 = - !!(priv->statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK); - stat_chnum = le32_to_cpu(priv->statistics.flag) >> 16; - - /* Make sure we accumulate data for just the associated channel - * (even if scanning). 
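
The differential gain code that iwlagn_gain_computation() above writes for each chain uses a 2-bit magnitude plus a sign in bit 2. A standalone sketch of that encoding; the scale factor below is only a placeholder for priv->lib->chain_noise_scale, which varies per device family, and the clamp value stands in for CHAIN_NOISE_MAX_DELTA_GAIN_CODE.

    #include <stdint.h>
    #include <stdlib.h>

    #define EX_NOISE_SCALE    1000  /* placeholder for priv->lib->chain_noise_scale */
    #define EX_MAX_GAIN_CODE  3     /* 2-bit magnitude limit */

    /*
     * Encode the gain adjustment for one chain relative to the reference
     * chain: magnitude clamped to 2 bits, bit 2 set for a negative
     * adjustment. Only an illustration of the format documented in the
     * driver, not the driver's own function.
     */
    static uint8_t ex_delta_gain_code(int32_t ref_noise, int32_t chain_noise)
    {
        int32_t delta_g = (EX_NOISE_SCALE * (ref_noise - chain_noise)) / 1500;
        uint8_t code = abs(delta_g) > EX_MAX_GAIN_CODE ? EX_MAX_GAIN_CODE
                                                       : (uint8_t)abs(delta_g);

        if (delta_g < 0)
            code |= 1 << 2;     /* sign bit, per the uCode API format */

        return code;
    }
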
*/ - if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { - IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", - rxon_chnum, rxon_band24); - spin_unlock_bh(&priv->statistics.lock); - return; - } - - /* - * Accumulate beacon statistics values across - * "chain_noise_num_beacons" - */ - chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & - IN_BAND_FILTER; - chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & - IN_BAND_FILTER; - chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & - IN_BAND_FILTER; - - chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; - chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; - chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; - - spin_unlock_bh(&priv->statistics.lock); - - data->beacon_count++; - - data->chain_noise_a = (chain_noise_a + data->chain_noise_a); - data->chain_noise_b = (chain_noise_b + data->chain_noise_b); - data->chain_noise_c = (chain_noise_c + data->chain_noise_c); - - data->chain_signal_a = (chain_sig_a + data->chain_signal_a); - data->chain_signal_b = (chain_sig_b + data->chain_signal_b); - data->chain_signal_c = (chain_sig_c + data->chain_signal_c); - - IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", - rxon_chnum, rxon_band24, data->beacon_count); - IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", - chain_sig_a, chain_sig_b, chain_sig_c); - IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", - chain_noise_a, chain_noise_b, chain_noise_c); - - /* If this is the "chain_noise_num_beacons", determine: - * 1) Disconnected antennas (using signal strengths) - * 2) Differential gain (using silence noise) to balance receivers */ - if (data->beacon_count != IWL_CAL_NUM_BEACONS) - return; - - /* Analyze signal for disconnected antenna */ - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist) { - /* Disable disconnected antenna algorithm for advanced - bt coex, assuming valid antennas are connected */ - data->active_chains = priv->nvm_data->valid_rx_ant; - for (i = 0; i < NUM_RX_CHAINS; i++) - if (!(data->active_chains & (1<disconn_array[i] = 1; - } else - iwl_find_disconn_antenna(priv, average_sig, data); - - /* Analyze noise for rx balance */ - average_noise[0] = data->chain_noise_a / IWL_CAL_NUM_BEACONS; - average_noise[1] = data->chain_noise_b / IWL_CAL_NUM_BEACONS; - average_noise[2] = data->chain_noise_c / IWL_CAL_NUM_BEACONS; - - for (i = 0; i < NUM_RX_CHAINS; i++) { - if (!(data->disconn_array[i]) && - (average_noise[i] <= min_average_noise)) { - /* This means that chain i is active and has - * lower noise values so far: */ - min_average_noise = average_noise[i]; - min_average_noise_antenna_i = i; - } - } - - IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", - average_noise[0], average_noise[1], - average_noise[2]); - - IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", - min_average_noise, min_average_noise_antenna_i); - - iwlagn_gain_computation( - priv, average_noise, - find_first_chain(priv->nvm_data->valid_rx_ant)); - - /* Some power changes may have been made during the calibration. 
- * Update and commit the RXON - */ - iwl_update_chain_flags(priv); - - data->state = IWL_CHAIN_NOISE_DONE; - iwl_power_update_mode(priv, false); -} - -void iwl_reset_run_time_calib(struct iwl_priv *priv) -{ - int i; - memset(&(priv->sensitivity_data), 0, - sizeof(struct iwl_sensitivity_data)); - memset(&(priv->chain_noise_data), 0, - sizeof(struct iwl_chain_noise_data)); - for (i = 0; i < NUM_RX_CHAINS; i++) - priv->chain_noise_data.delta_gain_code[i] = - CHAIN_NOISE_DELTA_GAIN_INIT_VAL; - - /* Ask for statistics now, the uCode will send notification - * periodically after association */ - iwl_send_statistics_request(priv, CMD_ASYNC, true); -} diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h deleted file mode 100644 index aeae4e80ea40..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/calib.h +++ /dev/null @@ -1,74 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ -#ifndef __iwl_calib_h__ -#define __iwl_calib_h__ - -#include "dev.h" -#include "commands.h" - -void iwl_chain_noise_calibration(struct iwl_priv *priv); -void iwl_sensitivity_calibration(struct iwl_priv *priv); - -void iwl_init_sensitivity(struct iwl_priv *priv); -void iwl_reset_run_time_calib(struct iwl_priv *priv); - -#endif /* __iwl_calib_h__ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h deleted file mode 100644 index 7a34e4d158d1..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/commands.h +++ /dev/null @@ -1,4008 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -/* - * Please use this file (commands.h) only for uCode API definitions. - * Please use iwl-xxxx-hw.h for hardware-related definitions. - * Please use dev.h for driver implementation definitions. - */ - -#ifndef __iwl_commands_h__ -#define __iwl_commands_h__ - -#include -#include - - -enum { - REPLY_ALIVE = 0x1, - REPLY_ERROR = 0x2, - REPLY_ECHO = 0x3, /* test command */ - - /* RXON and QOS commands */ - REPLY_RXON = 0x10, - REPLY_RXON_ASSOC = 0x11, - REPLY_QOS_PARAM = 0x13, - REPLY_RXON_TIMING = 0x14, - - /* Multi-Station support */ - REPLY_ADD_STA = 0x18, - REPLY_REMOVE_STA = 0x19, - REPLY_REMOVE_ALL_STA = 0x1a, /* not used */ - REPLY_TXFIFO_FLUSH = 0x1e, - - /* Security */ - REPLY_WEPKEY = 0x20, - - /* RX, TX, LEDs */ - REPLY_TX = 0x1c, - REPLY_LEDS_CMD = 0x48, - REPLY_TX_LINK_QUALITY_CMD = 0x4e, - - /* WiMAX coexistence */ - COEX_PRIORITY_TABLE_CMD = 0x5a, - COEX_MEDIUM_NOTIFICATION = 0x5b, - COEX_EVENT_CMD = 0x5c, - - /* Calibration */ - TEMPERATURE_NOTIFICATION = 0x62, - CALIBRATION_CFG_CMD = 0x65, - CALIBRATION_RES_NOTIFICATION = 0x66, - CALIBRATION_COMPLETE_NOTIFICATION = 0x67, - - /* 802.11h related */ - REPLY_QUIET_CMD = 0x71, /* not used */ - REPLY_CHANNEL_SWITCH = 0x72, - CHANNEL_SWITCH_NOTIFICATION = 0x73, - REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, - SPECTRUM_MEASURE_NOTIFICATION = 0x75, - - /* Power Management */ - POWER_TABLE_CMD = 0x77, - PM_SLEEP_NOTIFICATION = 0x7A, - PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, - - /* Scan commands and notifications */ - REPLY_SCAN_CMD = 0x80, - REPLY_SCAN_ABORT_CMD = 0x81, - SCAN_START_NOTIFICATION = 0x82, - SCAN_RESULTS_NOTIFICATION = 0x83, - SCAN_COMPLETE_NOTIFICATION = 0x84, - - /* IBSS/AP commands */ - BEACON_NOTIFICATION = 0x90, - REPLY_TX_BEACON = 0x91, - WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */ - - /* Miscellaneous commands */ - REPLY_TX_POWER_DBM_CMD = 0x95, - QUIET_NOTIFICATION = 0x96, /* not used */ - REPLY_TX_PWR_TABLE_CMD = 0x97, - REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */ - TX_ANT_CONFIGURATION_CMD = 0x98, - MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ - - /* Bluetooth device coexistence config command */ - REPLY_BT_CONFIG = 0x9b, - - /* Statistics */ - REPLY_STATISTICS_CMD = 0x9c, - STATISTICS_NOTIFICATION = 0x9d, - - /* RF-KILL commands and notifications */ - REPLY_CARD_STATE_CMD = 0xa0, - CARD_STATE_NOTIFICATION = 0xa1, - - /* Missed beacons notification */ - MISSED_BEACONS_NOTIFICATION = 0xa2, - - REPLY_CT_KILL_CONFIG_CMD = 0xa4, - SENSITIVITY_CMD = 0xa8, - REPLY_PHY_CALIBRATION_CMD = 0xb0, - REPLY_RX_PHY_CMD = 0xc0, - REPLY_RX_MPDU_CMD = 0xc1, - REPLY_RX = 0xc3, - REPLY_COMPRESSED_BA = 0xc5, - - /* BT Coex */ - REPLY_BT_COEX_PRIO_TABLE = 0xcc, - REPLY_BT_COEX_PROT_ENV = 0xcd, - REPLY_BT_COEX_PROFILE_NOTIF = 0xce, - - /* PAN commands */ - REPLY_WIPAN_PARAMS = 0xb2, - REPLY_WIPAN_RXON = 0xb3, /* use REPLY_RXON structure */ - REPLY_WIPAN_RXON_TIMING = 0xb4, /* use REPLY_RXON_TIMING 
structure */ - REPLY_WIPAN_RXON_ASSOC = 0xb6, /* use REPLY_RXON_ASSOC structure */ - REPLY_WIPAN_QOS_PARAM = 0xb7, /* use REPLY_QOS_PARAM structure */ - REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */ - REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9, - REPLY_WIPAN_NOA_NOTIFICATION = 0xbc, - REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd, - - REPLY_WOWLAN_PATTERNS = 0xe0, - REPLY_WOWLAN_WAKEUP_FILTER = 0xe1, - REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2, - REPLY_WOWLAN_TKIP_PARAMS = 0xe3, - REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4, - REPLY_WOWLAN_GET_STATUS = 0xe5, - REPLY_D3_CONFIG = 0xd3, - - REPLY_MAX = 0xff -}; - -/* - * Minimum number of queues. MAX_NUM is defined in hw specific files. - * Set the minimum to accommodate - * - 4 standard TX queues - * - the command queue - * - 4 PAN TX queues - * - the PAN multicast queue, and - * - the AUX (TX during scan dwell) queue. - */ -#define IWL_MIN_NUM_QUEUES 11 - -/* - * Command queue depends on iPAN support. - */ -#define IWL_DEFAULT_CMD_QUEUE_NUM 4 -#define IWL_IPAN_CMD_QUEUE_NUM 9 - -#define IWL_TX_FIFO_BK 0 /* shared */ -#define IWL_TX_FIFO_BE 1 -#define IWL_TX_FIFO_VI 2 /* shared */ -#define IWL_TX_FIFO_VO 3 -#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK -#define IWL_TX_FIFO_BE_IPAN 4 -#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI -#define IWL_TX_FIFO_VO_IPAN 5 -/* re-uses the VO FIFO, uCode will properly flush/schedule */ -#define IWL_TX_FIFO_AUX 5 -#define IWL_TX_FIFO_UNUSED 255 - -#define IWLAGN_CMD_FIFO_NUM 7 - -/* - * This queue number is required for proper operation - * because the ucode will stop/start the scheduler as - * required. - */ -#define IWL_IPAN_MCAST_QUEUE 8 - -/****************************************************************************** - * (0) - * Commonly used structures and definitions: - * Command header, rate_n_flags, txpower - * - *****************************************************************************/ - -/** - * iwlagn rate_n_flags bit fields - * - * rate_n_flags format is used in following iwlagn commands: - * REPLY_RX (response only) - * REPLY_RX_MPDU (response only) - * REPLY_TX (both command and response) - * REPLY_TX_LINK_QUALITY_CMD - * - * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): - * 2-0: 0) 6 Mbps - * 1) 12 Mbps - * 2) 18 Mbps - * 3) 24 Mbps - * 4) 36 Mbps - * 5) 48 Mbps - * 6) 54 Mbps - * 7) 60 Mbps - * - * 4-3: 0) Single stream (SISO) - * 1) Dual stream (MIMO) - * 2) Triple stream (MIMO) - * - * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data - * - * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"): - * 3-0: 0xD) 6 Mbps - * 0xF) 9 Mbps - * 0x5) 12 Mbps - * 0x7) 18 Mbps - * 0x9) 24 Mbps - * 0xB) 36 Mbps - * 0x1) 48 Mbps - * 0x3) 54 Mbps - * - * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"): - * 6-0: 10) 1 Mbps - * 20) 2 Mbps - * 55) 5.5 Mbps - * 110) 11 Mbps - */ -#define RATE_MCS_CODE_MSK 0x7 -#define RATE_MCS_SPATIAL_POS 3 -#define RATE_MCS_SPATIAL_MSK 0x18 -#define RATE_MCS_HT_DUP_POS 5 -#define RATE_MCS_HT_DUP_MSK 0x20 -/* Both legacy and HT use bits 7:0 as the CCK/OFDM rate or HT MCS */ -#define RATE_MCS_RATE_MSK 0xff - -/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */ -#define RATE_MCS_FLAGS_POS 8 -#define RATE_MCS_HT_POS 8 -#define RATE_MCS_HT_MSK 0x100 - -/* Bit 9: (1) CCK, (0) OFDM. 
HT (bit 8) must be "0" for this bit to be valid */ -#define RATE_MCS_CCK_POS 9 -#define RATE_MCS_CCK_MSK 0x200 - -/* Bit 10: (1) Use Green Field preamble */ -#define RATE_MCS_GF_POS 10 -#define RATE_MCS_GF_MSK 0x400 - -/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */ -#define RATE_MCS_HT40_POS 11 -#define RATE_MCS_HT40_MSK 0x800 - -/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */ -#define RATE_MCS_DUP_POS 12 -#define RATE_MCS_DUP_MSK 0x1000 - -/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ -#define RATE_MCS_SGI_POS 13 -#define RATE_MCS_SGI_MSK 0x2000 - -/** - * rate_n_flags Tx antenna masks - * 4965 has 2 transmitters - * 5100 has 1 transmitter B - * 5150 has 1 transmitter A - * 5300 has 3 transmitters - * 5350 has 3 transmitters - * bit14:16 - */ -#define RATE_MCS_ANT_POS 14 -#define RATE_MCS_ANT_A_MSK 0x04000 -#define RATE_MCS_ANT_B_MSK 0x08000 -#define RATE_MCS_ANT_C_MSK 0x10000 -#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK) -#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) -#define RATE_ANT_NUM 3 - -#define POWER_TABLE_NUM_ENTRIES 33 -#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 -#define POWER_TABLE_CCK_ENTRY 32 - -#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 -#define IWL_PWR_CCK_ENTRIES 2 - -/** - * struct tx_power_dual_stream - * - * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH - * - * Same format as iwl_tx_power_dual_stream, but __le32 - */ -struct tx_power_dual_stream { - __le32 dw; -} __packed; - -/** - * Command REPLY_TX_POWER_DBM_CMD = 0x98 - * struct iwlagn_tx_power_dbm_cmd - */ -#define IWLAGN_TX_POWER_AUTO 0x7f -#define IWLAGN_TX_POWER_NO_CLOSED (0x1 << 6) - -struct iwlagn_tx_power_dbm_cmd { - s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ - u8 flags; - s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ - u8 reserved; -} __packed; - -/** - * Command TX_ANT_CONFIGURATION_CMD = 0x98 - * This command is used to configure valid Tx antenna. - * By default uCode concludes the valid antenna according to the radio flavor. - * This command enables the driver to override/modify this conclusion. - */ -struct iwl_tx_ant_config_cmd { - __le32 valid; -} __packed; - -/****************************************************************************** - * (0a) - * Alive and Error Commands & Responses: - * - *****************************************************************************/ - -#define UCODE_VALID_OK cpu_to_le32(0x1) - -/** - * REPLY_ALIVE = 0x1 (response only, not a command) - * - * uCode issues this "alive" notification once the runtime image is ready - * to receive commands from the driver. This is the *second* "alive" - * notification that the driver will receive after rebooting uCode; - * this "alive" is indicated by subtype field != 9. - * - * See comments documenting "BSM" (bootstrap state machine). - * - * This response includes two pointers to structures within the device's - * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging: - * - * 1) log_event_table_ptr indicates base of the event log. This traces - * a 256-entry history of uCode execution within a circular buffer. 
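
One possible way to walk such a circular event log, assuming the header and entries have already been copied out of device SRAM into host memory. The field names follow the header and entry layout described in the remainder of this comment, but the helper itself is only a sketch, not the driver's actual dump routine.

    #include <stdint.h>
    #include <stdio.h>

    struct ex_event_log_hdr {
        uint32_t capacity;      /* log_size: number of entries */
        uint32_t with_tstamp;   /* type: 1 = 3-word entries, 0 = 2-word entries */
        uint32_t wraps;         /* times the log wrapped around */
        uint32_t next_write;    /* next entry index uCode would fill */
    };

    /*
     * Walk oldest-to-newest: once the log has wrapped, the oldest entry is
     * the one uCode is about to overwrite, i.e. next_write.
     */
    static void ex_dump_event_log(const struct ex_event_log_hdr *hdr,
                                  const uint32_t *words)
    {
        uint32_t stride = hdr->with_tstamp ? 3 : 2;
        uint32_t count = hdr->wraps ? hdr->capacity : hdr->next_write;
        uint32_t start = hdr->wraps ? hdr->next_write : 0;
        uint32_t i;

        for (i = 0; i < count; i++) {
            const uint32_t *e = words + ((start + i) % hdr->capacity) * stride;

            if (hdr->with_tstamp)
                printf("event 0x%x ts %u data 0x%x\n", e[0], e[1], e[2]);
            else
                printf("event 0x%x data 0x%x\n", e[0], e[1]);
        }
    }
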
- * Its header format is: - * - * __le32 log_size; log capacity (in number of entries) - * __le32 type; (1) timestamp with each entry, (0) no timestamp - * __le32 wraps; # times uCode has wrapped to top of circular buffer - * __le32 write_index; next circular buffer entry that uCode would fill - * - * The header is followed by the circular buffer of log entries. Entries - * with timestamps have the following format: - * - * __le32 event_id; range 0 - 1500 - * __le32 timestamp; low 32 bits of TSF (of network, if associated) - * __le32 data; event_id-specific data value - * - * Entries without timestamps contain only event_id and data. - * - * - * 2) error_event_table_ptr indicates base of the error log. This contains - * information about any uCode error that occurs. For agn, the format - * of the error log is defined by struct iwl_error_event_table. - * - * The Linux driver can print both logs to the system log when a uCode error - * occurs. - */ - -/* - * Note: This structure is read from the device with IO accesses, - * and the reading already does the endian conversion. As it is - * read with u32-sized accesses, any members with a different size - * need to be ordered correctly though! - */ -struct iwl_error_event_table { - u32 valid; /* (nonzero) valid, (0) log is empty */ - u32 error_id; /* type of error */ - u32 pc; /* program counter */ - u32 blink1; /* branch link */ - u32 blink2; /* branch link */ - u32 ilink1; /* interrupt link */ - u32 ilink2; /* interrupt link */ - u32 data1; /* error-specific data */ - u32 data2; /* error-specific data */ - u32 line; /* source code line of error */ - u32 bcon_time; /* beacon timer */ - u32 tsf_low; /* network timestamp function timer */ - u32 tsf_hi; /* network timestamp function timer */ - u32 gp1; /* GP1 timer register */ - u32 gp2; /* GP2 timer register */ - u32 gp3; /* GP3 timer register */ - u32 ucode_ver; /* uCode version */ - u32 hw_ver; /* HW Silicon version */ - u32 brd_ver; /* HW board version */ - u32 log_pc; /* log program counter */ - u32 frame_ptr; /* frame pointer */ - u32 stack_ptr; /* stack pointer */ - u32 hcmd; /* last host command header */ - u32 isr0; /* isr status register LMPM_NIC_ISR0: - * rxtx_flag */ - u32 isr1; /* isr status register LMPM_NIC_ISR1: - * host_flag */ - u32 isr2; /* isr status register LMPM_NIC_ISR2: - * enc_flag */ - u32 isr3; /* isr status register LMPM_NIC_ISR3: - * time_flag */ - u32 isr4; /* isr status register LMPM_NIC_ISR4: - * wico interrupt */ - u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ - u32 wait_event; /* wait event() caller address */ - u32 l2p_control; /* L2pControlField */ - u32 l2p_duration; /* L2pDurationField */ - u32 l2p_mhvalid; /* L2pMhValidBits */ - u32 l2p_addr_match; /* L2pAddrMatchStat */ - u32 lmpm_pmg_sel; /* indicate which clocks are turned on - * (LMPM_PMG_SEL) */ - u32 u_timestamp; /* indicate when the date and time of the - * compilation */ - u32 flow_handler; /* FH read/write pointers, RX credit */ -} __packed; - -struct iwl_alive_resp { - u8 ucode_minor; - u8 ucode_major; - __le16 reserved1; - u8 sw_rev[8]; - u8 ver_type; - u8 ver_subtype; /* not "9" for runtime alive */ - __le16 reserved2; - __le32 log_event_table_ptr; /* SRAM address for event log */ - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 timestamp; - __le32 is_valid; -} __packed; - -/* - * REPLY_ERROR = 0x2 (response only, not a command) - */ -struct iwl_error_resp { - __le32 error_type; - u8 cmd_id; - u8 reserved1; - __le16 bad_cmd_seq_num; - __le32 error_info; - 
__le64 timestamp; -} __packed; - -/****************************************************************************** - * (1) - * RXON Commands & Responses: - * - *****************************************************************************/ - -/* - * Rx config defines & structure - */ -/* rx_config device types */ -enum { - RXON_DEV_TYPE_AP = 1, - RXON_DEV_TYPE_ESS = 3, - RXON_DEV_TYPE_IBSS = 4, - RXON_DEV_TYPE_SNIFFER = 6, - RXON_DEV_TYPE_CP = 7, - RXON_DEV_TYPE_2STA = 8, - RXON_DEV_TYPE_P2P = 9, -}; - - -#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) -#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) -#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) -#define RXON_RX_CHAIN_VALID_POS (1) -#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4) -#define RXON_RX_CHAIN_FORCE_SEL_POS (4) -#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7) -#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7) -#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10) -#define RXON_RX_CHAIN_CNT_POS (10) -#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12) -#define RXON_RX_CHAIN_MIMO_CNT_POS (12) -#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14) -#define RXON_RX_CHAIN_MIMO_FORCE_POS (14) - -/* rx_config flags */ -/* band & modulation selection */ -#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0) -#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1) -/* auto detection enable */ -#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2) -/* TGg protection when tx */ -#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3) -/* cck short slot & preamble */ -#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4) -#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5) -/* antenna selection */ -#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7) -#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00) -#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8) -#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9) -/* radar detection enable */ -#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12) -#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13) -/* rx response to host with 8-byte TSF -* (according to ON_AIR deassertion) */ -#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) - - -/* HT flags */ -#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) -#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) - -#define RXON_FLG_HT_OPERATING_MODE_POS (23) - -#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23) -#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23) - -#define RXON_FLG_CHANNEL_MODE_POS (25) -#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25) - -/* channel mode */ -enum { - CHANNEL_MODE_LEGACY = 0, - CHANNEL_MODE_PURE_40 = 1, - CHANNEL_MODE_MIXED = 2, - CHANNEL_MODE_RESERVED = 3, -}; -#define RXON_FLG_CHANNEL_MODE_LEGACY cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS) -#define RXON_FLG_CHANNEL_MODE_PURE_40 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS) -#define RXON_FLG_CHANNEL_MODE_MIXED cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS) - -/* CTS to self (if spec allows) flag */ -#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30) - -/* rx_config filter flags */ -/* accept all data frames */ -#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0) -/* pass control & management to host */ -#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1) -/* accept multi-cast */ -#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2) -/* don't decrypt uni-cast frames */ -#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3) -/* don't decrypt multi-cast frames */ -#define 
RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4) -/* STA is associated */ -#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5) -/* transfer to host non bssid beacons in associated state */ -#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) - -/** - * REPLY_RXON = 0x10 (command, has simple generic response) - * - * RXON tunes the radio tuner to a service channel, and sets up a number - * of parameters that are used primarily for Rx, but also for Tx operations. - * - * NOTE: When tuning to a new channel, driver must set the - * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent - * info within the device, including the station tables, tx retry - * rate tables, and txpower tables. Driver must build a new station - * table and txpower table before transmitting anything on the RXON - * channel. - * - * NOTE: All RXONs wipe clean the internal txpower table. Driver must - * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), - * regardless of whether RXON_FILTER_ASSOC_MSK is set. - */ - -struct iwl_rxon_cmd { - u8 node_addr[6]; - __le16 reserved1; - u8 bssid_addr[6]; - __le16 reserved2; - u8 wlap_bssid_addr[6]; - __le16 reserved3; - u8 dev_type; - u8 air_propagation; - __le16 rx_chain; - u8 ofdm_basic_rates; - u8 cck_basic_rates; - __le16 assoc_id; - __le32 flags; - __le32 filter_flags; - __le16 channel; - u8 ofdm_ht_single_stream_basic_rates; - u8 ofdm_ht_dual_stream_basic_rates; - u8 ofdm_ht_triple_stream_basic_rates; - u8 reserved5; - __le16 acquisition_data; - __le16 reserved6; -} __packed; - -/* - * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) - */ -struct iwl_rxon_assoc_cmd { - __le32 flags; - __le32 filter_flags; - u8 ofdm_basic_rates; - u8 cck_basic_rates; - __le16 reserved1; - u8 ofdm_ht_single_stream_basic_rates; - u8 ofdm_ht_dual_stream_basic_rates; - u8 ofdm_ht_triple_stream_basic_rates; - u8 reserved2; - __le16 rx_chain_select_flags; - __le16 acquisition_data; - __le32 reserved3; -} __packed; - -#define IWL_CONN_MAX_LISTEN_INTERVAL 10 -#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ - -/* - * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) - */ -struct iwl_rxon_time_cmd { - __le64 timestamp; - __le16 beacon_interval; - __le16 atim_window; - __le32 beacon_init_val; - __le16 listen_interval; - u8 dtim_period; - u8 delta_cp_bss_tbtts; -} __packed; - -/* - * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) - */ -/** - * struct iwl5000_channel_switch_cmd - * @band: 0- 5.2GHz, 1- 2.4GHz - * @expect_beacon: 0- resume transmits after channel switch - * 1- wait for beacon to resume transmits - * @channel: new channel number - * @rxon_flags: Rx on flags - * @rxon_filter_flags: filtering parameters - * @switch_time: switch time in extended beacon format - * @reserved: reserved bytes - */ -struct iwl5000_channel_switch_cmd { - u8 band; - u8 expect_beacon; - __le16 channel; - __le32 rxon_flags; - __le32 rxon_filter_flags; - __le32 switch_time; - __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; -} __packed; - -/** - * struct iwl6000_channel_switch_cmd - * @band: 0- 5.2GHz, 1- 2.4GHz - * @expect_beacon: 0- resume transmits after channel switch - * 1- wait for beacon to resume transmits - * @channel: new channel number - * @rxon_flags: Rx on flags - * @rxon_filter_flags: filtering parameters - * @switch_time: switch time in extended beacon format - * @reserved: reserved bytes - */ -struct iwl6000_channel_switch_cmd { - u8 band; - u8 expect_beacon; - __le16 channel; - __le32 rxon_flags; - __le32 
rxon_filter_flags; - __le32 switch_time; - __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; -} __packed; - -/* - * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) - */ -struct iwl_csa_notification { - __le16 band; - __le16 channel; - __le32 status; /* 0 - OK, 1 - fail */ -} __packed; - -/****************************************************************************** - * (2) - * Quality-of-Service (QOS) Commands & Responses: - * - *****************************************************************************/ - -/** - * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM - * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd - * - * @cw_min: Contention window, start value in numbers of slots. - * Should be a power-of-2, minus 1. Device's default is 0x0f. - * @cw_max: Contention window, max value in numbers of slots. - * Should be a power-of-2, minus 1. Device's default is 0x3f. - * @aifsn: Number of slots in Arbitration Interframe Space (before - * performing random backoff timing prior to Tx). Device default 1. - * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. - * - * Device will automatically increase contention window by (2*CW) + 1 for each - * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW - * value, to cap the CW value. - */ -struct iwl_ac_qos { - __le16 cw_min; - __le16 cw_max; - u8 aifsn; - u8 reserved1; - __le16 edca_txop; -} __packed; - -/* QoS flags defines */ -#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) -#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02) -#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10) - -/* Number of Access Categories (AC) (EDCA), queues 0..3 */ -#define AC_NUM 4 - -/* - * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) - * - * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs - * 0: Background, 1: Best Effort, 2: Video, 3: Voice. - */ -struct iwl_qosparam_cmd { - __le32 qos_flags; - struct iwl_ac_qos ac[AC_NUM]; -} __packed; - -/****************************************************************************** - * (3) - * Add/Modify Stations Commands & Responses: - * - *****************************************************************************/ -/* - * Multi station support - */ - -/* Special, dedicated locations within device's station table */ -#define IWL_AP_ID 0 -#define IWL_AP_ID_PAN 1 -#define IWL_STA_ID 2 -#define IWLAGN_PAN_BCAST_ID 14 -#define IWLAGN_BROADCAST_ID 15 -#define IWLAGN_STATION_COUNT 16 - -#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT - -#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) -#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) -#define STA_FLG_PAN_STATION cpu_to_le32(1 << 13) -#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) -#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) -#define STA_FLG_MAX_AGG_SIZE_POS (19) -#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19) -#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21) -#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22) -#define STA_FLG_AGG_MPDU_DENSITY_POS (23) -#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23) - -/* Use in mode field. 
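The contention-window growth described in the iwl_ac_qos comment above (new CW = 2*CW + 1, capped by ANDing with cw_max) reduces to a few lines of standalone C; the device defaults 0x0f and 0x3f quoted above are assumed:

#include <stdio.h>

int main(void)
{
	unsigned int cw_min = 0x0f;	/* device default quoted above */
	unsigned int cw_max = 0x3f;	/* device default quoted above */
	unsigned int cw = cw_min;
	int retry;

	for (retry = 1; retry <= 4; retry++) {
		cw = (2 * cw + 1) & cw_max;	/* grow, then cap via bit mask */
		printf("retry %d: cw = 0x%02x\n", retry, cw);
	}
	return 0;
}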
1: modify existing entry, 0: add new station entry */ -#define STA_CONTROL_MODIFY_MSK 0x01 - -/* key flags __le16*/ -#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007) -#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000) -#define STA_KEY_FLG_WEP cpu_to_le16(0x0001) -#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002) -#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003) - -#define STA_KEY_FLG_KEYID_POS 8 -#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800) -/* wep key is either from global key (0) or from station info array (1) */ -#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008) - -/* wep key in STA: 5-bytes (0) or 13-bytes (1) */ -#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000) -#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000) -#define STA_KEY_MAX_NUM 8 -#define STA_KEY_MAX_NUM_PAN 16 -/* must not match WEP_INVALID_OFFSET */ -#define IWLAGN_HW_KEY_DEFAULT 0xfe - -/* Flags indicate whether to modify vs. don't change various station params */ -#define STA_MODIFY_KEY_MASK 0x01 -#define STA_MODIFY_TID_DISABLE_TX 0x02 -#define STA_MODIFY_TX_RATE_MSK 0x04 -#define STA_MODIFY_ADDBA_TID_MSK 0x08 -#define STA_MODIFY_DELBA_TID_MSK 0x10 -#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 - -/* agn */ -struct iwl_keyinfo { - __le16 key_flags; - u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ - u8 reserved1; - __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ - u8 key_offset; - u8 reserved2; - u8 key[16]; /* 16-byte unicast decryption key */ - __le64 tx_secur_seq_cnt; - __le64 hw_tkip_mic_rx_key; - __le64 hw_tkip_mic_tx_key; -} __packed; - -/** - * struct sta_id_modify - * @addr[ETH_ALEN]: station's MAC address - * @sta_id: index of station in uCode's station table - * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change - * - * Driver selects unused table index when adding new station, - * or the index to a pre-existing station entry when modifying that station. - * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). - * - * modify_mask flags select which parameters to modify vs. leave alone. - */ -struct sta_id_modify { - u8 addr[ETH_ALEN]; - __le16 reserved1; - u8 sta_id; - u8 modify_mask; - __le16 reserved2; -} __packed; - -/* - * REPLY_ADD_STA = 0x18 (command) - * - * The device contains an internal table of per-station information, - * with info on security keys, aggregation parameters, and Tx rates for - * initial Tx attempt and any retries (agn devices uses - * REPLY_TX_LINK_QUALITY_CMD, - * - * REPLY_ADD_STA sets up the table entry for one station, either creating - * a new entry, or modifying a pre-existing one. - * - * NOTE: RXON command (without "associated" bit set) wipes the station table - * clean. Moving into RF_KILL state does this also. Driver must set up - * new station table before transmitting anything on the RXON channel - * (except active scans or active measurements; those commands carry - * their own txpower/rate setup data). - * - * When getting started on a new channel, driver must set up the - * IWL_BROADCAST_ID entry (last entry in the table). For a client - * station in a BSS, once an AP is selected, driver sets up the AP STA - * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP - * are all that are needed for a BSS client station. If the device is - * used as AP, or in an IBSS network, driver must set up station table - * entries for all STAs in network, starting with index IWL_STA_ID. 
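A minimal, standalone sketch of how the mode and modify_mask bits above combine when enabling block-ack on one TID of an already-added station. The struct here is a cut-down host-order stand-in, not the real command layout that follows below; the field names only mirror the documented ones for readability:

#include <stdio.h>
#include <string.h>

/* Values copied from the definitions above. */
#define STA_CONTROL_MODIFY_MSK		0x01	/* mode: modify existing entry */
#define STA_MODIFY_ADDBA_TID_MSK	0x08	/* modify_mask: add block-ack TID */

/* Cut-down, host-order stand-in for the real command (illustration only). */
struct addsta_sketch {
	unsigned char  mode;
	unsigned char  sta_id;
	unsigned char  modify_mask;
	unsigned char  add_immediate_ba_tid;
	unsigned short add_immediate_ba_ssn;
};

int main(void)
{
	struct addsta_sketch cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.mode = STA_CONTROL_MODIFY_MSK;		/* existing station */
	cmd.sta_id = 2;					/* e.g. IWL_STA_ID */
	cmd.modify_mask = STA_MODIFY_ADDBA_TID_MSK;	/* only touch the BA TID */
	cmd.add_immediate_ba_tid = 5;
	cmd.add_immediate_ba_ssn = 0x120;

	printf("mode=0x%02x modify_mask=0x%02x tid=%u ssn=0x%03x\n",
	       cmd.mode, cmd.modify_mask,
	       cmd.add_immediate_ba_tid, cmd.add_immediate_ba_ssn);
	return 0;
}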
- */ - -struct iwl_addsta_cmd { - u8 mode; /* 1: modify existing, 0: add new station */ - u8 reserved[3]; - struct sta_id_modify sta; - struct iwl_keyinfo key; - __le32 station_flags; /* STA_FLG_* */ - __le32 station_flags_msk; /* STA_FLG_* */ - - /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) - * corresponding to bit (e.g. bit 5 controls TID 5). - * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ - __le16 tid_disable_tx; - __le16 legacy_reserved; - - /* TID for which to add block-ack support. - * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ - u8 add_immediate_ba_tid; - - /* TID for which to remove block-ack support. - * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ - u8 remove_immediate_ba_tid; - - /* Starting Sequence Number for added block-ack support. - * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ - __le16 add_immediate_ba_ssn; - - /* - * Number of packets OK to transmit to station even though - * it is asleep -- used to synchronise PS-poll and u-APSD - * responses while ucode keeps track of STA sleep state. - */ - __le16 sleep_tx_count; - - __le16 reserved2; -} __packed; - - -#define ADD_STA_SUCCESS_MSK 0x1 -#define ADD_STA_NO_ROOM_IN_TABLE 0x2 -#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 -#define ADD_STA_MODIFY_NON_EXIST_STA 0x8 -/* - * REPLY_ADD_STA = 0x18 (response) - */ -struct iwl_add_sta_resp { - u8 status; /* ADD_STA_* */ -} __packed; - -#define REM_STA_SUCCESS_MSK 0x1 -/* - * REPLY_REM_STA = 0x19 (response) - */ -struct iwl_rem_sta_resp { - u8 status; -} __packed; - -/* - * REPLY_REM_STA = 0x19 (command) - */ -struct iwl_rem_sta_cmd { - u8 num_sta; /* number of removed stations */ - u8 reserved[3]; - u8 addr[ETH_ALEN]; /* MAC addr of the first station */ - u8 reserved2[2]; -} __packed; - - -/* WiFi queues mask */ -#define IWL_SCD_BK_MSK BIT(0) -#define IWL_SCD_BE_MSK BIT(1) -#define IWL_SCD_VI_MSK BIT(2) -#define IWL_SCD_VO_MSK BIT(3) -#define IWL_SCD_MGMT_MSK BIT(3) - -/* PAN queues mask */ -#define IWL_PAN_SCD_BK_MSK BIT(4) -#define IWL_PAN_SCD_BE_MSK BIT(5) -#define IWL_PAN_SCD_VI_MSK BIT(6) -#define IWL_PAN_SCD_VO_MSK BIT(7) -#define IWL_PAN_SCD_MGMT_MSK BIT(7) -#define IWL_PAN_SCD_MULTICAST_MSK BIT(8) - -#define IWL_AGG_TX_QUEUE_MSK 0xffc00 - -#define IWL_DROP_ALL BIT(1) - -/* - * REPLY_TXFIFO_FLUSH = 0x1e(command and response) - * - * When using full FIFO flush this command checks the scheduler HW block WR/RD - * pointers to check if all the frames were transferred by DMA into the - * relevant TX FIFO queue. Only when the DMA is finished and the queue is - * empty the command can finish. - * This command is used to flush the TXFIFO from transmit commands, it may - * operate on single or multiple queues, the command queue can't be flushed by - * this command. The command response is returned when all the queue flush - * operations are done. Each TX command flushed return response with the FLUSH - * status set in the TX response status. When FIFO flush operation is used, - * the flush operation ends when both the scheduler DMA done and TXFIFO empty - * are set. - * - * @queue_control: bit mask for which queues to flush - * @flush_control: flush controls - * 0: Dump single MSDU - * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable. 
- * 2: Dump all FIFO - */ -struct iwl_txfifo_flush_cmd_v3 { - __le32 queue_control; - __le16 flush_control; - __le16 reserved; -} __packed; - -struct iwl_txfifo_flush_cmd_v2 { - __le16 queue_control; - __le16 flush_control; -} __packed; - -/* - * REPLY_WEP_KEY = 0x20 - */ -struct iwl_wep_key { - u8 key_index; - u8 key_offset; - u8 reserved1[2]; - u8 key_size; - u8 reserved2[3]; - u8 key[16]; -} __packed; - -struct iwl_wep_cmd { - u8 num_keys; - u8 global_key_type; - u8 flags; - u8 reserved; - struct iwl_wep_key key[0]; -} __packed; - -#define WEP_KEY_WEP_TYPE 1 -#define WEP_KEYS_MAX 4 -#define WEP_INVALID_OFFSET 0xff -#define WEP_KEY_LEN_64 5 -#define WEP_KEY_LEN_128 13 - -/****************************************************************************** - * (4) - * Rx Responses: - * - *****************************************************************************/ - -#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0) -#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1) - -#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0) -#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) -#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) -#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) -#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70 -#define RX_RES_PHY_FLAGS_ANTENNA_POS 4 -#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7) - -#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) -#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) -#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) -#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) -#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) -#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8) - -#define RX_RES_STATUS_STATION_FOUND (1<<6) -#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7) - -#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) -#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) -#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) -#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) -#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11) - -#define RX_MPDU_RES_STATUS_ICV_OK (0x20) -#define RX_MPDU_RES_STATUS_MIC_OK (0x40) -#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) -#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) - - -#define IWLAGN_RX_RES_PHY_CNT 8 -#define IWLAGN_RX_RES_AGC_IDX 1 -#define IWLAGN_RX_RES_RSSI_AB_IDX 2 -#define IWLAGN_RX_RES_RSSI_C_IDX 3 -#define IWLAGN_OFDM_AGC_MSK 0xfe00 -#define IWLAGN_OFDM_AGC_BIT_POS 9 -#define IWLAGN_OFDM_RSSI_INBAND_A_BITMSK 0x00ff -#define IWLAGN_OFDM_RSSI_ALLBAND_A_BITMSK 0xff00 -#define IWLAGN_OFDM_RSSI_A_BIT_POS 0 -#define IWLAGN_OFDM_RSSI_INBAND_B_BITMSK 0xff0000 -#define IWLAGN_OFDM_RSSI_ALLBAND_B_BITMSK 0xff000000 -#define IWLAGN_OFDM_RSSI_B_BIT_POS 16 -#define IWLAGN_OFDM_RSSI_INBAND_C_BITMSK 0x00ff -#define IWLAGN_OFDM_RSSI_ALLBAND_C_BITMSK 0xff00 -#define IWLAGN_OFDM_RSSI_C_BIT_POS 0 - -struct iwlagn_non_cfg_phy { - __le32 non_cfg_phy[IWLAGN_RX_RES_PHY_CNT]; /* up to 8 phy entries */ -} __packed; - - -/* - * REPLY_RX = 0xc3 (response only, not a command) - * Used only for legacy (non 11n) frames. - */ -struct iwl_rx_phy_res { - u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ - u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ - u8 stat_id; /* configurable DSP phy data set ID */ - u8 reserved1; - __le64 timestamp; /* TSF at on air rise */ - __le32 beacon_time_stamp; /* beacon at on-air rise */ - __le16 phy_flags; /* general phy flags: band, modulation, ... 
*/ - __le16 channel; /* channel number */ - u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ - __le32 rate_n_flags; /* RATE_MCS_* */ - __le16 byte_count; /* frame's byte-count */ - __le16 frame_time; /* frame's time on the air */ -} __packed; - -struct iwl_rx_mpdu_res_start { - __le16 byte_count; - __le16 reserved; -} __packed; - - -/****************************************************************************** - * (5) - * Tx Commands & Responses: - * - * Driver must place each REPLY_TX command into one of the prioritized Tx - * queues in host DRAM, shared between driver and device (see comments for - * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode - * are preparing to transmit, the device pulls the Tx command over the PCI - * bus via one of the device's Tx DMA channels, to fill an internal FIFO - * from which data will be transmitted. - * - * uCode handles all timing and protocol related to control frames - * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler - * handle reception of block-acks; uCode updates the host driver via - * REPLY_COMPRESSED_BA. - * - * uCode handles retrying Tx when an ACK is expected but not received. - * This includes trying lower data rates than the one requested in the Tx - * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn). - * - * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. - * This command must be executed after every RXON command, before Tx can occur. - *****************************************************************************/ - -/* REPLY_TX Tx flags field */ - -/* - * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it - * before this frame. if CTS-to-self required check - * RXON_FLG_SELF_CTS_EN status. - */ -#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0) - -/* 1: Expect ACK from receiving station - * 0: Don't expect ACK (MAC header's duration field s/b 0) - * Set this for unicast frames, but not broadcast/multicast. */ -#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) - -/* For agn devices: - * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). - * Tx command's initial_rate_index indicates first rate to try; - * uCode walks through table for additional Tx attempts. - * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. - * This rate will be used for all Tx attempts; it will not be scaled. */ -#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4) - -/* 1: Expect immediate block-ack. - * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ -#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) - -/* Tx antenna selection field; reserved (0) for agn devices. */ -#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) - -/* 1: Ignore Bluetooth priority for this frame. - * 0: Delay Tx until Bluetooth device is done (normal usage). */ -#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12) - -/* 1: uCode overrides sequence control field in MAC header. - * 0: Driver provides sequence control field in MAC header. - * Set this for management frames, non-QOS data frames, non-unicast frames, - * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ -#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) - -/* 1: This frame is non-last MPDU; more fragments are coming. - * 0: Last fragment, or not using fragmentation. */ -#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14) - -/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame. - * 0: No TSF required in outgoing frame. 
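To make the tx_flags usage above concrete, a standalone sketch composing flag words for a rate-scaled unicast data frame, a unicast management frame, and a block-ack request, using bit positions quoted from the TX_CMD_FLG_* definitions (host order, without the cpu_to_le32() wrapping used by the real command):

#include <stdio.h>

/* Bit positions copied from the TX_CMD_FLG_* definitions above. */
#define FLG_ACK		(1u << 3)	/* expect an ACK (unicast) */
#define FLG_STA_RATE	(1u << 4)	/* rate-scale via the link quality table */
#define FLG_IMM_BA_RSP	(1u << 6)	/* expect an immediate block-ack */
#define FLG_SEQ_CTL	(1u << 13)	/* uCode overrides the sequence field */

int main(void)
{
	unsigned int data_flags = FLG_ACK | FLG_STA_RATE;	/* unicast QoS data */
	unsigned int mgmt_flags = FLG_ACK | FLG_SEQ_CTL;	/* unicast management */
	unsigned int bar_flags  = FLG_ACK | FLG_IMM_BA_RSP;	/* block-ack request */

	printf("data=0x%08x mgmt=0x%08x bar=0x%08x\n",
	       data_flags, mgmt_flags, bar_flags);
	return 0;
}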
- * Set this for transmitting beacons and probe responses. */ -#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16) - -/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword - * alignment of frame's payload data field. - * 0: No pad - * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4 - * field (but not both). Driver must align frame data (i.e. data following - * MAC header) to DWORD boundary. */ -#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20) - -/* accelerate aggregation support - * 0 - no CCMP encryption; 1 - CCMP encryption */ -#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22) - -/* HCCA-AP - disable duration overwriting. */ -#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) - - -/* - * TX command security control - */ -#define TX_CMD_SEC_WEP 0x01 -#define TX_CMD_SEC_CCM 0x02 -#define TX_CMD_SEC_TKIP 0x03 -#define TX_CMD_SEC_MSK 0x03 -#define TX_CMD_SEC_SHIFT 6 -#define TX_CMD_SEC_KEY128 0x08 - -/* - * REPLY_TX = 0x1c (command) - */ - -/* - * 4965 uCode updates these Tx attempt count values in host DRAM. - * Used for managing Tx retries when expecting block-acks. - * Driver should set these fields to 0. - */ -struct iwl_dram_scratch { - u8 try_cnt; /* Tx attempts */ - u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ - __le16 reserved; -} __packed; - -struct iwl_tx_cmd { - /* - * MPDU byte count: - * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, - * + 8 byte IV for CCM or TKIP (not used for WEP) - * + Data payload - * + 8-byte MIC (not used for CCM/WEP) - * NOTE: Does not include Tx command bytes, post-MAC pad bytes, - * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i - * Range: 14-2342 bytes. - */ - __le16 len; - - /* - * MPDU or MSDU byte count for next frame. - * Used for fragmentation and bursting, but not 11n aggregation. - * Same as "len", but for next frame. Set to 0 if not applicable. - */ - __le16 next_frame_len; - - __le32 tx_flags; /* TX_CMD_FLG_* */ - - /* uCode may modify this field of the Tx command (in host DRAM!). - * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ - struct iwl_dram_scratch scratch; - - /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ - __le32 rate_n_flags; /* RATE_MCS_* */ - - /* Index of destination station in uCode's station table */ - u8 sta_id; - - /* Type of security encryption: CCM or TKIP */ - u8 sec_ctl; /* TX_CMD_SEC_* */ - - /* - * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial - * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for - * data frames, this field may be used to selectively reduce initial - * rate (via non-0 value) for special frames (e.g. management), while - * still supporting rate scaling for all frames. - */ - u8 initial_rate_index; - u8 reserved; - u8 key[16]; - __le16 next_frame_flags; - __le16 reserved2; - union { - __le32 life_time; - __le32 attempt; - } stop_time; - - /* Host DRAM physical address pointer to "scratch" in this command. - * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */ - __le32 dram_lsb_ptr; - u8 dram_msb_ptr; - - u8 rts_retry_limit; /*byte 50 */ - u8 data_retry_limit; /*byte 51 */ - u8 tid_tspec; - union { - __le16 pm_frame_timeout; - __le16 attempt_duration; - } timeout; - - /* - * Duration of EDCA burst Tx Opportunity, in 32-usec units. - * Set this if txop time is not specified by HCCA protocol (e.g. by AP). 
- */ - __le16 driver_txop; - - /* - * MAC header goes here, followed by 2 bytes padding if MAC header - * length is 26 or 30 bytes, followed by payload data - */ - u8 payload[0]; - struct ieee80211_hdr hdr[0]; -} __packed; - -/* - * TX command response is sent after *agn* transmission attempts. - * - * both postpone and abort status are expected behavior from uCode. there is - * no special operation required from driver; except for RFKILL_FLUSH, - * which required tx flush host command to flush all the tx frames in queues - */ -enum { - TX_STATUS_SUCCESS = 0x01, - TX_STATUS_DIRECT_DONE = 0x02, - /* postpone TX */ - TX_STATUS_POSTPONE_DELAY = 0x40, - TX_STATUS_POSTPONE_FEW_BYTES = 0x41, - TX_STATUS_POSTPONE_BT_PRIO = 0x42, - TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, - TX_STATUS_POSTPONE_CALC_TTAK = 0x44, - /* abort TX */ - TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, - TX_STATUS_FAIL_SHORT_LIMIT = 0x82, - TX_STATUS_FAIL_LONG_LIMIT = 0x83, - TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, - TX_STATUS_FAIL_DRAIN_FLOW = 0x85, - TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, - TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, - TX_STATUS_FAIL_DEST_PS = 0x88, - TX_STATUS_FAIL_HOST_ABORTED = 0x89, - TX_STATUS_FAIL_BT_RETRY = 0x8a, - TX_STATUS_FAIL_STA_INVALID = 0x8b, - TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, - TX_STATUS_FAIL_TID_DISABLE = 0x8d, - TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, - TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, - TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90, - TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, -}; - -#define TX_PACKET_MODE_REGULAR 0x0000 -#define TX_PACKET_MODE_BURST_SEQ 0x0100 -#define TX_PACKET_MODE_BURST_FIRST 0x0200 - -enum { - TX_POWER_PA_NOT_ACTIVE = 0x0, -}; - -enum { - TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ - TX_STATUS_DELAY_MSK = 0x00000040, - TX_STATUS_ABORT_MSK = 0x00000080, - TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ - TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ - TX_RESERVED = 0x00780000, /* bits 19:22 */ - TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ - TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ -}; - -/* ******************************* - * TX aggregation status - ******************************* */ - -enum { - AGG_TX_STATE_TRANSMITTED = 0x00, - AGG_TX_STATE_UNDERRUN_MSK = 0x01, - AGG_TX_STATE_BT_PRIO_MSK = 0x02, - AGG_TX_STATE_FEW_BYTES_MSK = 0x04, - AGG_TX_STATE_ABORT_MSK = 0x08, - AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10, - AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20, - AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40, - AGG_TX_STATE_SCD_QUERY_MSK = 0x80, - AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100, - AGG_TX_STATE_RESPONSE_MSK = 0x1ff, - AGG_TX_STATE_DUMP_TX_MSK = 0x200, - AGG_TX_STATE_DELAY_TX_MSK = 0x400 -}; - -#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */ -#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */ -#define AGG_TX_TRY_POS 12 - -#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \ - AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ - AGG_TX_STATE_LAST_SENT_BT_KILL_MSK) - -/* # tx attempts for first frame in aggregation */ -#define AGG_TX_STATE_TRY_CNT_POS 12 -#define AGG_TX_STATE_TRY_CNT_MSK 0xf000 - -/* Command ID and sequence number of Tx command for this frame */ -#define AGG_TX_STATE_SEQ_NUM_POS 16 -#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 - -/* - * REPLY_TX = 0x1c (response) - * - * This response may be in one of two slightly different formats, indicated - * by the frame_count field: - * - * 1) No aggregation (frame_count == 1). This reports Tx results for - * a single frame. Multiple attempts, at various bit rates, may have - * been made for this frame. 
- * - * 2) Aggregation (frame_count > 1). This reports Tx results for - * 2 or more frames that used block-acknowledge. All frames were - * transmitted at same rate. Rate scaling may have been used if first - * frame in this new agg block failed in previous agg block(s). - * - * Note that, for aggregation, ACK (block-ack) status is not delivered here; - * block-ack has not been received by the time the agn device records - * this status. - * This status relates to reasons the tx might have been blocked or aborted - * within the sending station (this agn device), rather than whether it was - * received successfully by the destination station. - */ -struct agg_tx_status { - __le16 status; - __le16 sequence; -} __packed; - -/* - * definitions for initial rate index field - * bits [3:0] initial rate index - * bits [6:4] rate table color, used for the initial rate - * bit-7 invalid rate indication - * i.e. rate was not chosen from rate table - * or rate table color was changed during frame retries - * refer tlc rate info - */ - -#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0 -#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f -#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4 -#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70 -#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80 - -/* refer to ra_tid */ -#define IWLAGN_TX_RES_TID_POS 0 -#define IWLAGN_TX_RES_TID_MSK 0x0f -#define IWLAGN_TX_RES_RA_POS 4 -#define IWLAGN_TX_RES_RA_MSK 0xf0 - -struct iwlagn_tx_resp { - u8 frame_count; /* 1 no aggregation, >1 aggregation */ - u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ - u8 failure_rts; /* # failures due to unsuccessful RTS */ - u8 failure_frame; /* # failures due to no ACK (unused for agg) */ - - /* For non-agg: Rate at which frame was successful. - * For agg: Rate at which all frames were transmitted. */ - __le32 rate_n_flags; /* RATE_MCS_* */ - - /* For non-agg: RTS + CTS + frame tx attempts time + ACK. - * For agg: RTS + CTS + aggregation tx time + block-ack time. */ - __le16 wireless_media_time; /* uSecs */ - - u8 pa_status; /* RF power amplifier measurement (not used) */ - u8 pa_integ_res_a[3]; - u8 pa_integ_res_b[3]; - u8 pa_integ_res_C[3]; - - __le32 tfd_info; - __le16 seq_ctl; - __le16 byte_cnt; - u8 tlc_info; - u8 ra_tid; /* tid (0:3), sta_id (4:7) */ - __le16 frame_ctrl; - /* - * For non-agg: frame status TX_STATUS_* - * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status - * fields follow this one, up to frame_count. - * Bit fields: - * 11- 0: AGG_TX_STATE_* status code - * 15-12: Retry count for 1st frame in aggregation (retries - * occur if tx failed for this frame when it was a - * member of a previous aggregation block). If rate - * scaling is used, retry count indicates the rate - * table entry used for all frames in the new agg. - * 31-16: Sequence # for this frame's Tx cmd (not SSN!) 
- */ - struct agg_tx_status status; /* TX status (in aggregation - - * status of 1st frame) */ -} __packed; -/* - * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) - * - * Reports Block-Acknowledge from recipient station - */ -struct iwl_compressed_ba_resp { - __le32 sta_addr_lo32; - __le16 sta_addr_hi16; - __le16 reserved; - - /* Index of recipient (BA-sending) station in uCode's station table */ - u8 sta_id; - u8 tid; - __le16 seq_ctl; - __le64 bitmap; - __le16 scd_flow; - __le16 scd_ssn; - u8 txed; /* number of frames sent */ - u8 txed_2_done; /* number of frames acked */ - __le16 reserved1; -} __packed; - -/* - * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) - * - */ - -/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ -#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) - -/* # of EDCA prioritized tx fifos */ -#define LINK_QUAL_AC_NUM AC_NUM - -/* # entries in rate scale table to support Tx retries */ -#define LINK_QUAL_MAX_RETRY_NUM 16 - -/* Tx antenna selection values */ -#define LINK_QUAL_ANT_A_MSK (1 << 0) -#define LINK_QUAL_ANT_B_MSK (1 << 1) -#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) - - -/** - * struct iwl_link_qual_general_params - * - * Used in REPLY_TX_LINK_QUALITY_CMD - */ -struct iwl_link_qual_general_params { - u8 flags; - - /* No entries at or above this (driver chosen) index contain MIMO */ - u8 mimo_delimiter; - - /* Best single antenna to use for single stream (legacy, SISO). */ - u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ - - /* Best antennas to use for MIMO (unused for 4965, assumes both). */ - u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ - - /* - * If driver needs to use different initial rates for different - * EDCA QOS access categories (as implemented by tx fifos 0-3), - * this table will set that up, by indicating the indexes in the - * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. - * Otherwise, driver should set all entries to 0. - * - * Entry usage: - * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice - * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. - */ - u8 start_rate_index[LINK_QUAL_AC_NUM]; -} __packed; - -#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ -#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) -#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) - -#define LINK_QUAL_AGG_DISABLE_START_DEF (3) -#define LINK_QUAL_AGG_DISABLE_START_MAX (255) -#define LINK_QUAL_AGG_DISABLE_START_MIN (0) - -#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) -#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) -#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) - -/** - * struct iwl_link_qual_agg_params - * - * Used in REPLY_TX_LINK_QUALITY_CMD - */ -struct iwl_link_qual_agg_params { - - /* - *Maximum number of uSec in aggregation. - * default set to 4000 (4 milliseconds) if not configured in .cfg - */ - __le16 agg_time_limit; - - /* - * Number of Tx retries allowed for a frame, before that frame will - * no longer be considered for the start of an aggregation sequence - * (scheduler will then try to tx it as single frame). - * Driver should set this to 3. - */ - u8 agg_dis_start_th; - - /* - * Maximum number of frames in aggregation. - * 0 = no limit (default). 1 = no aggregation. - * Other values = max # frames in aggregation. 
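A small sketch of how a driver-side helper might derive agg_time_limit from a configuration value, using the LINK_QUAL_AGG_TIME_LIMIT_* bounds and default above; the fall-back-then-clamp policy is an assumption made here for illustration, not taken from the driver:

#include <stdio.h>

/* Bounds and default copied from the LINK_QUAL_AGG_TIME_LIMIT_* values above. */
#define AGG_TIME_LIMIT_MIN	100
#define AGG_TIME_LIMIT_MAX	8000
#define AGG_TIME_LIMIT_DEF	4000

/* Fall back to the default when nothing is configured, then clamp to the
 * documented range (assumed policy, for illustration only). */
static unsigned int agg_time_limit(unsigned int cfg_usec)
{
	if (!cfg_usec)
		return AGG_TIME_LIMIT_DEF;
	if (cfg_usec < AGG_TIME_LIMIT_MIN)
		return AGG_TIME_LIMIT_MIN;
	if (cfg_usec > AGG_TIME_LIMIT_MAX)
		return AGG_TIME_LIMIT_MAX;
	return cfg_usec;
}

int main(void)
{
	printf("cfg 0     -> %u usec\n", agg_time_limit(0));
	printf("cfg 4000  -> %u usec\n", agg_time_limit(4000));
	printf("cfg 20000 -> %u usec\n", agg_time_limit(20000));
	return 0;
}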
- */ - u8 agg_frame_cnt_limit; - - __le32 reserved; -} __packed; - -/* - * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) - * - * For agn devices - * - * Each station in the agn device's internal station table has its own table - * of 16 - * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when - * an ACK is not received. This command replaces the entire table for - * one station. - * - * NOTE: Station must already be in agn device's station table. - * Use REPLY_ADD_STA. - * - * The rate scaling procedures described below work well. Of course, other - * procedures are possible, and may work better for particular environments. - * - * - * FILLING THE RATE TABLE - * - * Given a particular initial rate and mode, as determined by the rate - * scaling algorithm described below, the Linux driver uses the following - * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the - * Link Quality command: - * - * - * 1) If using High-throughput (HT) (SISO or MIMO) initial rate: - * a) Use this same initial rate for first 3 entries. - * b) Find next lower available rate using same mode (SISO or MIMO), - * use for next 3 entries. If no lower rate available, switch to - * legacy mode (no HT40 channel, no MIMO, no short guard interval). - * c) If using MIMO, set command's mimo_delimiter to number of entries - * using MIMO (3 or 6). - * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel, - * no MIMO, no short guard interval), at the next lower bit rate - * (e.g. if second HT bit rate was 54, try 48 legacy), and follow - * legacy procedure for remaining table entries. - * - * 2) If using legacy initial rate: - * a) Use the initial rate for only one entry. - * b) For each following entry, reduce the rate to next lower available - * rate, until reaching the lowest available rate. - * c) When reducing rate, also switch antenna selection. - * d) Once lowest available rate is reached, repeat this rate until - * rate table is filled (16 entries), switching antenna each entry. - * - * - * ACCUMULATING HISTORY - * - * The rate scaling algorithm for agn devices, as implemented in Linux driver, - * uses two sets of frame Tx success history: One for the current/active - * modulation mode, and one for a speculative/search mode that is being - * attempted. If the speculative mode turns out to be more effective (i.e. - * actual transfer rate is better), then the driver continues to use the - * speculative mode as the new current active mode. - * - * Each history set contains, separately for each possible rate, data for a - * sliding window of the 62 most recent tx attempts at that rate. The data - * includes a shifting bitmap of success(1)/failure(0), and sums of successful - * and attempted frames, from which the driver can additionally calculate a - * success ratio (success / attempted) and number of failures - * (attempted - success), and control the size of the window (attempted). - * The driver uses the bit map to remove successes from the success sum, as - * the oldest tx attempts fall out of the window. - * - * When the agn device makes multiple tx attempts for a given frame, each - * attempt might be at a different rate, and have different modulation - * characteristics (e.g. antenna, fat channel, short guard interval), as set - * up in the rate scaling table in the Link Quality command. 
The driver must - * determine which rate table entry was used for each tx attempt, to determine - * which rate-specific history to update, and record only those attempts that - * match the modulation characteristics of the history set. - * - * When using block-ack (aggregation), all frames are transmitted at the same - * rate, since there is no per-attempt acknowledgment from the destination - * station. The Tx response struct iwl_tx_resp indicates the Tx rate in - * rate_n_flags field. After receiving a block-ack, the driver can update - * history for the entire block all at once. - * - * - * FINDING BEST STARTING RATE: - * - * When working with a selected initial modulation mode (see below), the - * driver attempts to find a best initial rate. The initial rate is the - * first entry in the Link Quality command's rate table. - * - * 1) Calculate actual throughput (success ratio * expected throughput, see - * table below) for current initial rate. Do this only if enough frames - * have been attempted to make the value meaningful: at least 6 failed - * tx attempts, or at least 8 successes. If not enough, don't try rate - * scaling yet. - * - * 2) Find available rates adjacent to current initial rate. Available means: - * a) supported by hardware && - * b) supported by association && - * c) within any constraints selected by user - * - * 3) Gather measured throughputs for adjacent rates. These might not have - * enough history to calculate a throughput. That's okay, we might try - * using one of them anyway! - * - * 4) Try decreasing rate if, for current rate: - * a) success ratio is < 15% || - * b) lower adjacent rate has better measured throughput || - * c) higher adjacent rate has worse throughput, and lower is unmeasured - * - * As a sanity check, if decrease was determined above, leave rate - * unchanged if: - * a) lower rate unavailable - * b) success ratio at current rate > 85% (very good) - * c) current measured throughput is better than expected throughput - * of lower rate (under perfect 100% tx conditions, see table below) - * - * 5) Try increasing rate if, for current rate: - * a) success ratio is < 15% || - * b) both adjacent rates' throughputs are unmeasured (try it!) || - * b) higher adjacent rate has better measured throughput || - * c) lower adjacent rate has worse throughput, and higher is unmeasured - * - * As a sanity check, if increase was determined above, leave rate - * unchanged if: - * a) success ratio at current rate < 70%. This is not particularly - * good performance; higher rate is sure to have poorer success. - * - * 6) Re-evaluate the rate after each tx frame. If working with block- - * acknowledge, history and statistics may be calculated for the entire - * block (including prior history that fits within the history windows), - * before re-evaluation. - * - * FINDING BEST STARTING MODULATION MODE: - * - * After working with a modulation mode for a "while" (and doing rate scaling), - * the driver searches for a new initial mode in an attempt to improve - * throughput. 
The "while" is measured by numbers of attempted frames: - * - * For legacy mode, search for new mode after: - * 480 successful frames, or 160 failed frames - * For high-throughput modes (SISO or MIMO), search for new mode after: - * 4500 successful frames, or 400 failed frames - * - * Mode switch possibilities are (3 for each mode): - * - * For legacy: - * Change antenna, try SISO (if HT association), try MIMO (if HT association) - * For SISO: - * Change antenna, try MIMO, try shortened guard interval (SGI) - * For MIMO: - * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI) - * - * When trying a new mode, use the same bit rate as the old/current mode when - * trying antenna switches and shortened guard interval. When switching to - * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate - * for which the expected throughput (under perfect conditions) is about the - * same or slightly better than the actual measured throughput delivered by - * the old/current mode. - * - * Actual throughput can be estimated by multiplying the expected throughput - * by the success ratio (successful / attempted tx frames). Frame size is - * not considered in this calculation; it assumes that frame size will average - * out to be fairly consistent over several samples. The following are - * metric values for expected throughput assuming 100% success ratio. - * Only G band has support for CCK rates: - * - * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60 - * - * G: 7 13 35 58 40 57 72 98 121 154 177 186 186 - * A: 0 0 0 0 40 57 72 98 121 154 177 186 186 - * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202 - * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211 - * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251 - * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257 - * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257 - * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264 - * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289 - * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293 - * - * After the new mode has been tried for a short while (minimum of 6 failed - * frames or 8 successful frames), compare success ratio and actual throughput - * estimate of the new mode with the old. If either is better with the new - * mode, continue to use the new mode. - * - * Continue comparing modes until all 3 possibilities have been tried. - * If moving from legacy to HT, try all 3 possibilities from the new HT - * mode. After trying all 3, a best mode is found. Continue to use this mode - * for the longer "while" described above (e.g. 480 successful frames for - * legacy), and then repeat the search process. - * - */ -struct iwl_link_quality_cmd { - - /* Index of destination/recipient station in uCode's station table */ - u8 sta_id; - u8 reserved1; - __le16 control; /* not used */ - struct iwl_link_qual_general_params general_params; - struct iwl_link_qual_agg_params agg_params; - - /* - * Rate info; when using rate-scaling, Tx command's initial_rate_index - * specifies 1st Tx rate attempted, via index into this table. - * agn devices works its way through table when retrying Tx. 
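The throughput estimate described above (the expected metric scaled by the success ratio, considered meaningful only after enough history) reduces to a few lines of arithmetic. The sketch below is standalone; the two metric values are read off the table above (legacy G at 54 Mbps = 186, SISO 20 MHz in the same column = 193) and the sample frame counts are invented:

#include <stdio.h>

/* History thresholds and metric values taken from the discussion above. */
#define MIN_FAILED	6
#define MIN_SUCCESS	8

static double est_throughput(int success, int attempted, double expected)
{
	int failed = attempted - success;

	if (failed < MIN_FAILED && success < MIN_SUCCESS)
		return -1.0;	/* not enough history to be meaningful */
	return expected * ((double)success / attempted);
}

int main(void)
{
	/* Current mode: legacy G at 54 Mbps, expected metric 186. */
	double cur = est_throughput(40, 50, 186.0);
	/* Search mode: SISO 20 MHz, same column, expected metric 193. */
	double alt = est_throughput(9, 12, 193.0);

	printf("current ~%.1f, search ~%.1f -> keep %s mode\n",
	       cur, alt, alt > cur ? "search" : "current");
	return 0;
}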
- */ - struct { - __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ - } rs_table[LINK_QUAL_MAX_RETRY_NUM]; - __le32 reserved2; -} __packed; - -/* - * BT configuration enable flags: - * bit 0 - 1: BT channel announcement enabled - * 0: disable - * bit 1 - 1: priority of BT device enabled - * 0: disable - * bit 2 - 1: BT 2 wire support enabled - * 0: disable - */ -#define BT_COEX_DISABLE (0x0) -#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0) -#define BT_ENABLE_PRIORITY BIT(1) -#define BT_ENABLE_2_WIRE BIT(2) - -#define BT_COEX_DISABLE (0x0) -#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY) - -#define BT_LEAD_TIME_MIN (0x0) -#define BT_LEAD_TIME_DEF (0x1E) -#define BT_LEAD_TIME_MAX (0xFF) - -#define BT_MAX_KILL_MIN (0x1) -#define BT_MAX_KILL_DEF (0x5) -#define BT_MAX_KILL_MAX (0xFF) - -#define BT_DURATION_LIMIT_DEF 625 -#define BT_DURATION_LIMIT_MAX 1250 -#define BT_DURATION_LIMIT_MIN 625 - -#define BT_ON_THRESHOLD_DEF 4 -#define BT_ON_THRESHOLD_MAX 1000 -#define BT_ON_THRESHOLD_MIN 1 - -#define BT_FRAG_THRESHOLD_DEF 0 -#define BT_FRAG_THRESHOLD_MAX 0 -#define BT_FRAG_THRESHOLD_MIN 0 - -#define BT_AGG_THRESHOLD_DEF 1200 -#define BT_AGG_THRESHOLD_MAX 8000 -#define BT_AGG_THRESHOLD_MIN 400 - -/* - * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) - * - * agn devices support hardware handshake with Bluetooth device on - * same platform. Bluetooth device alerts wireless device when it will Tx; - * wireless device can delay or kill its own Tx to accommodate. - */ -struct iwl_bt_cmd { - u8 flags; - u8 lead_time; - u8 max_kill; - u8 reserved; - __le32 kill_ack_mask; - __le32 kill_cts_mask; -} __packed; - -#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION BIT(0) - -#define IWLAGN_BT_FLAG_COEX_MODE_MASK (BIT(3)|BIT(4)|BIT(5)) -#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT 3 -#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED 0 -#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W 1 -#define IWLAGN_BT_FLAG_COEX_MODE_3W 2 -#define IWLAGN_BT_FLAG_COEX_MODE_4W 3 - -#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6) -/* Disable Sync PSPoll on SCO/eSCO */ -#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7) - -#define IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD -75 /* dBm */ -#define IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD -65 /* dBm */ - -#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF -#define IWLAGN_BT_PRIO_BOOST_MIN 0x00 -#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0 -#define IWLAGN_BT_PRIO_BOOST_DEFAULT32 0xF0F0F0F0 - -#define IWLAGN_BT_MAX_KILL_DEFAULT 5 - -#define IWLAGN_BT3_T7_DEFAULT 1 - -enum iwl_bt_kill_idx { - IWL_BT_KILL_DEFAULT = 0, - IWL_BT_KILL_OVERRIDE = 1, - IWL_BT_KILL_REDUCE = 2, -}; - -#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000) -#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000) -#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff) -#define IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE cpu_to_le32(0) - -#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2 - -#define IWLAGN_BT3_T2_DEFAULT 0xc - -#define IWLAGN_BT_VALID_ENABLE_FLAGS cpu_to_le16(BIT(0)) -#define IWLAGN_BT_VALID_BOOST cpu_to_le16(BIT(1)) -#define IWLAGN_BT_VALID_MAX_KILL cpu_to_le16(BIT(2)) -#define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3)) -#define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4)) -#define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5)) -#define IWLAGN_BT_VALID_REDUCED_TX_PWR cpu_to_le16(BIT(6)) -#define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7)) - -#define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \ - IWLAGN_BT_VALID_BOOST | \ - IWLAGN_BT_VALID_MAX_KILL | \ - IWLAGN_BT_VALID_3W_TIMERS | \ - 
IWLAGN_BT_VALID_KILL_ACK_MASK | \ - IWLAGN_BT_VALID_KILL_CTS_MASK | \ - IWLAGN_BT_VALID_REDUCED_TX_PWR | \ - IWLAGN_BT_VALID_3W_LUT) - -#define IWLAGN_BT_REDUCED_TX_PWR BIT(0) - -#define IWLAGN_BT_DECISION_LUT_SIZE 12 - -struct iwl_basic_bt_cmd { - u8 flags; - u8 ledtime; /* unused */ - u8 max_kill; - u8 bt3_timer_t7_value; - __le32 kill_ack_mask; - __le32 kill_cts_mask; - u8 bt3_prio_sample_time; - u8 bt3_timer_t2_value; - __le16 bt4_reaction_time; /* unused */ - __le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE]; - /* - * bit 0: use reduced tx power for control frame - * bit 1 - 7: reserved - */ - u8 reduce_txpower; - u8 reserved; - __le16 valid; -}; - -struct iwl_bt_cmd_v1 { - struct iwl_basic_bt_cmd basic; - u8 prio_boost; - /* - * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask - * if configure the following patterns - */ - u8 tx_prio_boost; /* SW boost of WiFi tx priority */ - __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ -}; - -struct iwl_bt_cmd_v2 { - struct iwl_basic_bt_cmd basic; - __le32 prio_boost; - /* - * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask - * if configure the following patterns - */ - u8 reserved; - u8 tx_prio_boost; /* SW boost of WiFi tx priority */ - __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ -}; - -#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0)) - -struct iwlagn_bt_sco_cmd { - __le32 flags; -}; - -/****************************************************************************** - * (6) - * Spectrum Management (802.11h) Commands, Responses, Notifications: - * - *****************************************************************************/ - -/* - * Spectrum Management - */ -#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \ - RXON_FILTER_CTL2HOST_MSK | \ - RXON_FILTER_ACCEPT_GRP_MSK | \ - RXON_FILTER_DIS_DECRYPT_MSK | \ - RXON_FILTER_DIS_GRP_DECRYPT_MSK | \ - RXON_FILTER_ASSOC_MSK | \ - RXON_FILTER_BCON_AWARE_MSK) - -struct iwl_measure_channel { - __le32 duration; /* measurement duration in extended beacon - * format */ - u8 channel; /* channel to measure */ - u8 type; /* see enum iwl_measure_type */ - __le16 reserved; -} __packed; - -/* - * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) - */ -struct iwl_spectrum_cmd { - __le16 len; /* number of bytes starting from token */ - u8 token; /* token id */ - u8 id; /* measurement id -- 0 or 1 */ - u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */ - u8 periodic; /* 1 = periodic */ - __le16 path_loss_timeout; - __le32 start_time; /* start time in extended beacon format */ - __le32 reserved2; - __le32 flags; /* rxon flags */ - __le32 filter_flags; /* rxon filter flags */ - __le16 channel_count; /* minimum 1, maximum 10 */ - __le16 reserved3; - struct iwl_measure_channel channels[10]; -} __packed; - -/* - * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) - */ -struct iwl_spectrum_resp { - u8 token; - u8 id; /* id of the prior command replaced, or 0xff */ - __le16 status; /* 0 - command will be handled - * 1 - cannot handle (conflicts with another - * measurement) */ -} __packed; - -enum iwl_measurement_state { - IWL_MEASUREMENT_START = 0, - IWL_MEASUREMENT_STOP = 1, -}; - -enum iwl_measurement_status { - IWL_MEASUREMENT_OK = 0, - IWL_MEASUREMENT_CONCURRENT = 1, - IWL_MEASUREMENT_CSA_CONFLICT = 2, - IWL_MEASUREMENT_TGH_CONFLICT = 3, - /* 4-5 reserved */ - IWL_MEASUREMENT_STOPPED = 6, - IWL_MEASUREMENT_TIMEOUT = 7, - IWL_MEASUREMENT_PERIODIC_FAILED = 8, -}; - -#define NUM_ELEMENTS_IN_HISTOGRAM 8 - -struct iwl_measurement_histogram { - __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 
0.8usec counts */ - __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ -} __packed; - -/* clear channel availability counters */ -struct iwl_measurement_cca_counters { - __le32 ofdm; - __le32 cck; -} __packed; - -enum iwl_measure_type { - IWL_MEASURE_BASIC = (1 << 0), - IWL_MEASURE_CHANNEL_LOAD = (1 << 1), - IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), - IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), - IWL_MEASURE_FRAME = (1 << 4), - /* bits 5:6 are reserved */ - IWL_MEASURE_IDLE = (1 << 7), -}; - -/* - * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) - */ -struct iwl_spectrum_notification { - u8 id; /* measurement id -- 0 or 1 */ - u8 token; - u8 channel_index; /* index in measurement channel list */ - u8 state; /* 0 - start, 1 - stop */ - __le32 start_time; /* lower 32-bits of TSF */ - u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ - u8 channel; - u8 type; /* see enum iwl_measurement_type */ - u8 reserved1; - /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only - * valid if applicable for measurement type requested. */ - __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */ - __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */ - __le32 cca_time; /* channel load time in usecs */ - u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - - * unidentified */ - u8 reserved2[3]; - struct iwl_measurement_histogram histogram; - __le32 stop_time; /* lower 32-bits of TSF */ - __le32 status; /* see iwl_measurement_status */ -} __packed; - -/****************************************************************************** - * (7) - * Power Management Commands, Responses, Notifications: - * - *****************************************************************************/ - -/** - * struct iwl_powertable_cmd - Power Table Command - * @flags: See below: - * - * POWER_TABLE_CMD = 0x77 (command, has simple generic response) - * - * PM allow: - * bit 0 - '0' Driver not allow power management - * '1' Driver allow PM (use rest of parameters) - * - * uCode send sleep notifications: - * bit 1 - '0' Don't send sleep notification - * '1' send sleep notification (SEND_PM_NOTIFICATION) - * - * Sleep over DTIM - * bit 2 - '0' PM have to walk up every DTIM - * '1' PM could sleep over DTIM till listen Interval. - * - * PCI power managed - * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1) - * '1' !(PCI_CFG_LINK_CTRL & 0x1) - * - * Fast PD - * bit 4 - '1' Put radio to sleep when receiving frame for others - * - * Force sleep Modes - * bit 31/30- '00' use both mac/xtal sleeps - * '01' force Mac sleep - * '10' force xtal sleep - * '11' Illegal set - * - * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then - * ucode assume sleep over DTIM is allowed and we don't need to wake up - * for every DTIM. 
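As an aside, not part of this patch: a minimal sketch of how a host driver might fill the POWER_TABLE_CMD described above, using the iwl_powertable_cmd layout and the IWL_POWER_* flag masks defined just below. The helper name is made up and error handling is omitted.

/* Illustrative only: allow power management and sleeping over DTIM. */
static void iwl_example_fill_powertable(struct iwl_powertable_cmd *cmd,
					u8 keep_alive_seconds)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |	/* bit 0 */
		     IWL_POWER_SLEEP_OVER_DTIM_MSK;	/* bit 2 */
	cmd->keep_alive_seconds = keep_alive_seconds;
	cmd->rx_data_timeout = cpu_to_le32(100 * 1024);	/* usec */
	cmd->tx_data_timeout = cpu_to_le32(100 * 1024);	/* usec */
	/* sleep_interval[] left at zero: uCode falls back to its defaults */
}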
- */ -#define IWL_POWER_VEC_SIZE 5 - -#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) -#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0)) -#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1)) -#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2)) -#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) -#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4)) -#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5)) -#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6)) -#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7)) -#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8)) -#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9)) - -struct iwl_powertable_cmd { - __le16 flags; - u8 keep_alive_seconds; - u8 debug_flags; - __le32 rx_data_timeout; - __le32 tx_data_timeout; - __le32 sleep_interval[IWL_POWER_VEC_SIZE]; - __le32 keep_alive_beacons; -} __packed; - -/* - * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) - * all devices identical. - */ -struct iwl_sleep_notification { - u8 pm_sleep_mode; - u8 pm_wakeup_src; - __le16 reserved; - __le32 sleep_time; - __le32 tsf_low; - __le32 bcon_timer; -} __packed; - -/* Sleep states. all devices identical. */ -enum { - IWL_PM_NO_SLEEP = 0, - IWL_PM_SLP_MAC = 1, - IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, - IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, - IWL_PM_SLP_PHY = 4, - IWL_PM_SLP_REPENT = 5, - IWL_PM_WAKEUP_BY_TIMER = 6, - IWL_PM_WAKEUP_BY_DRIVER = 7, - IWL_PM_WAKEUP_BY_RFKILL = 8, - /* 3 reserved */ - IWL_PM_NUM_OF_MODES = 12, -}; - -/* - * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response) - */ -#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */ -#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */ -#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ -struct iwl_card_state_cmd { - __le32 status; /* CARD_STATE_CMD_* request new power state */ -} __packed; - -/* - * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) - */ -struct iwl_card_state_notif { - __le32 flags; -} __packed; - -#define HW_CARD_DISABLED 0x01 -#define SW_CARD_DISABLED 0x02 -#define CT_CARD_DISABLED 0x04 -#define RXON_CARD_DISABLED 0x10 - -struct iwl_ct_kill_config { - __le32 reserved; - __le32 critical_temperature_M; - __le32 critical_temperature_R; -} __packed; - -/* 1000, and 6x00 */ -struct iwl_ct_kill_throttling_config { - __le32 critical_temperature_exit; - __le32 reserved; - __le32 critical_temperature_enter; -} __packed; - -/****************************************************************************** - * (8) - * Scan Commands, Responses, Notifications: - * - *****************************************************************************/ - -#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0) -#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) - -/** - * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table - * - * One for each channel in the scan list. - * Each channel can independently select: - * 1) SSID for directed active scans - * 2) Txpower setting (for rate specified within Tx command) - * 3) How long to stay on-channel (behavior may be modified by quiet_time, - * quiet_plcp_th, good_CRC_th) - * - * To avoid uCode errors, make sure the following are true (see comments - * under struct iwl_scan_cmd about max_out_time and quiet_time): - * 1) If using passive_dwell (i.e. passive_dwell != 0): - * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) - * 2) quiet_time <= active_dwell - * 3) If restricting off-channel time (i.e. 
max_out_time !=0): - * passive_dwell < max_out_time - * active_dwell < max_out_time - */ - -struct iwl_scan_channel { - /* - * type is defined as: - * 0:0 1 = active, 0 = passive - * 1:20 SSID direct bit map; if a bit is set, then corresponding - * SSID IE is transmitted in probe request. - * 21:31 reserved - */ - __le32 type; - __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ - u8 tx_gain; /* gain for analog radio */ - u8 dsp_atten; /* gain for DSP */ - __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ - __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ -} __packed; - -/* set number of direct probes __le32 type */ -#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) - -/** - * struct iwl_ssid_ie - directed scan network information element - * - * Up to 20 of these may appear in REPLY_SCAN_CMD, - * selected by "type" bit field in struct iwl_scan_channel; - * each channel may select different ssids from among the 20 entries. - * SSID IEs get transmitted in reverse order of entry. - */ -struct iwl_ssid_ie { - u8 id; - u8 len; - u8 ssid[32]; -} __packed; - -#define PROBE_OPTION_MAX 20 -#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) -#define IWL_GOOD_CRC_TH_DISABLED 0 -#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) -#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) -#define IWL_MAX_CMD_SIZE 4096 - -/* - * REPLY_SCAN_CMD = 0x80 (command) - * - * The hardware scan command is very powerful; the driver can set it up to - * maintain (relatively) normal network traffic while doing a scan in the - * background. The max_out_time and suspend_time control the ratio of how - * long the device stays on an associated network channel ("service channel") - * vs. how long it's away from the service channel, i.e. tuned to other channels - * for scanning. - * - * max_out_time is the max time off-channel (in usec), and suspend_time - * is how long (in "extended beacon" format) that the scan is "suspended" - * after returning to the service channel. That is, suspend_time is the - * time that we stay on the service channel, doing normal work, between - * scan segments. The driver may set these parameters differently to support - * scanning when associated vs. not associated, and light vs. heavy traffic - * loads when associated. - * - * After receiving this command, the device's scan engine does the following; - * - * 1) Sends SCAN_START notification to driver - * 2) Checks to see if it has time to do scan for one channel - * 3) Sends NULL packet, with power-save (PS) bit set to 1, - * to tell AP that we're going off-channel - * 4) Tunes to first channel in scan list, does active or passive scan - * 5) Sends SCAN_RESULT notification to driver - * 6) Checks to see if it has time to do scan on *next* channel in list - * 7) Repeats 4-6 until it no longer has time to scan the next channel - * before max_out_time expires - * 8) Returns to service channel - * 9) Sends NULL packet with PS=0 to tell AP that we're back - * 10) Stays on service channel until suspend_time expires - * 11) Repeats entire process 2-10 until list is complete - * 12) Sends SCAN_COMPLETE notification - * - * For fast, efficient scans, the scan command also has support for staying on - * a channel for just a short time, if doing active scanning and getting no - * responses to the transmitted probe request. 
This time is controlled by - * quiet_time, and the number of received packets below which a channel is - * considered "quiet" is controlled by quiet_plcp_threshold. - * - * For active scanning on channels that have regulatory restrictions against - * blindly transmitting, the scan can listen before transmitting, to make sure - * that there is already legitimate activity on the channel. If enough - * packets are cleanly received on the channel (controlled by good_CRC_th, - * typical value 1), the scan engine starts transmitting probe requests. - * - * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. - * - * To avoid uCode errors, see timing restrictions described under - * struct iwl_scan_channel. - */ - -enum iwl_scan_flags { - /* BIT(0) currently unused */ - IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1), - /* bits 2-7 reserved */ -}; - -struct iwl_scan_cmd { - __le16 len; - u8 scan_flags; /* scan flags: see enum iwl_scan_flags */ - u8 channel_count; /* # channels in channel list */ - __le16 quiet_time; /* dwell only this # millisecs on quiet channel - * (only for active scan) */ - __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */ - __le16 good_CRC_th; /* passive -> active promotion threshold */ - __le16 rx_chain; /* RXON_RX_CHAIN_* */ - __le32 max_out_time; /* max usec to be away from associated (service) - * channel */ - __le32 suspend_time; /* pause scan this long (in "extended beacon - * format") when returning to service chnl: - */ - __le32 flags; /* RXON_FLG_* */ - __le32 filter_flags; /* RXON_FILTER_* */ - - /* For active scans (set to all-0s for passive scans). - * Does not include payload. Must specify Tx rate; no rate scaling. */ - struct iwl_tx_cmd tx_cmd; - - /* For directed active scans (set to all-0s otherwise) */ - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; - - /* - * Probe request frame, followed by channel list. - * - * Size of probe request frame is specified by byte count in tx_cmd. - * Channel list follows immediately after probe request frame. - * Number of channels in list is specified by channel_count. - * Each channel in list is of type: - * - * struct iwl_scan_channel channels[0]; - * - * NOTE: Only one band of channels can be scanned per pass. You - * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait - * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) - * before requesting another scan. - */ - u8 data[0]; -} __packed; - -/* Can abort will notify by complete notification with abort status. 
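As an illustration only (not part of this patch), the variable tail of REPLY_SCAN_CMD described above, probe request first and channel list immediately after, could be assembled along these lines. It assumes the probe request byte count lives in tx_cmd's len field, as the comment says; the helper name is hypothetical.

/* Illustrative only: copy the probe request, then channel_count entries
 * of struct iwl_scan_channel, and record the total command length. */
static u16 iwl_example_build_scan_tail(struct iwl_scan_cmd *scan,
				       const u8 *probe, u16 probe_len,
				       const struct iwl_scan_channel *chans,
				       u8 n_channels)
{
	u8 *pos = scan->data;

	memcpy(pos, probe, probe_len);			/* probe request frame */
	scan->tx_cmd.len = cpu_to_le16(probe_len);	/* byte count in tx_cmd */
	pos += probe_len;

	memcpy(pos, chans, n_channels * sizeof(*chans));	/* channel list */
	scan->channel_count = n_channels;
	pos += n_channels * sizeof(*chans);

	scan->len = cpu_to_le16(pos - (u8 *)scan);	/* whole command */
	return pos - (u8 *)scan;
}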
*/ -#define CAN_ABORT_STATUS cpu_to_le32(0x1) -/* complete notification statuses */ -#define ABORT_STATUS 0x2 - -/* - * REPLY_SCAN_CMD = 0x80 (response) - */ -struct iwl_scanreq_notification { - __le32 status; /* 1: okay, 2: cannot fulfill request */ -} __packed; - -/* - * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) - */ -struct iwl_scanstart_notification { - __le32 tsf_low; - __le32 tsf_high; - __le32 beacon_timer; - u8 channel; - u8 band; - u8 reserved[2]; - __le32 status; -} __packed; - -#define SCAN_OWNER_STATUS 0x1 -#define MEASURE_OWNER_STATUS 0x2 - -#define IWL_PROBE_STATUS_OK 0 -#define IWL_PROBE_STATUS_TX_FAILED BIT(0) -/* error statuses combined with TX_FAILED */ -#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) -#define IWL_PROBE_STATUS_FAIL_BT BIT(2) - -#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ -/* - * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) - */ -struct iwl_scanresults_notification { - u8 channel; - u8 band; - u8 probe_status; - u8 num_probe_not_sent; /* not enough time to send */ - __le32 tsf_low; - __le32 tsf_high; - __le32 statistics[NUMBER_OF_STATISTICS]; -} __packed; - -/* - * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) - */ -struct iwl_scancomplete_notification { - u8 scanned_channels; - u8 status; - u8 bt_status; /* BT On/Off status */ - u8 last_channel; - __le32 tsf_low; - __le32 tsf_high; -} __packed; - - -/****************************************************************************** - * (9) - * IBSS/AP Commands and Notifications: - * - *****************************************************************************/ - -enum iwl_ibss_manager { - IWL_NOT_IBSS_MANAGER = 0, - IWL_IBSS_MANAGER = 1, -}; - -/* - * BEACON_NOTIFICATION = 0x90 (notification only, not a command) - */ - -struct iwlagn_beacon_notif { - struct iwlagn_tx_resp beacon_notify_hdr; - __le32 low_tsf; - __le32 high_tsf; - __le32 ibss_mgr_status; -} __packed; - -/* - * REPLY_TX_BEACON = 0x91 (command, has simple generic response) - */ - -struct iwl_tx_beacon_cmd { - struct iwl_tx_cmd tx; - __le16 tim_idx; - u8 tim_size; - u8 reserved1; - struct ieee80211_hdr frame[0]; /* beacon frame */ -} __packed; - -/****************************************************************************** - * (10) - * Statistics Commands and Notifications: - * - *****************************************************************************/ - -#define IWL_TEMP_CONVERT 260 - -#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 -#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 -#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 - -/* Used for passing to driver number of successes and failures per rate */ -struct rate_histogram { - union { - __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; - __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; - __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; - } success; - union { - __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; - __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; - __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; - } failed; -} __packed; - -/* statistics command response */ - -struct statistics_dbg { - __le32 burst_check; - __le32 burst_count; - __le32 wait_for_silence_timeout_cnt; - __le32 reserved[3]; -} __packed; - -struct statistics_rx_phy { - __le32 ina_cnt; - __le32 fina_cnt; - __le32 plcp_err; - __le32 crc32_err; - __le32 overrun_err; - __le32 early_overrun_err; - __le32 crc32_good; - __le32 false_alarm_cnt; - __le32 fina_sync_err_cnt; - __le32 sfd_timeout; - __le32 fina_timeout; - __le32 unresponded_rts; - __le32 rxe_frame_limit_overrun; - __le32 sent_ack_cnt; - __le32 
sent_cts_cnt; - __le32 sent_ba_rsp_cnt; - __le32 dsp_self_kill; - __le32 mh_format_err; - __le32 re_acq_main_rssi_sum; - __le32 reserved3; -} __packed; - -struct statistics_rx_ht_phy { - __le32 plcp_err; - __le32 overrun_err; - __le32 early_overrun_err; - __le32 crc32_good; - __le32 crc32_err; - __le32 mh_format_err; - __le32 agg_crc32_good; - __le32 agg_mpdu_cnt; - __le32 agg_cnt; - __le32 unsupport_mcs; -} __packed; - -#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) - -struct statistics_rx_non_phy { - __le32 bogus_cts; /* CTS received when not expecting CTS */ - __le32 bogus_ack; /* ACK received when not expecting ACK */ - __le32 non_bssid_frames; /* number of frames with BSSID that - * doesn't belong to the STA BSSID */ - __le32 filtered_frames; /* count frames that were dumped in the - * filtering process */ - __le32 non_channel_beacons; /* beacons with our bss id but not on - * our serving channel */ - __le32 channel_beacons; /* beacons with our bss id and in our - * serving channel */ - __le32 num_missed_bcon; /* number of missed beacons */ - __le32 adc_rx_saturation_time; /* count in 0.8us units the time the - * ADC was in saturation */ - __le32 ina_detection_search_time;/* total time (in 0.8us) searched - * for INA */ - __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ - __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ - __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ - __le32 interference_data_flag; /* flag for interference data - * availability. 1 when data is - * available. */ - __le32 channel_load; /* counts RX Enable time in uSec */ - __le32 dsp_false_alarms; /* DSP false alarm (both OFDM - * and CCK) counter */ - __le32 beacon_rssi_a; - __le32 beacon_rssi_b; - __le32 beacon_rssi_c; - __le32 beacon_energy_a; - __le32 beacon_energy_b; - __le32 beacon_energy_c; -} __packed; - -struct statistics_rx_non_phy_bt { - struct statistics_rx_non_phy common; - /* additional stats for bt */ - __le32 num_bt_kills; - __le32 reserved[2]; -} __packed; - -struct statistics_rx { - struct statistics_rx_phy ofdm; - struct statistics_rx_phy cck; - struct statistics_rx_non_phy general; - struct statistics_rx_ht_phy ofdm_ht; -} __packed; - -struct statistics_rx_bt { - struct statistics_rx_phy ofdm; - struct statistics_rx_phy cck; - struct statistics_rx_non_phy_bt general; - struct statistics_rx_ht_phy ofdm_ht; -} __packed; - -/** - * struct statistics_tx_power - current tx power - * - * @ant_a: current tx power on chain a in 1/2 dB step - * @ant_b: current tx power on chain b in 1/2 dB step - * @ant_c: current tx power on chain c in 1/2 dB step - */ -struct statistics_tx_power { - u8 ant_a; - u8 ant_b; - u8 ant_c; - u8 reserved; -} __packed; - -struct statistics_tx_non_phy_agg { - __le32 ba_timeout; - __le32 ba_reschedule_frames; - __le32 scd_query_agg_frame_cnt; - __le32 scd_query_no_agg; - __le32 scd_query_agg; - __le32 scd_query_mismatch; - __le32 frame_not_ready; - __le32 underrun; - __le32 bt_prio_kill; - __le32 rx_ba_rsp_cnt; -} __packed; - -struct statistics_tx { - __le32 preamble_cnt; - __le32 rx_detected_cnt; - __le32 bt_prio_defer_cnt; - __le32 bt_prio_kill_cnt; - __le32 few_bytes_cnt; - __le32 cts_timeout; - __le32 ack_timeout; - __le32 expected_ack_cnt; - __le32 actual_ack_cnt; - __le32 dump_msdu_cnt; - __le32 burst_abort_next_frame_mismatch_cnt; - __le32 burst_abort_missing_next_frame_cnt; - __le32 cts_timeout_collision; - __le32 ack_or_ba_timeout_collision; - struct statistics_tx_non_phy_agg agg; - /* - * "tx_power" are 
optional parameters provided by uCode, - * 6000 series is the only device provide the information, - * Those are reserved fields for all the other devices - */ - struct statistics_tx_power tx_power; - __le32 reserved1; -} __packed; - - -struct statistics_div { - __le32 tx_on_a; - __le32 tx_on_b; - __le32 exec_time; - __le32 probe_time; - __le32 reserved1; - __le32 reserved2; -} __packed; - -struct statistics_general_common { - __le32 temperature; /* radio temperature */ - __le32 temperature_m; /* radio voltage */ - struct statistics_dbg dbg; - __le32 sleep_time; - __le32 slots_out; - __le32 slots_idle; - __le32 ttl_timestamp; - struct statistics_div div; - __le32 rx_enable_counter; - /* - * num_of_sos_states: - * count the number of times we have to re-tune - * in order to get out of bad PHY status - */ - __le32 num_of_sos_states; -} __packed; - -struct statistics_bt_activity { - /* Tx statistics */ - __le32 hi_priority_tx_req_cnt; - __le32 hi_priority_tx_denied_cnt; - __le32 lo_priority_tx_req_cnt; - __le32 lo_priority_tx_denied_cnt; - /* Rx statistics */ - __le32 hi_priority_rx_req_cnt; - __le32 hi_priority_rx_denied_cnt; - __le32 lo_priority_rx_req_cnt; - __le32 lo_priority_rx_denied_cnt; -} __packed; - -struct statistics_general { - struct statistics_general_common common; - __le32 reserved2; - __le32 reserved3; -} __packed; - -struct statistics_general_bt { - struct statistics_general_common common; - struct statistics_bt_activity activity; - __le32 reserved2; - __le32 reserved3; -} __packed; - -#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) -#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) -#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) - -/* - * REPLY_STATISTICS_CMD = 0x9c, - * all devices identical. - * - * This command triggers an immediate response containing uCode statistics. - * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. - * - * If the CLEAR_STATS configuration flag is set, uCode will clear its - * internal copy of the statistics (counters) after issuing the response. - * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). - * - * If the DISABLE_NOTIF configuration flag is set, uCode will not issue - * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag - * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. - */ -#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ -#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ -struct iwl_statistics_cmd { - __le32 configuration_flags; /* IWL_STATS_CONF_* */ -} __packed; - -/* - * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) - * - * By default, uCode issues this notification after receiving a beacon - * while associated. To disable this behavior, set DISABLE_NOTIF flag in the - * REPLY_STATISTICS_CMD 0x9c, above. - * - * Statistics counters continue to increment beacon after beacon, but are - * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD - * 0x9c with CLEAR_STATS bit set (see above). - * - * uCode also issues this notification during scans. uCode clears statistics - * appropriately so that each notification contains statistics for only the - * one channel that has just been scanned. 
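For illustration only (not part of this patch): requesting an immediate statistics snapshot with the flags defined above might look like this; iwl_dvm_send_cmd_pdu() is assumed to be the usual DVM command helper.

/* Illustrative only: poll uCode statistics, optionally clearing the
 * internal counters after the response (IWL_STATS_CONF_CLEAR_STATS). */
static int iwl_example_request_statistics(struct iwl_priv *priv, bool clear)
{
	struct iwl_statistics_cmd cmd = {
		.configuration_flags = clear ? IWL_STATS_CONF_CLEAR_STATS
					     : cpu_to_le32(0),
	};

	return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
				    sizeof(cmd), &cmd);
}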
- */ -#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) -#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) - -struct iwl_notif_statistics { - __le32 flag; - struct statistics_rx rx; - struct statistics_tx tx; - struct statistics_general general; -} __packed; - -struct iwl_bt_notif_statistics { - __le32 flag; - struct statistics_rx_bt rx; - struct statistics_tx tx; - struct statistics_general_bt general; -} __packed; - -/* - * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) - * - * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed - * in regardless of how many missed beacons, which mean when driver receive the - * notification, inside the command, it can find all the beacons information - * which include number of total missed beacons, number of consecutive missed - * beacons, number of beacons received and number of beacons expected to - * receive. - * - * If uCode detected consecutive_missed_beacons > 5, it will reset the radio - * in order to bring the radio/PHY back to working state; which has no relation - * to when driver will perform sensitivity calibration. - * - * Driver should set it own missed_beacon_threshold to decide when to perform - * sensitivity calibration based on number of consecutive missed beacons in - * order to improve overall performance, especially in noisy environment. - * - */ - -#define IWL_MISSED_BEACON_THRESHOLD_MIN (1) -#define IWL_MISSED_BEACON_THRESHOLD_DEF (5) -#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF - -struct iwl_missed_beacon_notif { - __le32 consecutive_missed_beacons; - __le32 total_missed_becons; - __le32 num_expected_beacons; - __le32 num_recvd_beacons; -} __packed; - - -/****************************************************************************** - * (11) - * Rx Calibration Commands: - * - * With the uCode used for open source drivers, most Tx calibration (except - * for Tx Power) and most Rx calibration is done by uCode during the - * "initialize" phase of uCode boot. Driver must calibrate only: - * - * 1) Tx power (depends on temperature), described elsewhere - * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas) - * 3) Receiver sensitivity (to optimize signal detection) - * - *****************************************************************************/ - -/** - * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) - * - * This command sets up the Rx signal detector for a sensitivity level that - * is high enough to lock onto all signals within the associated network, - * but low enough to ignore signals that are below a certain threshold, so as - * not to have too many "false alarms". False alarms are signals that the - * Rx DSP tries to lock onto, but then discards after determining that they - * are noise. - * - * The optimum number of false alarms is between 5 and 50 per 200 TUs - * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e. - * time listening, not transmitting). Driver must adjust sensitivity so that - * the ratio of actual false alarms to actual Rx time falls within this range. - * - * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each - * received beacon. These provide information to the driver to analyze the - * sensitivity. Don't analyze statistics that come in from scanning, or any - * other non-associated-network source. 
Pertinent statistics include: - * - * From "general" statistics (struct statistics_rx_non_phy): - * - * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) - * Measure of energy of desired signal. Used for establishing a level - * below which the device does not detect signals. - * - * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB) - * Measure of background noise in silent period after beacon. - * - * channel_load - * uSecs of actual Rx time during beacon period (varies according to - * how much time was spent transmitting). - * - * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: - * - * false_alarm_cnt - * Signal locks abandoned early (before phy-level header). - * - * plcp_err - * Signal locks abandoned late (during phy-level header). - * - * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from - * beacon to beacon, i.e. each value is an accumulation of all errors - * before and including the latest beacon. Values will wrap around to 0 - * after counting up to 2^32 - 1. Driver must differentiate vs. - * previous beacon's values to determine # false alarms in the current - * beacon period. - * - * Total number of false alarms = false_alarms + plcp_errs - * - * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd - * (notice that the start points for OFDM are at or close to settings for - * maximum sensitivity): - * - * START / MIN / MAX - * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 - * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 - * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 - * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 - * - * If actual rate of OFDM false alarms (+ plcp_errors) is too high - * (greater than 50 for each 204.8 msecs listening), reduce sensitivity - * by *adding* 1 to all 4 of the table entries above, up to the max for - * each entry. Conversely, if false alarm rate is too low (less than 5 - * for each 204.8 msecs listening), *subtract* 1 from each entry to - * increase sensitivity. - * - * For CCK sensitivity, keep track of the following: - * - * 1). 20-beacon history of maximum background noise, indicated by - * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the - * 3 receivers. For any given beacon, the "silence reference" is - * the maximum of last 60 samples (20 beacons * 3 receivers). - * - * 2). 10-beacon history of strongest signal level, as indicated - * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers, - * i.e. the strength of the signal through the best receiver at the - * moment. These measurements are "upside down", with lower values - * for stronger signals, so max energy will be *minimum* value. - * - * Then for any given beacon, the driver must determine the *weakest* - * of the strongest signals; this is the minimum level that needs to be - * successfully detected, when using the best receiver at the moment. - * "Max cck energy" is the maximum (higher value means lower energy!) - * of the last 10 minima. Once this is determined, driver must add - * a little margin by adding "6" to it. - * - * 3). Number of consecutive beacon periods with too few false alarms. - * Reset this to 0 at the first beacon period that falls within the - * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). 
- * - * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd - * (notice that the start points for CCK are at maximum sensitivity): - * - * START / MIN / MAX - * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 - * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 - * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 - * - * If actual rate of CCK false alarms (+ plcp_errors) is too high - * (greater than 50 for each 204.8 msecs listening), method for reducing - * sensitivity is: - * - * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, - * up to max 400. - * - * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, - * sensitivity has been reduced a significant amount; bring it up to - * a moderate 161. Otherwise, *add* 3, up to max 200. - * - * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, - * sensitivity has been reduced only a moderate or small amount; - * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, - * down to min 0. Otherwise (if gain has been significantly reduced), - * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. - * - * b) Save a snapshot of the "silence reference". - * - * If actual rate of CCK false alarms (+ plcp_errors) is too low - * (less than 5 for each 204.8 msecs listening), method for increasing - * sensitivity is used only if: - * - * 1a) Previous beacon did not have too many false alarms - * 1b) AND difference between previous "silence reference" and current - * "silence reference" (prev - current) is 2 or more, - * OR 2) 100 or more consecutive beacon periods have had rate of - * less than 5 false alarms per 204.8 milliseconds rx time. - * - * Method for increasing sensitivity: - * - * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, - * down to min 125. - * - * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, - * down to min 200. - * - * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. - * - * If actual rate of CCK false alarms (+ plcp_errors) is within good range - * (between 5 and 50 for each 204.8 msecs listening): - * - * 1) Save a snapshot of the silence reference. - * - * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), - * give some extra margin to energy threshold by *subtracting* 8 - * from value in HD_MIN_ENERGY_CCK_DET_INDEX. - * - * For all cases (too few, too many, good range), make sure that the CCK - * detection threshold (energy) is below the energy level for robust - * detection over the past 10 beacon periods, the "Max cck energy". - * Lower values mean higher energy; this means making sure that the value - * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 
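A minimal sketch (not part of this patch) of one "too many CCK false alarms" step from the rules above, operating on a host-side copy of the table and using the HD_* index macros defined right after this comment; the silence-reference snapshot of step 3b is left out.

/* Illustrative only: desensitize CCK per rules 1), 2) and 3a) above. */
static void iwl_example_cck_desense(u16 *tbl)
{
	/* 1) raise the MRC auto-correlation threshold, max 400 */
	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
		min_t(u16, tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] + 3, 400);

	/* 2) raise the non-MRC threshold: jump to 161 if still low, else +3 */
	if (tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] < 160)
		tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = 161;
	else
		tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
			min_t(u16, tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] + 3, 200);

	/* 3a) gain only moderately reduced: relax the CCK energy threshold */
	if (tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] > 160)
		tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
			tbl[HD_MIN_ENERGY_CCK_DET_INDEX] >= 2 ?
			tbl[HD_MIN_ENERGY_CCK_DET_INDEX] - 2 : 0;
}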
- * - */ - -/* - * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) - */ -#define HD_TABLE_SIZE (11) /* number of entries */ -#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ -#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) -#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) -#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) -#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) -#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) -#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) -#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) -#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) -#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) -#define HD_OFDM_ENERGY_TH_IN_INDEX (10) - -/* - * Additional table entries in enhance SENSITIVITY_CMD - */ -#define HD_INA_NON_SQUARE_DET_OFDM_INDEX (11) -#define HD_INA_NON_SQUARE_DET_CCK_INDEX (12) -#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX (13) -#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX (14) -#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (15) -#define HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX (16) -#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX (17) -#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX (18) -#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (19) -#define HD_CCK_NON_SQUARE_DET_SLOPE_INDEX (20) -#define HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX (21) -#define HD_RESERVED (22) - -/* number of entries for enhanced tbl */ -#define ENHANCE_HD_TABLE_SIZE (23) - -/* number of additional entries for enhanced tbl */ -#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE) - -#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V1 cpu_to_le16(0) -#define HD_INA_NON_SQUARE_DET_CCK_DATA_V1 cpu_to_le16(0) -#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1 cpu_to_le16(0) -#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(668) -#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4) -#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(486) -#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(37) -#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(853) -#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4) -#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(476) -#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(99) - -#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V2 cpu_to_le16(1) -#define HD_INA_NON_SQUARE_DET_CCK_DATA_V2 cpu_to_le16(1) -#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2 cpu_to_le16(1) -#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(600) -#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(40) -#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(486) -#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(45) -#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(853) -#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(60) -#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(476) -#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(99) - - -/* Control field in struct iwl_sensitivity_cmd */ -#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) -#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) - -/** - * struct iwl_sensitivity_cmd - * @control: (1) updates working table, (0) updates default table - * @table: energy threshold values, use HD_* as index into table - * - * Always use "1" in "control" to update uCode's working table and DSP. 
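For illustration only (not part of this patch): the OFDM adjustment rule described earlier, expressed against a host-side copy of the table using the index macros above. "fa" is false alarms plus PLCP errors seen in the last 204.8 ms of rx time; the limits are the START/MIN/MAX values quoted in the comment.

/* Illustrative only: step each OFDM auto-correlation entry by one,
 * clamped to its documented range. */
static void iwl_example_adjust_ofdm(u16 *tbl, u32 fa)
{
	static const struct { u8 idx; u16 min, max; } ofdm[] = {
		{ HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX,		 85, 120 },
		{ HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX,	170, 210 },
		{ HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX,		105, 140 },
		{ HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX,	220, 270 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ofdm); i++) {
		if (fa > 50 && tbl[ofdm[i].idx] < ofdm[i].max)
			tbl[ofdm[i].idx]++;	/* too many false alarms */
		else if (fa < 5 && tbl[ofdm[i].idx] > ofdm[i].min)
			tbl[ofdm[i].idx]--;	/* too few, regain sensitivity */
	}
}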
- */ -struct iwl_sensitivity_cmd { - __le16 control; /* always use "1" */ - __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ -} __packed; - -/* - * - */ -struct iwl_enhance_sensitivity_cmd { - __le16 control; /* always use "1" */ - __le16 enhance_table[ENHANCE_HD_TABLE_SIZE]; /* use HD_* as index */ -} __packed; - - -/** - * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) - * - * This command sets the relative gains of agn device's 3 radio receiver chains. - * - * After the first association, driver should accumulate signal and noise - * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 - * beacons from the associated network (don't collect statistics that come - * in from scanning, or any other non-network source). - * - * DISCONNECTED ANTENNA: - * - * Driver should determine which antennas are actually connected, by comparing - * average beacon signal levels for the 3 Rx chains. Accumulate (add) the - * following values over 20 beacons, one accumulator for each of the chains - * a/b/c, from struct statistics_rx_non_phy: - * - * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) - * - * Find the strongest signal from among a/b/c. Compare the other two to the - * strongest. If any signal is more than 15 dB (times 20, unless you - * divide the accumulated values by 20) below the strongest, the driver - * considers that antenna to be disconnected, and should not try to use that - * antenna/chain for Rx or Tx. If both A and B seem to be disconnected, - * driver should declare the stronger one as connected, and attempt to use it - * (A and B are the only 2 Tx chains!). - * - * - * RX BALANCE: - * - * Driver should balance the 3 receivers (but just the ones that are connected - * to antennas, see above) for gain, by comparing the average signal levels - * detected during the silence after each beacon (background noise). - * Accumulate (add) the following values over 20 beacons, one accumulator for - * each of the chains a/b/c, from struct statistics_rx_non_phy: - * - * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) - * - * Find the weakest background noise level from among a/b/c. This Rx chain - * will be the reference, with 0 gain adjustment. Attenuate other channels by - * finding noise difference: - * - * (accum_noise[i] - accum_noise[reference]) / 30 - * - * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. - * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the - * driver should limit the difference results to a range of 0-3 (0-4.5 dB), - * and set bit 2 to indicate "reduce gain". The value for the reference - * (weakest) chain should be "0". - * - * diff_gain_[abc] bit fields: - * 2: (1) reduce gain, (0) increase gain - * 1-0: amount of gain, units of 1.5 dB - */ - -/* Phy calibration command for series */ -enum { - IWL_PHY_CALIBRATE_DC_CMD = 8, - IWL_PHY_CALIBRATE_LO_CMD = 9, - IWL_PHY_CALIBRATE_TX_IQ_CMD = 11, - IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, - IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16, - IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17, - IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD = 18, -}; - -/* This enum defines the bitmap of various calibrations to enable in both - * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
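As a sketch only (not part of this patch), the RX balance arithmetic above, 20-beacon noise accumulators turned into the diff_gain_[abc] encoding, could look like this; the reference chain is the one with the lowest accumulated noise.

/* Illustrative only: clamp to 0..3 steps of 1.5 dB, bit 2 = reduce gain. */
static u8 iwl_example_diff_gain(u32 accum_noise_chain, u32 accum_noise_ref)
{
	u32 delta = (accum_noise_chain - accum_noise_ref) / 30;

	if (delta == 0)
		return 0;		/* reference (quietest) chain */
	if (delta > 3)
		delta = 3;		/* limit to 4.5 dB */
	return BIT(2) | delta;		/* attenuate this chain */
}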
- */ -enum iwl_ucode_calib_cfg { - IWL_CALIB_CFG_RX_BB_IDX = BIT(0), - IWL_CALIB_CFG_DC_IDX = BIT(1), - IWL_CALIB_CFG_LO_IDX = BIT(2), - IWL_CALIB_CFG_TX_IQ_IDX = BIT(3), - IWL_CALIB_CFG_RX_IQ_IDX = BIT(4), - IWL_CALIB_CFG_NOISE_IDX = BIT(5), - IWL_CALIB_CFG_CRYSTAL_IDX = BIT(6), - IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(7), - IWL_CALIB_CFG_PAPD_IDX = BIT(8), - IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(9), - IWL_CALIB_CFG_TX_PWR_IDX = BIT(10), -}; - -#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \ - IWL_CALIB_CFG_DC_IDX | \ - IWL_CALIB_CFG_LO_IDX | \ - IWL_CALIB_CFG_TX_IQ_IDX | \ - IWL_CALIB_CFG_RX_IQ_IDX | \ - IWL_CALIB_CFG_CRYSTAL_IDX) - -#define IWL_CALIB_RT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \ - IWL_CALIB_CFG_DC_IDX | \ - IWL_CALIB_CFG_LO_IDX | \ - IWL_CALIB_CFG_TX_IQ_IDX | \ - IWL_CALIB_CFG_RX_IQ_IDX | \ - IWL_CALIB_CFG_TEMPERATURE_IDX | \ - IWL_CALIB_CFG_PAPD_IDX | \ - IWL_CALIB_CFG_TX_PWR_IDX | \ - IWL_CALIB_CFG_CRYSTAL_IDX) - -#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0)) - -struct iwl_calib_cfg_elmnt_s { - __le32 is_enable; - __le32 start; - __le32 send_res; - __le32 apply_res; - __le32 reserved; -} __packed; - -struct iwl_calib_cfg_status_s { - struct iwl_calib_cfg_elmnt_s once; - struct iwl_calib_cfg_elmnt_s perd; - __le32 flags; -} __packed; - -struct iwl_calib_cfg_cmd { - struct iwl_calib_cfg_status_s ucd_calib_cfg; - struct iwl_calib_cfg_status_s drv_calib_cfg; - __le32 reserved1; -} __packed; - -struct iwl_calib_hdr { - u8 op_code; - u8 first_group; - u8 groups_num; - u8 data_valid; -} __packed; - -struct iwl_calib_cmd { - struct iwl_calib_hdr hdr; - u8 data[0]; -} __packed; - -struct iwl_calib_xtal_freq_cmd { - struct iwl_calib_hdr hdr; - u8 cap_pin1; - u8 cap_pin2; - u8 pad[2]; -} __packed; - -#define DEFAULT_RADIO_SENSOR_OFFSET cpu_to_le16(2700) -struct iwl_calib_temperature_offset_cmd { - struct iwl_calib_hdr hdr; - __le16 radio_sensor_offset; - __le16 reserved; -} __packed; - -struct iwl_calib_temperature_offset_v2_cmd { - struct iwl_calib_hdr hdr; - __le16 radio_sensor_offset_high; - __le16 radio_sensor_offset_low; - __le16 burntVoltageRef; - __le16 reserved; -} __packed; - -/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ -struct iwl_calib_chain_noise_reset_cmd { - struct iwl_calib_hdr hdr; - u8 data[0]; -}; - -/* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */ -struct iwl_calib_chain_noise_gain_cmd { - struct iwl_calib_hdr hdr; - u8 delta_gain_1; - u8 delta_gain_2; - u8 pad[2]; -} __packed; - -/****************************************************************************** - * (12) - * Miscellaneous Commands: - * - *****************************************************************************/ - -/* - * LEDs Command & Response - * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) - * - * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), - * this command turns it on or off, or sets up a periodic blinking cycle. 
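For illustration only (not part of this patch): filling the REPLY_LEDS_CMD payload defined just below for a symmetric Link-LED blink; the field meanings follow the comments in the structure.

/* Illustrative only: id 2 = Link LED, equal on/off interval counts. */
static void iwl_example_fill_led_blink(struct iwl_led_cmd *led,
				       u32 interval_us, u8 blink_units)
{
	led->interval = cpu_to_le32(interval_us);
	led->id = 2;			/* 1: Activity, 2: Link, 3: Tech */
	led->on = blink_units;		/* 0 here would force the LED off */
	led->off = blink_units;		/* 0 with on > 0 keeps it solid on */
	led->reserved = 0;
}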
- */ -struct iwl_led_cmd { - __le32 interval; /* "interval" in uSec */ - u8 id; /* 1: Activity, 2: Link, 3: Tech */ - u8 off; /* # intervals off while blinking; - * "0", with >0 "on" value, turns LED on */ - u8 on; /* # intervals on while blinking; - * "0", regardless of "off", turns LED off */ - u8 reserved; -} __packed; - -/* - * station priority table entries - * also used as potential "events" value for both - * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD - */ - -/* - * COEX events entry flag masks - * RP - Requested Priority - * WP - Win Medium Priority: priority assigned when the contention has been won - */ -#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG (0x1) -#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG (0x2) -#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG (0x4) - -#define COEX_CU_UNASSOC_IDLE_RP 4 -#define COEX_CU_UNASSOC_MANUAL_SCAN_RP 4 -#define COEX_CU_UNASSOC_AUTO_SCAN_RP 4 -#define COEX_CU_CALIBRATION_RP 4 -#define COEX_CU_PERIODIC_CALIBRATION_RP 4 -#define COEX_CU_CONNECTION_ESTAB_RP 4 -#define COEX_CU_ASSOCIATED_IDLE_RP 4 -#define COEX_CU_ASSOC_MANUAL_SCAN_RP 4 -#define COEX_CU_ASSOC_AUTO_SCAN_RP 4 -#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP 4 -#define COEX_CU_RF_ON_RP 6 -#define COEX_CU_RF_OFF_RP 4 -#define COEX_CU_STAND_ALONE_DEBUG_RP 6 -#define COEX_CU_IPAN_ASSOC_LEVEL_RP 4 -#define COEX_CU_RSRVD1_RP 4 -#define COEX_CU_RSRVD2_RP 4 - -#define COEX_CU_UNASSOC_IDLE_WP 3 -#define COEX_CU_UNASSOC_MANUAL_SCAN_WP 3 -#define COEX_CU_UNASSOC_AUTO_SCAN_WP 3 -#define COEX_CU_CALIBRATION_WP 3 -#define COEX_CU_PERIODIC_CALIBRATION_WP 3 -#define COEX_CU_CONNECTION_ESTAB_WP 3 -#define COEX_CU_ASSOCIATED_IDLE_WP 3 -#define COEX_CU_ASSOC_MANUAL_SCAN_WP 3 -#define COEX_CU_ASSOC_AUTO_SCAN_WP 3 -#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP 3 -#define COEX_CU_RF_ON_WP 3 -#define COEX_CU_RF_OFF_WP 3 -#define COEX_CU_STAND_ALONE_DEBUG_WP 6 -#define COEX_CU_IPAN_ASSOC_LEVEL_WP 3 -#define COEX_CU_RSRVD1_WP 3 -#define COEX_CU_RSRVD2_WP 3 - -#define COEX_UNASSOC_IDLE_FLAGS 0 -#define COEX_UNASSOC_MANUAL_SCAN_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) -#define COEX_UNASSOC_AUTO_SCAN_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) -#define COEX_CALIBRATION_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) -#define COEX_PERIODIC_CALIBRATION_FLAGS 0 -/* - * COEX_CONNECTION_ESTAB: - * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. - */ -#define COEX_CONNECTION_ESTAB_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ - COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) -#define COEX_ASSOCIATED_IDLE_FLAGS 0 -#define COEX_ASSOC_MANUAL_SCAN_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) -#define COEX_ASSOC_AUTO_SCAN_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) -#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0 -#define COEX_RF_ON_FLAGS 0 -#define COEX_RF_OFF_FLAGS 0 -#define COEX_STAND_ALONE_DEBUG_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) -#define COEX_IPAN_ASSOC_LEVEL_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ - COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) -#define COEX_RSRVD1_FLAGS 0 -#define COEX_RSRVD2_FLAGS 0 -/* - * COEX_CU_RF_ON is the event wrapping all radio ownership. - * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. 
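A small sketch, not part of this patch, of decoding COEX_MEDIUM_NOTIFICATION with the status layout and masks defined further below; it returns one of the COEX_MEDIUM_* values and reports whether the medium-change bit was set.

/* Illustrative only: split the notification status word into its
 * 3-bit medium state and the change indication. */
static u32 iwl_example_coex_medium_state(const struct iwl_coex_medium_notification *notif,
					 bool *changed)
{
	u32 status = le32_to_cpu(notif->status);

	*changed = !!(status & COEX_MEDIUM_CHANGED_MSK);
	return status & COEX_MEDIUM_MSK;
}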
- */ -#define COEX_CU_RF_ON_FLAGS \ - (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ - COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ - COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) - - -enum { - /* un-association part */ - COEX_UNASSOC_IDLE = 0, - COEX_UNASSOC_MANUAL_SCAN = 1, - COEX_UNASSOC_AUTO_SCAN = 2, - /* calibration */ - COEX_CALIBRATION = 3, - COEX_PERIODIC_CALIBRATION = 4, - /* connection */ - COEX_CONNECTION_ESTAB = 5, - /* association part */ - COEX_ASSOCIATED_IDLE = 6, - COEX_ASSOC_MANUAL_SCAN = 7, - COEX_ASSOC_AUTO_SCAN = 8, - COEX_ASSOC_ACTIVE_LEVEL = 9, - /* RF ON/OFF */ - COEX_RF_ON = 10, - COEX_RF_OFF = 11, - COEX_STAND_ALONE_DEBUG = 12, - /* IPAN */ - COEX_IPAN_ASSOC_LEVEL = 13, - /* reserved */ - COEX_RSRVD1 = 14, - COEX_RSRVD2 = 15, - COEX_NUM_OF_EVENTS = 16 -}; - -/* - * Coexistence WIFI/WIMAX Command - * COEX_PRIORITY_TABLE_CMD = 0x5a - * - */ -struct iwl_wimax_coex_event_entry { - u8 request_prio; - u8 win_medium_prio; - u8 reserved; - u8 flags; -} __packed; - -/* COEX flag masks */ - -/* Station table is valid */ -#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1) -/* UnMask wake up src at unassociated sleep */ -#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4) -/* UnMask wake up src at associated sleep */ -#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8) -/* Enable CoEx feature. */ -#define COEX_FLAGS_COEX_ENABLE_MSK (0x80) - -struct iwl_wimax_coex_cmd { - u8 flags; - u8 reserved[3]; - struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; -} __packed; - -/* - * Coexistence MEDIUM NOTIFICATION - * COEX_MEDIUM_NOTIFICATION = 0x5b - * - * notification from uCode to host to indicate medium changes - * - */ -/* - * status field - * bit 0 - 2: medium status - * bit 3: medium change indication - * bit 4 - 31: reserved - */ -/* status option values, (0 - 2 bits) */ -#define COEX_MEDIUM_BUSY (0x0) /* radio belongs to WiMAX */ -#define COEX_MEDIUM_ACTIVE (0x1) /* radio belongs to WiFi */ -#define COEX_MEDIUM_PRE_RELEASE (0x2) /* received radio release */ -#define COEX_MEDIUM_MSK (0x7) - -/* send notification status (1 bit) */ -#define COEX_MEDIUM_CHANGED (0x8) -#define COEX_MEDIUM_CHANGED_MSK (0x8) -#define COEX_MEDIUM_SHIFT (3) - -struct iwl_coex_medium_notification { - __le32 status; - __le32 events; -} __packed; - -/* - * Coexistence EVENT Command - * COEX_EVENT_CMD = 0x5c - * - * send from host to uCode for coex event request. - */ -/* flags options */ -#define COEX_EVENT_REQUEST_MSK (0x1) - -struct iwl_coex_event_cmd { - u8 flags; - u8 event; - __le16 reserved; -} __packed; - -struct iwl_coex_event_resp { - __le32 status; -} __packed; - - -/****************************************************************************** - * Bluetooth Coexistence commands - * - *****************************************************************************/ - -/* - * BT Status notification - * REPLY_BT_COEX_PROFILE_NOTIF = 0xce - */ -enum iwl_bt_coex_profile_traffic_load { - IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0, - IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1, - IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2, - IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3, -/* - * There are no more even though below is a u8, the - * indication from the BT device only has two bits. 
- */ -}; - -#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1 -#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2 - -/* BT UART message - Share Part (BT -> WiFi) */ -#define BT_UART_MSG_FRAME1MSGTYPE_POS (0) -#define BT_UART_MSG_FRAME1MSGTYPE_MSK \ - (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS) -#define BT_UART_MSG_FRAME1SSN_POS (3) -#define BT_UART_MSG_FRAME1SSN_MSK \ - (0x3 << BT_UART_MSG_FRAME1SSN_POS) -#define BT_UART_MSG_FRAME1UPDATEREQ_POS (5) -#define BT_UART_MSG_FRAME1UPDATEREQ_MSK \ - (0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS) -#define BT_UART_MSG_FRAME1RESERVED_POS (6) -#define BT_UART_MSG_FRAME1RESERVED_MSK \ - (0x3 << BT_UART_MSG_FRAME1RESERVED_POS) - -#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS (0) -#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK \ - (0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS) -#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS (2) -#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK \ - (0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS) -#define BT_UART_MSG_FRAME2CHLSEQN_POS (4) -#define BT_UART_MSG_FRAME2CHLSEQN_MSK \ - (0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS) -#define BT_UART_MSG_FRAME2INBAND_POS (5) -#define BT_UART_MSG_FRAME2INBAND_MSK \ - (0x1 << BT_UART_MSG_FRAME2INBAND_POS) -#define BT_UART_MSG_FRAME2RESERVED_POS (6) -#define BT_UART_MSG_FRAME2RESERVED_MSK \ - (0x3 << BT_UART_MSG_FRAME2RESERVED_POS) - -#define BT_UART_MSG_FRAME3SCOESCO_POS (0) -#define BT_UART_MSG_FRAME3SCOESCO_MSK \ - (0x1 << BT_UART_MSG_FRAME3SCOESCO_POS) -#define BT_UART_MSG_FRAME3SNIFF_POS (1) -#define BT_UART_MSG_FRAME3SNIFF_MSK \ - (0x1 << BT_UART_MSG_FRAME3SNIFF_POS) -#define BT_UART_MSG_FRAME3A2DP_POS (2) -#define BT_UART_MSG_FRAME3A2DP_MSK \ - (0x1 << BT_UART_MSG_FRAME3A2DP_POS) -#define BT_UART_MSG_FRAME3ACL_POS (3) -#define BT_UART_MSG_FRAME3ACL_MSK \ - (0x1 << BT_UART_MSG_FRAME3ACL_POS) -#define BT_UART_MSG_FRAME3MASTER_POS (4) -#define BT_UART_MSG_FRAME3MASTER_MSK \ - (0x1 << BT_UART_MSG_FRAME3MASTER_POS) -#define BT_UART_MSG_FRAME3OBEX_POS (5) -#define BT_UART_MSG_FRAME3OBEX_MSK \ - (0x1 << BT_UART_MSG_FRAME3OBEX_POS) -#define BT_UART_MSG_FRAME3RESERVED_POS (6) -#define BT_UART_MSG_FRAME3RESERVED_MSK \ - (0x3 << BT_UART_MSG_FRAME3RESERVED_POS) - -#define BT_UART_MSG_FRAME4IDLEDURATION_POS (0) -#define BT_UART_MSG_FRAME4IDLEDURATION_MSK \ - (0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS) -#define BT_UART_MSG_FRAME4RESERVED_POS (6) -#define BT_UART_MSG_FRAME4RESERVED_MSK \ - (0x3 << BT_UART_MSG_FRAME4RESERVED_POS) - -#define BT_UART_MSG_FRAME5TXACTIVITY_POS (0) -#define BT_UART_MSG_FRAME5TXACTIVITY_MSK \ - (0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS) -#define BT_UART_MSG_FRAME5RXACTIVITY_POS (2) -#define BT_UART_MSG_FRAME5RXACTIVITY_MSK \ - (0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS) -#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS (4) -#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK \ - (0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS) -#define BT_UART_MSG_FRAME5RESERVED_POS (6) -#define BT_UART_MSG_FRAME5RESERVED_MSK \ - (0x3 << BT_UART_MSG_FRAME5RESERVED_POS) - -#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS (0) -#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK \ - (0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS) -#define BT_UART_MSG_FRAME6DISCOVERABLE_POS (5) -#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK \ - (0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS) -#define BT_UART_MSG_FRAME6RESERVED_POS (6) -#define BT_UART_MSG_FRAME6RESERVED_MSK \ - (0x3 << BT_UART_MSG_FRAME6RESERVED_POS) - -#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0) -#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \ - (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS) -#define 
BT_UART_MSG_FRAME7PAGE_POS (3) -#define BT_UART_MSG_FRAME7PAGE_MSK \ - (0x1 << BT_UART_MSG_FRAME7PAGE_POS) -#define BT_UART_MSG_FRAME7INQUIRY_POS (4) -#define BT_UART_MSG_FRAME7INQUIRY_MSK \ - (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS) -#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5) -#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \ - (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS) -#define BT_UART_MSG_FRAME7RESERVED_POS (6) -#define BT_UART_MSG_FRAME7RESERVED_MSK \ - (0x3 << BT_UART_MSG_FRAME7RESERVED_POS) - -/* BT Session Activity 2 UART message (BT -> WiFi) */ -#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5) -#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \ - (0x1< - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include "iwl-debug.h" -#include "iwl-io.h" -#include "dev.h" -#include "agn.h" - -/* create and remove of files */ -#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, priv, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_u32(#name, mode, \ - parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -/* file operation */ -#define DEBUGFS_READ_FILE_OPS(name) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .read = iwl_dbgfs_##name##_read, \ - .open = simple_open, \ - .llseek = generic_file_llseek, \ -}; - -#define DEBUGFS_WRITE_FILE_OPS(name) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .write = iwl_dbgfs_##name##_write, \ - .open = simple_open, \ - .llseek = generic_file_llseek, \ -}; - - -#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .write = iwl_dbgfs_##name##_write, \ - .read = iwl_dbgfs_##name##_read, \ - .open = simple_open, \ - .llseek = generic_file_llseek, \ -}; - -static ssize_t iwl_dbgfs_sram_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - u32 val = 0; - char *buf; - ssize_t ret; - int i = 0; - bool device_format = false; - int offset = 0; - int len = 0; - int pos = 0; - int sram; - struct iwl_priv *priv = file->private_data; - const struct fw_img *img; - size_t bufsz; - - if (!iwl_is_ready_rf(priv)) - return -EAGAIN; - - /* default is to dump the entire data segment */ - if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { - priv->dbgfs_sram_offset = 0x800000; - if (!priv->ucode_loaded) - return -EINVAL; - img = &priv->fw->img[priv->cur_ucode]; - priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; - } - len = priv->dbgfs_sram_len; - - if (len == -4) { - device_format = true; - len = 4; - } - - bufsz = 50 + len * 4; - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", - len); - pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", - priv->dbgfs_sram_offset); - - /* 
adjust sram address since reads are only on even u32 boundaries */ - offset = priv->dbgfs_sram_offset & 0x3; - sram = priv->dbgfs_sram_offset & ~0x3; - - /* read the first u32 from sram */ - val = iwl_trans_read_mem32(priv->trans, sram); - - for (; len; len--) { - /* put the address at the start of every line */ - if (i == 0) - pos += scnprintf(buf + pos, bufsz - pos, - "%08X: ", sram + offset); - - if (device_format) - pos += scnprintf(buf + pos, bufsz - pos, - "%02x", (val >> (8 * (3 - offset))) & 0xff); - else - pos += scnprintf(buf + pos, bufsz - pos, - "%02x ", (val >> (8 * offset)) & 0xff); - - /* if all bytes processed, read the next u32 from sram */ - if (++offset == 4) { - sram += 4; - offset = 0; - val = iwl_trans_read_mem32(priv->trans, sram); - } - - /* put in extra spaces and split lines for human readability */ - if (++i == 16) { - i = 0; - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } else if (!(i & 7)) { - pos += scnprintf(buf + pos, bufsz - pos, " "); - } else if (!(i & 3)) { - pos += scnprintf(buf + pos, bufsz - pos, " "); - } - } - if (i) - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_sram_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[64]; - int buf_size; - u32 offset, len; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - if (sscanf(buf, "%x,%x", &offset, &len) == 2) { - priv->dbgfs_sram_offset = offset; - priv->dbgfs_sram_len = len; - } else if (sscanf(buf, "%x", &offset) == 1) { - priv->dbgfs_sram_offset = offset; - priv->dbgfs_sram_len = -4; - } else { - priv->dbgfs_sram_offset = 0; - priv->dbgfs_sram_len = 0; - } - - return count; -} - -static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - const struct fw_img *img = &priv->fw->img[IWL_UCODE_WOWLAN]; - - if (!priv->wowlan_sram) - return -ENODATA; - - return simple_read_from_buffer(user_buf, count, ppos, - priv->wowlan_sram, - img->sec[IWL_UCODE_SECTION_DATA].len); -} -static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - struct iwl_station_entry *station; - struct iwl_tid_data *tid_data; - char *buf; - int i, j, pos = 0; - ssize_t ret; - /* Add 30 for initial string */ - const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); - - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", - priv->num_stations); - - for (i = 0; i < IWLAGN_STATION_COUNT; i++) { - station = &priv->stations[i]; - if (!station->used) - continue; - pos += scnprintf(buf + pos, bufsz - pos, - "station %d - addr: %pM, flags: %#x\n", - i, station->sta.sta.addr, - station->sta.station_flags_msk); - pos += scnprintf(buf + pos, bufsz - pos, - "TID seqno next_rclmd " - "rate_n_flags state txq\n"); - - for (j = 0; j < IWL_MAX_TID_COUNT; j++) { - tid_data = &priv->tid_data[i][j]; - pos += scnprintf(buf + pos, bufsz - pos, - "%d: 0x%.4x 0x%.4x 0x%.8x " - "%d %.2d", - j, tid_data->seq_number, - tid_data->next_reclaimed, - tid_data->agg.rate_n_flags, - tid_data->agg.state, - tid_data->agg.txq_id); - - if 
(tid_data->agg.wait_for_ba) - pos += scnprintf(buf + pos, bufsz - pos, - " - waitforba"); - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } - - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_nvm_read(struct file *file, - char __user *user_buf, - size_t count, - loff_t *ppos) -{ - ssize_t ret; - struct iwl_priv *priv = file->private_data; - int pos = 0, ofs = 0, buf_size = 0; - const u8 *ptr; - char *buf; - u16 nvm_ver; - size_t eeprom_len = priv->eeprom_blob_size; - buf_size = 4 * eeprom_len + 256; - - if (eeprom_len % 16) - return -ENODATA; - - ptr = priv->eeprom_blob; - if (!ptr) - return -ENOMEM; - - /* 4 characters for byte 0xYY */ - buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - nvm_ver = priv->nvm_data->nvm_version; - pos += scnprintf(buf + pos, buf_size - pos, - "NVM version: 0x%x\n", nvm_ver); - for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { - pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n", - ofs, ptr + ofs); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - struct ieee80211_channel *channels = NULL; - const struct ieee80211_supported_band *supp_band = NULL; - int pos = 0, i, bufsz = PAGE_SIZE; - char *buf; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); - if (supp_band) { - channels = supp_band->channels; - - pos += scnprintf(buf + pos, bufsz - pos, - "Displaying %d channels in 2.4GHz band 802.11bg):\n", - supp_band->n_channels); - - for (i = 0; i < supp_band->n_channels; i++) - pos += scnprintf(buf + pos, bufsz - pos, - "%d: %ddBm: BSS%s%s, %s.\n", - channels[i].hw_value, - channels[i].max_power, - channels[i].flags & IEEE80211_CHAN_RADAR ? - " (IEEE 802.11h required)" : "", - ((channels[i].flags & IEEE80211_CHAN_NO_IR) - || (channels[i].flags & - IEEE80211_CHAN_RADAR)) ? "" : - ", IBSS", - channels[i].flags & - IEEE80211_CHAN_NO_IR ? - "passive only" : "active/passive"); - } - supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); - if (supp_band) { - channels = supp_band->channels; - - pos += scnprintf(buf + pos, bufsz - pos, - "Displaying %d channels in 5.2GHz band (802.11a)\n", - supp_band->n_channels); - - for (i = 0; i < supp_band->n_channels; i++) - pos += scnprintf(buf + pos, bufsz - pos, - "%d: %ddBm: BSS%s%s, %s.\n", - channels[i].hw_value, - channels[i].max_power, - channels[i].flags & IEEE80211_CHAN_RADAR ? - " (IEEE 802.11h required)" : "", - ((channels[i].flags & IEEE80211_CHAN_NO_IR) - || (channels[i].flags & - IEEE80211_CHAN_RADAR)) ? "" : - ", IBSS", - channels[i].flags & - IEEE80211_CHAN_NO_IR ? 
- "passive only" : "active/passive"); - } - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_status_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char buf[512]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", - test_bit(STATUS_RF_KILL_HW, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", - test_bit(STATUS_CT_KILL, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", - test_bit(STATUS_ALIVE, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", - test_bit(STATUS_READY, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", - test_bit(STATUS_EXIT_PENDING, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", - test_bit(STATUS_STATISTICS, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", - test_bit(STATUS_SCANNING, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", - test_bit(STATUS_SCAN_ABORTING, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", - test_bit(STATUS_SCAN_HW, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", - test_bit(STATUS_POWER_PMI, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", - test_bit(STATUS_FW_ERROR, &priv->status)); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = 24 * 64; /* 24 items * 64 char per item */ - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - for (cnt = 0; cnt < REPLY_MAX; cnt++) { - if (priv->rx_handlers_stats[cnt] > 0) - pos += scnprintf(buf + pos, bufsz - pos, - "\tRx handler[%36s]:\t\t %u\n", - iwl_dvm_get_cmd_string(cnt), - priv->rx_handlers_stats[cnt]); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - - char buf[8]; - int buf_size; - u32 reset_flag; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%x", &reset_flag) != 1) - return -EFAULT; - if (reset_flag == 0) - memset(&priv->rx_handlers_stats[0], 0, - sizeof(priv->rx_handlers_stats)); - - return count; -} - -static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - struct iwl_rxon_context *ctx; - int pos = 0, i; - char buf[256 * NUM_IWL_RXON_CTX]; - const size_t bufsz = sizeof(buf); - - for_each_context(priv, ctx) { - pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", - ctx->ctxid); - for (i = 0; i < AC_NUM; i++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\tcw_min\tcw_max\taifsn\ttxop\n"); - pos += scnprintf(buf + pos, bufsz - pos, - "AC[%d]\t%u\t%u\t%u\t%u\n", i, - 
ctx->qos_data.def_qos_parm.ac[i].cw_min, - ctx->qos_data.def_qos_parm.ac[i].cw_max, - ctx->qos_data.def_qos_parm.ac[i].aifsn, - ctx->qos_data.def_qos_parm.ac[i].edca_txop); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - struct iwl_tt_restriction *restriction; - char buf[100]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, - "Thermal Throttling Mode: %s\n", - tt->advanced_tt ? "Advance" : "Legacy"); - pos += scnprintf(buf + pos, bufsz - pos, - "Thermal Throttling State: %d\n", - tt->state); - if (tt->advanced_tt) { - restriction = tt->restriction + tt->state; - pos += scnprintf(buf + pos, bufsz - pos, - "Tx mode: %d\n", - restriction->tx_stream); - pos += scnprintf(buf + pos, bufsz - pos, - "Rx mode: %d\n", - restriction->rx_stream); - pos += scnprintf(buf + pos, bufsz - pos, - "HT mode: %d\n", - restriction->is_ht); - } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int ht40; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &ht40) != 1) - return -EFAULT; - if (!iwl_is_any_associated(priv)) - priv->disable_ht40 = ht40 ? true : false; - else - return -EINVAL; - - return count; -} - -static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[100]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, - "11n 40MHz Mode: %s\n", - priv->disable_ht40 ? "Disabled" : "Enabled"); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_temperature_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "%d\n", priv->temperature); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - - -static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int value; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - if (sscanf(buf, "%d", &value) != 1) - return -EINVAL; - - /* - * Our users expect 0 to be "CAM", but 0 isn't actually - * valid here. However, let's not confuse them and present - * IWL_POWER_INDEX_1 as "1", not "0". 
- */ - if (value == 0) - return -EINVAL; - else if (value > 0) - value -= 1; - - if (value != -1 && (value < 0 || value >= IWL_POWER_NUM)) - return -EINVAL; - - if (!iwl_is_ready_rf(priv)) - return -EAGAIN; - - priv->power_data.debug_sleep_level_override = value; - - mutex_lock(&priv->mutex); - iwl_power_update_mode(priv, true); - mutex_unlock(&priv->mutex); - - return count; -} - -static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[10]; - int pos, value; - const size_t bufsz = sizeof(buf); - - /* see the write function */ - value = priv->power_data.debug_sleep_level_override; - if (value >= 0) - value += 1; - - pos = scnprintf(buf, bufsz, "%d\n", value); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[200]; - int pos = 0, i; - const size_t bufsz = sizeof(buf); - struct iwl_powertable_cmd *cmd = &priv->power_data.sleep_cmd; - - pos += scnprintf(buf + pos, bufsz - pos, - "flags: %#.2x\n", le16_to_cpu(cmd->flags)); - pos += scnprintf(buf + pos, bufsz - pos, - "RX/TX timeout: %d/%d usec\n", - le32_to_cpu(cmd->rx_data_timeout), - le32_to_cpu(cmd->tx_data_timeout)); - for (i = 0; i < IWL_POWER_VEC_SIZE; i++) - pos += scnprintf(buf + pos, bufsz - pos, - "sleep_interval[%d]: %d\n", i, - le32_to_cpu(cmd->sleep_interval[i])); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -DEBUGFS_READ_WRITE_FILE_OPS(sram); -DEBUGFS_READ_FILE_OPS(wowlan_sram); -DEBUGFS_READ_FILE_OPS(nvm); -DEBUGFS_READ_FILE_OPS(stations); -DEBUGFS_READ_FILE_OPS(channels); -DEBUGFS_READ_FILE_OPS(status); -DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers); -DEBUGFS_READ_FILE_OPS(qos); -DEBUGFS_READ_FILE_OPS(thermal_throttling); -DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); -DEBUGFS_READ_FILE_OPS(temperature); -DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); -DEBUGFS_READ_FILE_OPS(current_sleep_command); - -static const char *fmt_value = " %-30s %10u\n"; -static const char *fmt_hex = " %-30s 0x%02X\n"; -static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; -static const char *fmt_header = - "%-32s current cumulative delta max\n"; - -static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) -{ - int p = 0; - u32 flag; - - lockdep_assert_held(&priv->statistics.lock); - - flag = le32_to_cpu(priv->statistics.flag); - - p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); - if (flag & UCODE_STATISTICS_CLEAR_MSK) - p += scnprintf(buf + p, bufsz - p, - "\tStatistics have been cleared\n"); - p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", - (flag & UCODE_STATISTICS_FREQUENCY_MSK) - ? "2.4 GHz" : "5.2 GHz"); - p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", - (flag & UCODE_STATISTICS_NARROW_BAND_MSK) - ? 
"enabled" : "disabled"); - - return p; -} - -static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = sizeof(struct statistics_rx_phy) * 40 + - sizeof(struct statistics_rx_non_phy) * 40 + - sizeof(struct statistics_rx_ht_phy) * 40 + 400; - ssize_t ret; - struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; - struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; - struct statistics_rx_non_phy *general, *accum_general; - struct statistics_rx_non_phy *delta_general, *max_general; - struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; - - if (!iwl_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - /* - * the statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - spin_lock_bh(&priv->statistics.lock); - ofdm = &priv->statistics.rx_ofdm; - cck = &priv->statistics.rx_cck; - general = &priv->statistics.rx_non_phy; - ht = &priv->statistics.rx_ofdm_ht; - accum_ofdm = &priv->accum_stats.rx_ofdm; - accum_cck = &priv->accum_stats.rx_cck; - accum_general = &priv->accum_stats.rx_non_phy; - accum_ht = &priv->accum_stats.rx_ofdm_ht; - delta_ofdm = &priv->delta_stats.rx_ofdm; - delta_cck = &priv->delta_stats.rx_cck; - delta_general = &priv->delta_stats.rx_non_phy; - delta_ht = &priv->delta_stats.rx_ofdm_ht; - max_ofdm = &priv->max_delta_stats.rx_ofdm; - max_cck = &priv->max_delta_stats.rx_cck; - max_general = &priv->max_delta_stats.rx_non_phy; - max_ht = &priv->max_delta_stats.rx_ofdm_ht; - - pos += iwl_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Rx - OFDM:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ina_cnt:", - le32_to_cpu(ofdm->ina_cnt), - accum_ofdm->ina_cnt, - delta_ofdm->ina_cnt, max_ofdm->ina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_cnt:", - le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, - delta_ofdm->fina_cnt, max_ofdm->fina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "plcp_err:", - le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, - delta_ofdm->plcp_err, max_ofdm->plcp_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_err:", - le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, - delta_ofdm->crc32_err, max_ofdm->crc32_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "overrun_err:", - le32_to_cpu(ofdm->overrun_err), - accum_ofdm->overrun_err, delta_ofdm->overrun_err, - max_ofdm->overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "early_overrun_err:", - le32_to_cpu(ofdm->early_overrun_err), - accum_ofdm->early_overrun_err, - delta_ofdm->early_overrun_err, - max_ofdm->early_overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_good:", - le32_to_cpu(ofdm->crc32_good), - accum_ofdm->crc32_good, delta_ofdm->crc32_good, - max_ofdm->crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "false_alarm_cnt:", - le32_to_cpu(ofdm->false_alarm_cnt), - accum_ofdm->false_alarm_cnt, - delta_ofdm->false_alarm_cnt, - max_ofdm->false_alarm_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_sync_err_cnt:", - le32_to_cpu(ofdm->fina_sync_err_cnt), - accum_ofdm->fina_sync_err_cnt, - delta_ofdm->fina_sync_err_cnt, - max_ofdm->fina_sync_err_cnt); - pos += 
scnprintf(buf + pos, bufsz - pos, - fmt_table, "sfd_timeout:", - le32_to_cpu(ofdm->sfd_timeout), - accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, - max_ofdm->sfd_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_timeout:", - le32_to_cpu(ofdm->fina_timeout), - accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, - max_ofdm->fina_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "unresponded_rts:", - le32_to_cpu(ofdm->unresponded_rts), - accum_ofdm->unresponded_rts, - delta_ofdm->unresponded_rts, - max_ofdm->unresponded_rts); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "rxe_frame_lmt_ovrun:", - le32_to_cpu(ofdm->rxe_frame_limit_overrun), - accum_ofdm->rxe_frame_limit_overrun, - delta_ofdm->rxe_frame_limit_overrun, - max_ofdm->rxe_frame_limit_overrun); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_ack_cnt:", - le32_to_cpu(ofdm->sent_ack_cnt), - accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, - max_ofdm->sent_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_cts_cnt:", - le32_to_cpu(ofdm->sent_cts_cnt), - accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, - max_ofdm->sent_cts_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_ba_rsp_cnt:", - le32_to_cpu(ofdm->sent_ba_rsp_cnt), - accum_ofdm->sent_ba_rsp_cnt, - delta_ofdm->sent_ba_rsp_cnt, - max_ofdm->sent_ba_rsp_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "dsp_self_kill:", - le32_to_cpu(ofdm->dsp_self_kill), - accum_ofdm->dsp_self_kill, - delta_ofdm->dsp_self_kill, - max_ofdm->dsp_self_kill); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "mh_format_err:", - le32_to_cpu(ofdm->mh_format_err), - accum_ofdm->mh_format_err, - delta_ofdm->mh_format_err, - max_ofdm->mh_format_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "re_acq_main_rssi_sum:", - le32_to_cpu(ofdm->re_acq_main_rssi_sum), - accum_ofdm->re_acq_main_rssi_sum, - delta_ofdm->re_acq_main_rssi_sum, - max_ofdm->re_acq_main_rssi_sum); - - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Rx - CCK:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ina_cnt:", - le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, - delta_cck->ina_cnt, max_cck->ina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_cnt:", - le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, - delta_cck->fina_cnt, max_cck->fina_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "plcp_err:", - le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, - delta_cck->plcp_err, max_cck->plcp_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_err:", - le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, - delta_cck->crc32_err, max_cck->crc32_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "overrun_err:", - le32_to_cpu(cck->overrun_err), - accum_cck->overrun_err, delta_cck->overrun_err, - max_cck->overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "early_overrun_err:", - le32_to_cpu(cck->early_overrun_err), - accum_cck->early_overrun_err, - delta_cck->early_overrun_err, - max_cck->early_overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_good:", - le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, - delta_cck->crc32_good, max_cck->crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "false_alarm_cnt:", - le32_to_cpu(cck->false_alarm_cnt), - accum_cck->false_alarm_cnt, - delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); - pos += scnprintf(buf + pos, 
bufsz - pos, - fmt_table, "fina_sync_err_cnt:", - le32_to_cpu(cck->fina_sync_err_cnt), - accum_cck->fina_sync_err_cnt, - delta_cck->fina_sync_err_cnt, - max_cck->fina_sync_err_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sfd_timeout:", - le32_to_cpu(cck->sfd_timeout), - accum_cck->sfd_timeout, delta_cck->sfd_timeout, - max_cck->sfd_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "fina_timeout:", - le32_to_cpu(cck->fina_timeout), - accum_cck->fina_timeout, delta_cck->fina_timeout, - max_cck->fina_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "unresponded_rts:", - le32_to_cpu(cck->unresponded_rts), - accum_cck->unresponded_rts, delta_cck->unresponded_rts, - max_cck->unresponded_rts); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "rxe_frame_lmt_ovrun:", - le32_to_cpu(cck->rxe_frame_limit_overrun), - accum_cck->rxe_frame_limit_overrun, - delta_cck->rxe_frame_limit_overrun, - max_cck->rxe_frame_limit_overrun); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_ack_cnt:", - le32_to_cpu(cck->sent_ack_cnt), - accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, - max_cck->sent_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_cts_cnt:", - le32_to_cpu(cck->sent_cts_cnt), - accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, - max_cck->sent_cts_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sent_ba_rsp_cnt:", - le32_to_cpu(cck->sent_ba_rsp_cnt), - accum_cck->sent_ba_rsp_cnt, - delta_cck->sent_ba_rsp_cnt, - max_cck->sent_ba_rsp_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "dsp_self_kill:", - le32_to_cpu(cck->dsp_self_kill), - accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, - max_cck->dsp_self_kill); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "mh_format_err:", - le32_to_cpu(cck->mh_format_err), - accum_cck->mh_format_err, delta_cck->mh_format_err, - max_cck->mh_format_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "re_acq_main_rssi_sum:", - le32_to_cpu(cck->re_acq_main_rssi_sum), - accum_cck->re_acq_main_rssi_sum, - delta_cck->re_acq_main_rssi_sum, - max_cck->re_acq_main_rssi_sum); - - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Rx - GENERAL:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "bogus_cts:", - le32_to_cpu(general->bogus_cts), - accum_general->bogus_cts, delta_general->bogus_cts, - max_general->bogus_cts); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "bogus_ack:", - le32_to_cpu(general->bogus_ack), - accum_general->bogus_ack, delta_general->bogus_ack, - max_general->bogus_ack); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "non_bssid_frames:", - le32_to_cpu(general->non_bssid_frames), - accum_general->non_bssid_frames, - delta_general->non_bssid_frames, - max_general->non_bssid_frames); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "filtered_frames:", - le32_to_cpu(general->filtered_frames), - accum_general->filtered_frames, - delta_general->filtered_frames, - max_general->filtered_frames); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "non_channel_beacons:", - le32_to_cpu(general->non_channel_beacons), - accum_general->non_channel_beacons, - delta_general->non_channel_beacons, - max_general->non_channel_beacons); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "channel_beacons:", - le32_to_cpu(general->channel_beacons), - accum_general->channel_beacons, - delta_general->channel_beacons, - max_general->channel_beacons); - pos += scnprintf(buf + pos, 
bufsz - pos, - fmt_table, "num_missed_bcon:", - le32_to_cpu(general->num_missed_bcon), - accum_general->num_missed_bcon, - delta_general->num_missed_bcon, - max_general->num_missed_bcon); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "adc_rx_saturation_time:", - le32_to_cpu(general->adc_rx_saturation_time), - accum_general->adc_rx_saturation_time, - delta_general->adc_rx_saturation_time, - max_general->adc_rx_saturation_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ina_detect_search_tm:", - le32_to_cpu(general->ina_detection_search_time), - accum_general->ina_detection_search_time, - delta_general->ina_detection_search_time, - max_general->ina_detection_search_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_silence_rssi_a:", - le32_to_cpu(general->beacon_silence_rssi_a), - accum_general->beacon_silence_rssi_a, - delta_general->beacon_silence_rssi_a, - max_general->beacon_silence_rssi_a); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_silence_rssi_b:", - le32_to_cpu(general->beacon_silence_rssi_b), - accum_general->beacon_silence_rssi_b, - delta_general->beacon_silence_rssi_b, - max_general->beacon_silence_rssi_b); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_silence_rssi_c:", - le32_to_cpu(general->beacon_silence_rssi_c), - accum_general->beacon_silence_rssi_c, - delta_general->beacon_silence_rssi_c, - max_general->beacon_silence_rssi_c); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "interference_data_flag:", - le32_to_cpu(general->interference_data_flag), - accum_general->interference_data_flag, - delta_general->interference_data_flag, - max_general->interference_data_flag); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "channel_load:", - le32_to_cpu(general->channel_load), - accum_general->channel_load, - delta_general->channel_load, - max_general->channel_load); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "dsp_false_alarms:", - le32_to_cpu(general->dsp_false_alarms), - accum_general->dsp_false_alarms, - delta_general->dsp_false_alarms, - max_general->dsp_false_alarms); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_rssi_a:", - le32_to_cpu(general->beacon_rssi_a), - accum_general->beacon_rssi_a, - delta_general->beacon_rssi_a, - max_general->beacon_rssi_a); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_rssi_b:", - le32_to_cpu(general->beacon_rssi_b), - accum_general->beacon_rssi_b, - delta_general->beacon_rssi_b, - max_general->beacon_rssi_b); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_rssi_c:", - le32_to_cpu(general->beacon_rssi_c), - accum_general->beacon_rssi_c, - delta_general->beacon_rssi_c, - max_general->beacon_rssi_c); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_energy_a:", - le32_to_cpu(general->beacon_energy_a), - accum_general->beacon_energy_a, - delta_general->beacon_energy_a, - max_general->beacon_energy_a); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_energy_b:", - le32_to_cpu(general->beacon_energy_b), - accum_general->beacon_energy_b, - delta_general->beacon_energy_b, - max_general->beacon_energy_b); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "beacon_energy_c:", - le32_to_cpu(general->beacon_energy_c), - accum_general->beacon_energy_c, - delta_general->beacon_energy_c, - max_general->beacon_energy_c); - - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Rx - OFDM_HT:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, 
"plcp_err:", - le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, - delta_ht->plcp_err, max_ht->plcp_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "overrun_err:", - le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, - delta_ht->overrun_err, max_ht->overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "early_overrun_err:", - le32_to_cpu(ht->early_overrun_err), - accum_ht->early_overrun_err, - delta_ht->early_overrun_err, - max_ht->early_overrun_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_good:", - le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, - delta_ht->crc32_good, max_ht->crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "crc32_err:", - le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, - delta_ht->crc32_err, max_ht->crc32_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "mh_format_err:", - le32_to_cpu(ht->mh_format_err), - accum_ht->mh_format_err, - delta_ht->mh_format_err, max_ht->mh_format_err); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg_crc32_good:", - le32_to_cpu(ht->agg_crc32_good), - accum_ht->agg_crc32_good, - delta_ht->agg_crc32_good, max_ht->agg_crc32_good); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg_mpdu_cnt:", - le32_to_cpu(ht->agg_mpdu_cnt), - accum_ht->agg_mpdu_cnt, - delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg_cnt:", - le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, - delta_ht->agg_cnt, max_ht->agg_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "unsupport_mcs:", - le32_to_cpu(ht->unsupport_mcs), - accum_ht->unsupport_mcs, - delta_ht->unsupport_mcs, max_ht->unsupport_mcs); - - spin_unlock_bh(&priv->statistics.lock); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = (sizeof(struct statistics_tx) * 48) + 250; - ssize_t ret; - struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; - - if (!iwl_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - /* the statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - spin_lock_bh(&priv->statistics.lock); - - tx = &priv->statistics.tx; - accum_tx = &priv->accum_stats.tx; - delta_tx = &priv->delta_stats.tx; - max_tx = &priv->max_delta_stats.tx; - - pos += iwl_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_Tx:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "preamble:", - le32_to_cpu(tx->preamble_cnt), - accum_tx->preamble_cnt, - delta_tx->preamble_cnt, max_tx->preamble_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "rx_detected_cnt:", - le32_to_cpu(tx->rx_detected_cnt), - accum_tx->rx_detected_cnt, - delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "bt_prio_defer_cnt:", - le32_to_cpu(tx->bt_prio_defer_cnt), - accum_tx->bt_prio_defer_cnt, - delta_tx->bt_prio_defer_cnt, - max_tx->bt_prio_defer_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "bt_prio_kill_cnt:", - le32_to_cpu(tx->bt_prio_kill_cnt), - accum_tx->bt_prio_kill_cnt, - delta_tx->bt_prio_kill_cnt, - max_tx->bt_prio_kill_cnt); 
- pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "few_bytes_cnt:", - le32_to_cpu(tx->few_bytes_cnt), - accum_tx->few_bytes_cnt, - delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "cts_timeout:", - le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, - delta_tx->cts_timeout, max_tx->cts_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ack_timeout:", - le32_to_cpu(tx->ack_timeout), - accum_tx->ack_timeout, - delta_tx->ack_timeout, max_tx->ack_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "expected_ack_cnt:", - le32_to_cpu(tx->expected_ack_cnt), - accum_tx->expected_ack_cnt, - delta_tx->expected_ack_cnt, - max_tx->expected_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "actual_ack_cnt:", - le32_to_cpu(tx->actual_ack_cnt), - accum_tx->actual_ack_cnt, - delta_tx->actual_ack_cnt, - max_tx->actual_ack_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "dump_msdu_cnt:", - le32_to_cpu(tx->dump_msdu_cnt), - accum_tx->dump_msdu_cnt, - delta_tx->dump_msdu_cnt, - max_tx->dump_msdu_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "abort_nxt_frame_mismatch:", - le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), - accum_tx->burst_abort_next_frame_mismatch_cnt, - delta_tx->burst_abort_next_frame_mismatch_cnt, - max_tx->burst_abort_next_frame_mismatch_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "abort_missing_nxt_frame:", - le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), - accum_tx->burst_abort_missing_next_frame_cnt, - delta_tx->burst_abort_missing_next_frame_cnt, - max_tx->burst_abort_missing_next_frame_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "cts_timeout_collision:", - le32_to_cpu(tx->cts_timeout_collision), - accum_tx->cts_timeout_collision, - delta_tx->cts_timeout_collision, - max_tx->cts_timeout_collision); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "ack_ba_timeout_collision:", - le32_to_cpu(tx->ack_or_ba_timeout_collision), - accum_tx->ack_or_ba_timeout_collision, - delta_tx->ack_or_ba_timeout_collision, - max_tx->ack_or_ba_timeout_collision); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg ba_timeout:", - le32_to_cpu(tx->agg.ba_timeout), - accum_tx->agg.ba_timeout, - delta_tx->agg.ba_timeout, - max_tx->agg.ba_timeout); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg ba_resched_frames:", - le32_to_cpu(tx->agg.ba_reschedule_frames), - accum_tx->agg.ba_reschedule_frames, - delta_tx->agg.ba_reschedule_frames, - max_tx->agg.ba_reschedule_frames); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg scd_query_agg_frame:", - le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), - accum_tx->agg.scd_query_agg_frame_cnt, - delta_tx->agg.scd_query_agg_frame_cnt, - max_tx->agg.scd_query_agg_frame_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg scd_query_no_agg:", - le32_to_cpu(tx->agg.scd_query_no_agg), - accum_tx->agg.scd_query_no_agg, - delta_tx->agg.scd_query_no_agg, - max_tx->agg.scd_query_no_agg); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg scd_query_agg:", - le32_to_cpu(tx->agg.scd_query_agg), - accum_tx->agg.scd_query_agg, - delta_tx->agg.scd_query_agg, - max_tx->agg.scd_query_agg); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg scd_query_mismatch:", - le32_to_cpu(tx->agg.scd_query_mismatch), - accum_tx->agg.scd_query_mismatch, - delta_tx->agg.scd_query_mismatch, - max_tx->agg.scd_query_mismatch); - pos += scnprintf(buf + pos, 
bufsz - pos, - fmt_table, "agg frame_not_ready:", - le32_to_cpu(tx->agg.frame_not_ready), - accum_tx->agg.frame_not_ready, - delta_tx->agg.frame_not_ready, - max_tx->agg.frame_not_ready); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg underrun:", - le32_to_cpu(tx->agg.underrun), - accum_tx->agg.underrun, - delta_tx->agg.underrun, max_tx->agg.underrun); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg bt_prio_kill:", - le32_to_cpu(tx->agg.bt_prio_kill), - accum_tx->agg.bt_prio_kill, - delta_tx->agg.bt_prio_kill, - max_tx->agg.bt_prio_kill); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "agg rx_ba_rsp_cnt:", - le32_to_cpu(tx->agg.rx_ba_rsp_cnt), - accum_tx->agg.rx_ba_rsp_cnt, - delta_tx->agg.rx_ba_rsp_cnt, - max_tx->agg.rx_ba_rsp_cnt); - - if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { - pos += scnprintf(buf + pos, bufsz - pos, - "tx power: (1/2 dB step)\n"); - if ((priv->nvm_data->valid_tx_ant & ANT_A) && - tx->tx_power.ant_a) - pos += scnprintf(buf + pos, bufsz - pos, - fmt_hex, "antenna A:", - tx->tx_power.ant_a); - if ((priv->nvm_data->valid_tx_ant & ANT_B) && - tx->tx_power.ant_b) - pos += scnprintf(buf + pos, bufsz - pos, - fmt_hex, "antenna B:", - tx->tx_power.ant_b); - if ((priv->nvm_data->valid_tx_ant & ANT_C) && - tx->tx_power.ant_c) - pos += scnprintf(buf + pos, bufsz - pos, - fmt_hex, "antenna C:", - tx->tx_power.ant_c); - } - - spin_unlock_bh(&priv->statistics.lock); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char *buf; - int bufsz = sizeof(struct statistics_general) * 10 + 300; - ssize_t ret; - struct statistics_general_common *general, *accum_general; - struct statistics_general_common *delta_general, *max_general; - struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; - struct statistics_div *div, *accum_div, *delta_div, *max_div; - - if (!iwl_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - /* the statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - - spin_lock_bh(&priv->statistics.lock); - - general = &priv->statistics.common; - dbg = &priv->statistics.common.dbg; - div = &priv->statistics.common.div; - accum_general = &priv->accum_stats.common; - accum_dbg = &priv->accum_stats.common.dbg; - accum_div = &priv->accum_stats.common.div; - delta_general = &priv->delta_stats.common; - max_general = &priv->max_delta_stats.common; - delta_dbg = &priv->delta_stats.common.dbg; - max_dbg = &priv->max_delta_stats.common.dbg; - delta_div = &priv->delta_stats.common.div; - max_div = &priv->max_delta_stats.common.div; - - pos += iwl_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_header, "Statistics_General:"); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_value, "temperature:", - le32_to_cpu(general->temperature)); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_value, "temperature_m:", - le32_to_cpu(general->temperature_m)); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_value, "ttl_timestamp:", - le32_to_cpu(general->ttl_timestamp)); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "burst_check:", - le32_to_cpu(dbg->burst_check), - accum_dbg->burst_check, - 
delta_dbg->burst_check, max_dbg->burst_check); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "burst_count:", - le32_to_cpu(dbg->burst_count), - accum_dbg->burst_count, - delta_dbg->burst_count, max_dbg->burst_count); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "wait_for_silence_timeout_count:", - le32_to_cpu(dbg->wait_for_silence_timeout_cnt), - accum_dbg->wait_for_silence_timeout_cnt, - delta_dbg->wait_for_silence_timeout_cnt, - max_dbg->wait_for_silence_timeout_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "sleep_time:", - le32_to_cpu(general->sleep_time), - accum_general->sleep_time, - delta_general->sleep_time, max_general->sleep_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "slots_out:", - le32_to_cpu(general->slots_out), - accum_general->slots_out, - delta_general->slots_out, max_general->slots_out); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "slots_idle:", - le32_to_cpu(general->slots_idle), - accum_general->slots_idle, - delta_general->slots_idle, max_general->slots_idle); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "tx_on_a:", - le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, - delta_div->tx_on_a, max_div->tx_on_a); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "tx_on_b:", - le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, - delta_div->tx_on_b, max_div->tx_on_b); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "exec_time:", - le32_to_cpu(div->exec_time), accum_div->exec_time, - delta_div->exec_time, max_div->exec_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "probe_time:", - le32_to_cpu(div->probe_time), accum_div->probe_time, - delta_div->probe_time, max_div->probe_time); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "rx_enable_counter:", - le32_to_cpu(general->rx_enable_counter), - accum_general->rx_enable_counter, - delta_general->rx_enable_counter, - max_general->rx_enable_counter); - pos += scnprintf(buf + pos, bufsz - pos, - fmt_table, "num_of_sos_states:", - le32_to_cpu(general->num_of_sos_states), - accum_general->num_of_sos_states, - delta_general->num_of_sos_states, - max_general->num_of_sos_states); - - spin_unlock_bh(&priv->statistics.lock); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; - int pos = 0; - char *buf; - int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200; - ssize_t ret; - struct statistics_bt_activity *bt, *accum_bt; - - if (!iwl_is_alive(priv)) - return -EAGAIN; - - if (!priv->bt_enable_flag) - return -EINVAL; - - /* make request to uCode to retrieve statistics information */ - mutex_lock(&priv->mutex); - ret = iwl_send_statistics_request(priv, 0, false); - mutex_unlock(&priv->mutex); - - if (ret) - return -EAGAIN; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - /* - * the statistic information display here is based on - * the last statistics notification from uCode - * might not reflect the current uCode activity - */ - - spin_lock_bh(&priv->statistics.lock); - - bt = &priv->statistics.bt_activity; - accum_bt = &priv->accum_stats.bt_activity; - - pos += iwl_statistics_flag(priv, buf, bufsz); - pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n"); - pos += scnprintf(buf + pos, bufsz - pos, - "\t\t\tcurrent\t\t\taccumulative\n"); - pos += scnprintf(buf + 
pos, bufsz - pos, - "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n", - le32_to_cpu(bt->hi_priority_tx_req_cnt), - accum_bt->hi_priority_tx_req_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n", - le32_to_cpu(bt->hi_priority_tx_denied_cnt), - accum_bt->hi_priority_tx_denied_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n", - le32_to_cpu(bt->lo_priority_tx_req_cnt), - accum_bt->lo_priority_tx_req_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n", - le32_to_cpu(bt->lo_priority_tx_denied_cnt), - accum_bt->lo_priority_tx_denied_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n", - le32_to_cpu(bt->hi_priority_rx_req_cnt), - accum_bt->hi_priority_rx_req_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n", - le32_to_cpu(bt->hi_priority_rx_denied_cnt), - accum_bt->hi_priority_rx_denied_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n", - le32_to_cpu(bt->lo_priority_rx_req_cnt), - accum_bt->lo_priority_rx_req_cnt); - pos += scnprintf(buf + pos, bufsz - pos, - "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n", - le32_to_cpu(bt->lo_priority_rx_denied_cnt), - accum_bt->lo_priority_rx_denied_cnt); - - pos += scnprintf(buf + pos, bufsz - pos, - "(rx)num_bt_kills:\t\t%u\t\t\t%u\n", - le32_to_cpu(priv->statistics.num_bt_kills), - priv->statistics.accum_num_bt_kills); - - spin_unlock_bh(&priv->statistics.lock); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; - int pos = 0; - char *buf; - int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) + - (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200; - ssize_t ret; - - if (!iwl_is_alive(priv)) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n"); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY), - priv->reply_tx_stats.pp_delay); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES), - priv->reply_tx_stats.pp_few_bytes); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO), - priv->reply_tx_stats.pp_bt_prio); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD), - priv->reply_tx_stats.pp_quiet_period); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK), - priv->reply_tx_stats.pp_calc_ttak); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", - iwl_get_tx_fail_reason( - TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY), - priv->reply_tx_stats.int_crossed_retry); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT), - priv->reply_tx_stats.short_limit); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT), - priv->reply_tx_stats.long_limit); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - 
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN), - priv->reply_tx_stats.fifo_underrun); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW), - priv->reply_tx_stats.drain_flow); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH), - priv->reply_tx_stats.rfkill_flush); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE), - priv->reply_tx_stats.life_expire); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS), - priv->reply_tx_stats.dest_ps); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED), - priv->reply_tx_stats.host_abort); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY), - priv->reply_tx_stats.pp_delay); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID), - priv->reply_tx_stats.sta_invalid); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED), - priv->reply_tx_stats.frag_drop); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE), - priv->reply_tx_stats.tid_disable); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED), - priv->reply_tx_stats.fifo_flush); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", - iwl_get_tx_fail_reason( - TX_STATUS_FAIL_INSUFFICIENT_CF_POLL), - priv->reply_tx_stats.insuff_cf_poll); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX), - priv->reply_tx_stats.fail_hw_drop); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", - iwl_get_tx_fail_reason( - TX_STATUS_FAIL_NO_BEACON_ON_RADAR), - priv->reply_tx_stats.sta_color_mismatch); - pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n", - priv->reply_tx_stats.unknown); - - pos += scnprintf(buf + pos, bufsz - pos, - "\nStatistics_Agg_TX_Error:\n"); - - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK), - priv->reply_agg_tx_stats.underrun); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK), - priv->reply_agg_tx_stats.bt_prio); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK), - priv->reply_agg_tx_stats.few_bytes); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK), - priv->reply_agg_tx_stats.abort); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", - iwl_get_agg_tx_fail_reason( - AGG_TX_STATE_LAST_SENT_TTL_MSK), - priv->reply_agg_tx_stats.last_sent_ttl); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", - iwl_get_agg_tx_fail_reason( - AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK), - priv->reply_agg_tx_stats.last_sent_try); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", - iwl_get_agg_tx_fail_reason( - AGG_TX_STATE_LAST_SENT_BT_KILL_MSK), - priv->reply_agg_tx_stats.last_sent_bt_kill); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK), - priv->reply_agg_tx_stats.scd_query); - pos += 
scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", - iwl_get_agg_tx_fail_reason( - AGG_TX_STATE_TEST_BAD_CRC32_MSK), - priv->reply_agg_tx_stats.bad_crc32); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK), - priv->reply_agg_tx_stats.response); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK), - priv->reply_agg_tx_stats.dump_tx); - pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", - iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK), - priv->reply_agg_tx_stats.delay_tx); - pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n", - priv->reply_agg_tx_stats.unknown); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_sensitivity_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100; - ssize_t ret; - struct iwl_sensitivity_data *data; - - data = &priv->sensitivity_data; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", - data->auto_corr_ofdm); - pos += scnprintf(buf + pos, bufsz - pos, - "auto_corr_ofdm_mrc:\t\t %u\n", - data->auto_corr_ofdm_mrc); - pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", - data->auto_corr_ofdm_x1); - pos += scnprintf(buf + pos, bufsz - pos, - "auto_corr_ofdm_mrc_x1:\t\t %u\n", - data->auto_corr_ofdm_mrc_x1); - pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", - data->auto_corr_cck); - pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", - data->auto_corr_cck_mrc); - pos += scnprintf(buf + pos, bufsz - pos, - "last_bad_plcp_cnt_ofdm:\t\t %u\n", - data->last_bad_plcp_cnt_ofdm); - pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", - data->last_fa_cnt_ofdm); - pos += scnprintf(buf + pos, bufsz - pos, - "last_bad_plcp_cnt_cck:\t\t %u\n", - data->last_bad_plcp_cnt_cck); - pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", - data->last_fa_cnt_cck); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", - data->nrg_curr_state); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", - data->nrg_prev_state); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); - for (cnt = 0; cnt < 10; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, " %u", - data->nrg_value[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); - for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, " %u", - data->nrg_silence_rssi[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", - data->nrg_silence_ref); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", - data->nrg_energy_idx); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", - data->nrg_silence_idx); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", - data->nrg_th_cck); - pos += scnprintf(buf + pos, bufsz - pos, - "nrg_auto_corr_silence_diff:\t %u\n", - data->nrg_auto_corr_silence_diff); - pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", - 
data->num_in_cck_no_fa); - pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", - data->nrg_th_ofdm); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - - -static ssize_t iwl_dbgfs_chain_noise_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - int cnt = 0; - char *buf; - int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100; - ssize_t ret; - struct iwl_chain_noise_data *data; - - data = &priv->chain_noise_data; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", - data->active_chains); - pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", - data->chain_noise_a); - pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", - data->chain_noise_b); - pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", - data->chain_noise_c); - pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", - data->chain_signal_a); - pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", - data->chain_signal_b); - pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", - data->chain_signal_c); - pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", - data->beacon_count); - - pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); - for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, " %u", - data->disconn_array[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); - for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, " %u", - data->delta_gain_code[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", - data->radio_write); - pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", - data->state); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_power_save_status_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[60]; - int pos = 0; - const size_t bufsz = sizeof(buf); - u32 pwrsave_status; - - pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) & - CSR_GP_REG_POWER_SAVE_STATUS_MSK; - - pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); - pos += scnprintf(buf + pos, bufsz - pos, "%s\n", - (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : - (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : - (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? 
"PHY" : - "error"); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int clear; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &clear) != 1) - return -EFAULT; - - /* make request to uCode to retrieve statistics information */ - mutex_lock(&priv->mutex); - iwl_send_statistics_request(priv, 0, true); - mutex_unlock(&priv->mutex); - - return count; -} - -static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - char buf[128]; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n", - priv->event_log.ucode_trace ? "On" : "Off"); - pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n", - priv->event_log.non_wraps_count); - pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n", - priv->event_log.wraps_once_count); - pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n", - priv->event_log.wraps_more_count); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int trace; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &trace) != 1) - return -EFAULT; - - if (trace) { - priv->event_log.ucode_trace = true; - if (iwl_is_alive(priv)) { - /* start collecting data now */ - mod_timer(&priv->ucode_trace, jiffies); - } - } else { - priv->event_log.ucode_trace = false; - del_timer_sync(&priv->ucode_trace); - } - - return count; -} - -static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int len = 0; - char buf[20]; - - len = sprintf(buf, "0x%04X\n", - le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags)); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int len = 0; - char buf[20]; - - len = sprintf(buf, "0x%04X\n", - le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags)); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - char buf[12]; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "%d\n", - priv->missed_beacon_threshold); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int missed; - - memset(buf, 0, 
sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &missed) != 1) - return -EINVAL; - - if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN || - missed > IWL_MISSED_BEACON_THRESHOLD_MAX) - priv->missed_beacon_threshold = - IWL_MISSED_BEACON_THRESHOLD_DEF; - else - priv->missed_beacon_threshold = missed; - - return count; -} - -static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int pos = 0; - char buf[12]; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "%u\n", - priv->plcp_delta_threshold); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int plcp; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &plcp) != 1) - return -EINVAL; - if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || - (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) - priv->plcp_delta_threshold = - IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; - else - priv->plcp_delta_threshold = plcp; - return count; -} - -static ssize_t iwl_dbgfs_rf_reset_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char buf[300]; - const size_t bufsz = sizeof(buf); - struct iwl_rf_reset *rf_reset = &priv->rf_reset; - - pos += scnprintf(buf + pos, bufsz - pos, - "RF reset statistics\n"); - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request: %d\n", - rf_reset->reset_request_count); - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request success: %d\n", - rf_reset->reset_success_count); - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request reject: %d\n", - rf_reset->reset_reject_count); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_rf_reset_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - int ret; - - ret = iwl_force_rf_reset(priv, true); - return ret ? 
ret : count; -} - -static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int flush; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &flush) != 1) - return -EINVAL; - - if (iwl_is_rfkill(priv)) - return -EFAULT; - - iwlagn_dev_txfifo_flush(priv); - - return count; -} - -static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; - int pos = 0; - char buf[200]; - const size_t bufsz = sizeof(buf); - - if (!priv->bt_enable_flag) { - pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n"); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); - } - pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n", - priv->bt_enable_flag); - pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n", - priv->bt_full_concurrent ? "full concurrency" : "3-wire"); - pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, " - "last traffic notif: %d\n", - priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load); - pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, " - "kill_ack_mask: %x, kill_cts_mask: %x\n", - priv->bt_ch_announce, priv->kill_ack_mask, - priv->kill_cts_mask); - - pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: "); - switch (priv->bt_traffic_load) { - case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: - pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n"); - break; - case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: - pos += scnprintf(buf + pos, bufsz - pos, "High\n"); - break; - case IWL_BT_COEX_TRAFFIC_LOAD_LOW: - pos += scnprintf(buf + pos, bufsz - pos, "Low\n"); - break; - case IWL_BT_COEX_TRAFFIC_LOAD_NONE: - default: - pos += scnprintf(buf + pos, bufsz - pos, "None\n"); - break; - } - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_protection_mode_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; - - int pos = 0; - char buf[40]; - const size_t bufsz = sizeof(buf); - - if (priv->cfg->ht_params) - pos += scnprintf(buf + pos, bufsz - pos, - "use %s for aggregation\n", - (priv->hw_params.use_rts_for_aggregation) ? 
- "rts/cts" : "cts-to-self"); - else - pos += scnprintf(buf + pos, bufsz - pos, "N/A"); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int rts; - - if (!priv->cfg->ht_params) - return -EINVAL; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &rts) != 1) - return -EINVAL; - if (rts) - priv->hw_params.use_rts_for_aggregation = true; - else - priv->hw_params.use_rts_for_aggregation = false; - return count; -} - -static int iwl_cmd_echo_test(struct iwl_priv *priv) -{ - int ret; - struct iwl_host_cmd cmd = { - .id = REPLY_ECHO, - .len = { 0 }, - }; - - ret = iwl_dvm_send_cmd(priv, &cmd); - if (ret) - IWL_ERR(priv, "echo testing fail: 0X%x\n", ret); - else - IWL_DEBUG_INFO(priv, "echo testing pass\n"); - return ret; -} - -static ssize_t iwl_dbgfs_echo_test_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - iwl_cmd_echo_test(priv); - return count; -} - -#ifdef CONFIG_IWLWIFI_DEBUG -static ssize_t iwl_dbgfs_log_event_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char *buf = NULL; - ssize_t ret; - - ret = iwl_dump_nic_event_log(priv, true, &buf); - if (ret > 0) - ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_log_event_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - u32 event_log_flag; - char buf[8]; - int buf_size; - - /* check that the interface is up */ - if (!iwl_is_ready(priv)) - return -EAGAIN; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &event_log_flag) != 1) - return -EFAULT; - if (event_log_flag == 1) - iwl_dump_nic_event_log(priv, true, NULL); - - return count; -} -#endif - -static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[120]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, - "Sensitivity calibrations %s\n", - (priv->calib_disabled & - IWL_SENSITIVITY_CALIB_DISABLED) ? - "DISABLED" : "ENABLED"); - pos += scnprintf(buf + pos, bufsz - pos, - "Chain noise calibrations %s\n", - (priv->calib_disabled & - IWL_CHAIN_NOISE_CALIB_DISABLED) ? - "DISABLED" : "ENABLED"); - pos += scnprintf(buf + pos, bufsz - pos, - "Tx power calibrations %s\n", - (priv->calib_disabled & - IWL_TX_POWER_CALIB_DISABLED) ? 
- "DISABLED" : "ENABLED"); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - u32 calib_disabled; - int buf_size; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%x", &calib_disabled) != 1) - return -EFAULT; - - priv->calib_disabled = calib_disabled; - - return count; -} - -static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - bool restart_fw = iwlwifi_mod_params.restart_fw; - int ret; - - iwlwifi_mod_params.restart_fw = true; - - mutex_lock(&priv->mutex); - - /* take the return value to make compiler happy - it will fail anyway */ - ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL); - - mutex_unlock(&priv->mutex); - - iwlwifi_mod_params.restart_fw = restart_fw; - - return count; -} - -DEBUGFS_READ_FILE_OPS(ucode_rx_stats); -DEBUGFS_READ_FILE_OPS(ucode_tx_stats); -DEBUGFS_READ_FILE_OPS(ucode_general_stats); -DEBUGFS_READ_FILE_OPS(sensitivity); -DEBUGFS_READ_FILE_OPS(chain_noise); -DEBUGFS_READ_FILE_OPS(power_save_status); -DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); -DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); -DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); -DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); -DEBUGFS_READ_WRITE_FILE_OPS(rf_reset); -DEBUGFS_READ_FILE_OPS(rxon_flags); -DEBUGFS_READ_FILE_OPS(rxon_filter_flags); -DEBUGFS_WRITE_FILE_OPS(txfifo_flush); -DEBUGFS_READ_FILE_OPS(ucode_bt_stats); -DEBUGFS_READ_FILE_OPS(bt_traffic); -DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); -DEBUGFS_READ_FILE_OPS(reply_tx_error); -DEBUGFS_WRITE_FILE_OPS(echo_test); -DEBUGFS_WRITE_FILE_OPS(fw_restart); -#ifdef CONFIG_IWLWIFI_DEBUG -DEBUGFS_READ_WRITE_FILE_OPS(log_event); -#endif -DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled); - -/* - * Create the debugfs files and directories - * - */ -int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir) -{ - struct dentry *dir_data, *dir_rf, *dir_debug; - - priv->debugfs_dir = dbgfs_dir; - - dir_data = debugfs_create_dir("data", dbgfs_dir); - if (!dir_data) - goto err; - dir_rf = debugfs_create_dir("rf", dbgfs_dir); - if (!dir_rf) - goto err; - dir_debug = debugfs_create_dir("debug", dbgfs_dir); - if (!dir_debug) - goto err; - - DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR); - - DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | 
S_IRUSR); - DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(fw_restart, dir_debug, S_IWUSR); -#ifdef CONFIG_IWLWIFI_DEBUG - DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR); -#endif - - if (iwl_advanced_bt_coexist(priv)) - DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); - - /* Calibrations disabled/enabled status*/ - DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR); - - /* - * Create a symlink with mac80211. This is not very robust, as it does - * not remove the symlink created. The implicit assumption is that - * when the opmode exits, mac80211 will also exit, and will remove - * this symlink as part of its cleanup. - */ - if (priv->mac80211_registered) { - char buf[100]; - struct dentry *mac80211_dir, *dev_dir, *root_dir; - - dev_dir = dbgfs_dir->d_parent; - root_dir = dev_dir->d_parent; - mac80211_dir = priv->hw->wiphy->debugfsdir; - - snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name, - dev_dir->d_name.name); - - if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf)) - goto err; - } - - return 0; - -err: - IWL_ERR(priv, "failed to create the dvm debugfs entries\n"); - return -ENOMEM; -} diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h deleted file mode 100644 index 0ba3e56d6015..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ /dev/null @@ -1,949 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -/* - * Please use this file (dev.h) for driver implementation definitions. - * Please use commands.h for uCode API definitions. 
- */ - -#ifndef __iwl_dev_h__ -#define __iwl_dev_h__ - -#include -#include -#include -#include -#include -#include - -#include "iwl-fw.h" -#include "iwl-eeprom-parse.h" -#include "iwl-csr.h" -#include "iwl-debug.h" -#include "iwl-agn-hw.h" -#include "iwl-op-mode.h" -#include "iwl-notif-wait.h" -#include "iwl-trans.h" - -#include "led.h" -#include "power.h" -#include "rs.h" -#include "tt.h" - -/* CT-KILL constants */ -#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ -#define CT_KILL_THRESHOLD 114 /* in Celsius */ -#define CT_KILL_EXIT_THRESHOLD 95 /* in Celsius */ - -/* Default noise level to report when noise measurement is not available. - * This may be because we're: - * 1) Not associated no beacon statistics being sent to driver) - * 2) Scanning (noise measurement does not apply to associated channel) - * Use default noise value of -127 ... this is below the range of measurable - * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user. - * Also, -127 works better than 0 when averaging frames with/without - * noise info (e.g. averaging might be done in app); measured dBm values are - * always negative ... using a negative value as the default keeps all - * averages within an s8's (used in some apps) range of negative values. */ -#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) - -/* - * RTS threshold here is total size [2347] minus 4 FCS bytes - * Per spec: - * a value of 0 means RTS on all data/management packets - * a value > max MSDU size means no RTS - * else RTS for data/management frames where MPDU is larger - * than RTS value. - */ -#define DEFAULT_RTS_THRESHOLD 2347U -#define MIN_RTS_THRESHOLD 0U -#define MAX_RTS_THRESHOLD 2347U -#define MAX_MSDU_SIZE 2304U -#define MAX_MPDU_SIZE 2346U -#define DEFAULT_BEACON_INTERVAL 200U -#define DEFAULT_SHORT_RETRY_LIMIT 7U -#define DEFAULT_LONG_RETRY_LIMIT 4U - -#define IWL_NUM_SCAN_RATES (2) - - -#define IEEE80211_DATA_LEN 2304 -#define IEEE80211_4ADDR_LEN 30 -#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) -#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) - -#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 -#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 -#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 - -#define IWL_SUPPORTED_RATES_IE_LEN 8 - -#define IWL_INVALID_RATE 0xFF -#define IWL_INVALID_VALUE -1 - -union iwl_ht_rate_supp { - u16 rates; - struct { - u8 siso_rate; - u8 mimo_rate; - }; -}; - -struct iwl_ht_config { - bool single_chain_sufficient; - enum ieee80211_smps_mode smps; /* current smps mode */ -}; - -/* QoS structures */ -struct iwl_qos_info { - int qos_active; - struct iwl_qosparam_cmd def_qos_parm; -}; - -/** - * enum iwl_agg_state - * - * The state machine of the BA agreement establishment / tear down. - * These states relate to a specific RA / TID. - * - * @IWL_AGG_OFF: aggregation is not used - * @IWL_AGG_STARTING: aggregation are starting (between start and oper) - * @IWL_AGG_ON: aggregation session is up - * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the - * HW queue to be empty from packets for this RA /TID. - * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the - * HW queue to be empty from packets for this RA /TID. - */ -enum iwl_agg_state { - IWL_AGG_OFF = 0, - IWL_AGG_STARTING, - IWL_AGG_ON, - IWL_EMPTYING_HW_QUEUE_ADDBA, - IWL_EMPTYING_HW_QUEUE_DELBA, -}; - -/** - * struct iwl_ht_agg - aggregation state machine - - * This structs holds the states for the BA agreement establishment and tear - * down. It also holds the state during the BA session itself. 
This struct is - * duplicated for each RA / TID. - - * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the - * Tx response (REPLY_TX), and the block ack notification - * (REPLY_COMPRESSED_BA). - * @state: state of the BA agreement establishment / tear down. - * @txq_id: Tx queue used by the BA session - * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or - * the first packet to be sent in legacy HW queue in Tx AGG stop flow. - * Basically when next_reclaimed reaches ssn, we can tell mac80211 that - * we are ready to finish the Tx AGG stop / start flow. - * @wait_for_ba: Expect block-ack before next Tx reply - */ -struct iwl_ht_agg { - u32 rate_n_flags; - enum iwl_agg_state state; - u16 txq_id; - u16 ssn; - bool wait_for_ba; -}; - -/** - * struct iwl_tid_data - one for each RA / TID - - * This structs holds the states for each RA / TID. - - * @seq_number: the next WiFi sequence number to use - * @next_reclaimed: the WiFi sequence number of the next packet to be acked. - * This is basically (last acked packet++). - * @agg: aggregation state machine - */ -struct iwl_tid_data { - u16 seq_number; - u16 next_reclaimed; - struct iwl_ht_agg agg; -}; - -/* - * Structure should be accessed with sta_lock held. When station addition - * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only - * the commands (iwl_addsta_cmd and iwl_link_quality_cmd) without sta_lock - * held. - */ -struct iwl_station_entry { - struct iwl_addsta_cmd sta; - u8 used, ctxid; - struct iwl_link_quality_cmd *lq; -}; - -/* - * iwl_station_priv: Driver's private station information - * - * When mac80211 creates a station it reserves some space (hw->sta_data_size) - * in the structure for use by driver. This structure is places in that - * space. - */ -struct iwl_station_priv { - struct iwl_rxon_context *ctx; - struct iwl_lq_sta lq_sta; - atomic_t pending_frames; - bool client; - bool asleep; - u8 max_agg_bufsize; - u8 sta_id; -}; - -/** - * struct iwl_vif_priv - driver's private per-interface information - * - * When mac80211 allocates a virtual interface, it can allocate - * space for us to put data into. - */ -struct iwl_vif_priv { - struct iwl_rxon_context *ctx; - u8 ibss_bssid_sta_id; -}; - -struct iwl_sensitivity_ranges { - u16 min_nrg_cck; - - u16 nrg_th_cck; - u16 nrg_th_ofdm; - - u16 auto_corr_min_ofdm; - u16 auto_corr_min_ofdm_mrc; - u16 auto_corr_min_ofdm_x1; - u16 auto_corr_min_ofdm_mrc_x1; - - u16 auto_corr_max_ofdm; - u16 auto_corr_max_ofdm_mrc; - u16 auto_corr_max_ofdm_x1; - u16 auto_corr_max_ofdm_mrc_x1; - - u16 auto_corr_max_cck; - u16 auto_corr_max_cck_mrc; - u16 auto_corr_min_cck; - u16 auto_corr_min_cck_mrc; - - u16 barker_corr_th_min; - u16 barker_corr_th_min_mrc; - u16 nrg_th_cca; -}; - - -#define KELVIN_TO_CELSIUS(x) ((x)-273) -#define CELSIUS_TO_KELVIN(x) ((x)+273) - - -/****************************************************************************** - * - * Functions implemented in core module which are forward declared here - * for use by iwl-[4-5].c - * - * NOTE: The implementation of these functions are not hardware specific - * which is why they are in the core module files. 
- * - * Naming convention -- - * iwl_ <-- Is part of iwlwifi - * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) - * - ****************************************************************************/ -void iwl_update_chain_flags(struct iwl_priv *priv); -extern const u8 iwl_bcast_addr[ETH_ALEN]; - -#define IWL_OPERATION_MODE_AUTO 0 -#define IWL_OPERATION_MODE_HT_ONLY 1 -#define IWL_OPERATION_MODE_MIXED 2 -#define IWL_OPERATION_MODE_20MHZ 3 - -#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 - -/* Sensitivity and chain noise calibration */ -#define INITIALIZATION_VALUE 0xFFFF -#define IWL_CAL_NUM_BEACONS 16 -#define MAXIMUM_ALLOWED_PATHLOSS 15 - -#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 - -#define MAX_FA_OFDM 50 -#define MIN_FA_OFDM 5 -#define MAX_FA_CCK 50 -#define MIN_FA_CCK 5 - -#define AUTO_CORR_STEP_OFDM 1 - -#define AUTO_CORR_STEP_CCK 3 -#define AUTO_CORR_MAX_TH_CCK 160 - -#define NRG_DIFF 2 -#define NRG_STEP_CCK 2 -#define NRG_MARGIN 8 -#define MAX_NUMBER_CCK_NO_FA 100 - -#define AUTO_CORR_CCK_MIN_VAL_DEF (125) - -#define CHAIN_A 0 -#define CHAIN_B 1 -#define CHAIN_C 2 -#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 -#define ALL_BAND_FILTER 0xFF00 -#define IN_BAND_FILTER 0xFF -#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF - -#define NRG_NUM_PREV_STAT_L 20 -#define NUM_RX_CHAINS 3 - -enum iwlagn_false_alarm_state { - IWL_FA_TOO_MANY = 0, - IWL_FA_TOO_FEW = 1, - IWL_FA_GOOD_RANGE = 2, -}; - -enum iwlagn_chain_noise_state { - IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ - IWL_CHAIN_NOISE_ACCUMULATE, - IWL_CHAIN_NOISE_CALIBRATED, - IWL_CHAIN_NOISE_DONE, -}; - -/* Sensitivity calib data */ -struct iwl_sensitivity_data { - u32 auto_corr_ofdm; - u32 auto_corr_ofdm_mrc; - u32 auto_corr_ofdm_x1; - u32 auto_corr_ofdm_mrc_x1; - u32 auto_corr_cck; - u32 auto_corr_cck_mrc; - - u32 last_bad_plcp_cnt_ofdm; - u32 last_fa_cnt_ofdm; - u32 last_bad_plcp_cnt_cck; - u32 last_fa_cnt_cck; - - u32 nrg_curr_state; - u32 nrg_prev_state; - u32 nrg_value[10]; - u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; - u32 nrg_silence_ref; - u32 nrg_energy_idx; - u32 nrg_silence_idx; - u32 nrg_th_cck; - s32 nrg_auto_corr_silence_diff; - u32 num_in_cck_no_fa; - u32 nrg_th_ofdm; - - u16 barker_corr_th_min; - u16 barker_corr_th_min_mrc; - u16 nrg_th_cca; -}; - -/* Chain noise (differential Rx gain) calib data */ -struct iwl_chain_noise_data { - u32 active_chains; - u32 chain_noise_a; - u32 chain_noise_b; - u32 chain_noise_c; - u32 chain_signal_a; - u32 chain_signal_b; - u32 chain_signal_c; - u16 beacon_count; - u8 disconn_array[NUM_RX_CHAINS]; - u8 delta_gain_code[NUM_RX_CHAINS]; - u8 radio_write; - u8 state; -}; - -enum { - MEASUREMENT_READY = (1 << 0), - MEASUREMENT_ACTIVE = (1 << 1), -}; - -/* reply_tx_statistics (for _agn devices) */ -struct reply_tx_error_statistics { - u32 pp_delay; - u32 pp_few_bytes; - u32 pp_bt_prio; - u32 pp_quiet_period; - u32 pp_calc_ttak; - u32 int_crossed_retry; - u32 short_limit; - u32 long_limit; - u32 fifo_underrun; - u32 drain_flow; - u32 rfkill_flush; - u32 life_expire; - u32 dest_ps; - u32 host_abort; - u32 bt_retry; - u32 sta_invalid; - u32 frag_drop; - u32 tid_disable; - u32 fifo_flush; - u32 insuff_cf_poll; - u32 fail_hw_drop; - u32 sta_color_mismatch; - u32 unknown; -}; - -/* reply_agg_tx_statistics (for _agn devices) */ -struct reply_agg_tx_error_statistics { - u32 underrun; - u32 bt_prio; - u32 few_bytes; - u32 abort; - u32 last_sent_ttl; - u32 last_sent_try; - u32 last_sent_bt_kill; - u32 scd_query; - u32 bad_crc32; - u32 response; - u32 dump_tx; - u32 delay_tx; - u32 unknown; 
-}; - -/* - * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds - * to perform continuous uCode event logging operation if enabled - */ -#define UCODE_TRACE_PERIOD (10) - -/* - * iwl_event_log: current uCode event log position - * - * @ucode_trace: enable/disable ucode continuous trace timer - * @num_wraps: how many times the event buffer wraps - * @next_entry: the entry just before the next one that uCode would fill - * @non_wraps_count: counter for no wrap detected when dump ucode events - * @wraps_once_count: counter for wrap once detected when dump ucode events - * @wraps_more_count: counter for wrap more than once detected - * when dump ucode events - */ -struct iwl_event_log { - bool ucode_trace; - u32 num_wraps; - u32 next_entry; - int non_wraps_count; - int wraps_once_count; - int wraps_more_count; -}; - -#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) - -/* BT Antenna Coupling Threshold (dB) */ -#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) - -/* Firmware reload counter and Timestamp */ -#define IWL_MIN_RELOAD_DURATION 1000 /* 1000 ms */ -#define IWL_MAX_CONTINUE_RELOAD_CNT 4 - - -struct iwl_rf_reset { - int reset_request_count; - int reset_success_count; - int reset_reject_count; - unsigned long last_reset_jiffies; -}; - -enum iwl_rxon_context_id { - IWL_RXON_CTX_BSS, - IWL_RXON_CTX_PAN, - - NUM_IWL_RXON_CTX -}; - -/* extend beacon time format bit shifting */ -/* - * for _agn devices - * bits 31:22 - extended - * bits 21:0 - interval - */ -#define IWLAGN_EXT_BEACON_TIME_POS 22 - -struct iwl_rxon_context { - struct ieee80211_vif *vif; - - u8 mcast_queue; - u8 ac_to_queue[IEEE80211_NUM_ACS]; - u8 ac_to_fifo[IEEE80211_NUM_ACS]; - - /* - * We could use the vif to indicate active, but we - * also need it to be active during disabling when - * we already removed the vif for type setting. - */ - bool always_active, is_active; - - bool ht_need_multiple_chains; - - enum iwl_rxon_context_id ctxid; - - u32 interface_modes, exclusive_interface_modes; - u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype; - - /* - * We declare this const so it can only be - * changed via explicit cast within the - * routines that actually update the physical - * hardware. 
- */ - const struct iwl_rxon_cmd active; - struct iwl_rxon_cmd staging; - - struct iwl_rxon_time_cmd timing; - - struct iwl_qos_info qos_data; - - u8 bcast_sta_id, ap_sta_id; - - u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd; - u8 qos_cmd; - u8 wep_key_cmd; - - struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; - u8 key_mapping_keys; - - __le32 station_flags; - - int beacon_int; - - struct { - bool non_gf_sta_present; - u8 protection; - bool enabled, is_40mhz; - u8 extension_chan_offset; - } ht; -}; - -enum iwl_scan_type { - IWL_SCAN_NORMAL, - IWL_SCAN_RADIO_RESET, -}; - -/** - * struct iwl_hw_params - * - * Holds the module parameters - * - * @tx_chains_num: Number of TX chains - * @rx_chains_num: Number of RX chains - * @ct_kill_threshold: temperature threshold - in hw dependent unit - * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit - * relevant for 1000, 6000 and up - * @struct iwl_sensitivity_ranges: range of sensitivity values - * @use_rts_for_aggregation: use rts/cts protection for HT traffic - */ -struct iwl_hw_params { - u8 tx_chains_num; - u8 rx_chains_num; - bool use_rts_for_aggregation; - u32 ct_kill_threshold; - u32 ct_kill_exit_threshold; - - const struct iwl_sensitivity_ranges *sens; -}; - -/** - * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters - * @advanced_bt_coexist: support advanced bt coexist - * @bt_init_traffic_load: specify initial bt traffic load - * @bt_prio_boost: default bt priority boost value - * @agg_time_limit: maximum number of uSec in aggregation - * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode - */ -struct iwl_dvm_bt_params { - bool advanced_bt_coexist; - u8 bt_init_traffic_load; - u32 bt_prio_boost; - u16 agg_time_limit; - bool bt_sco_disable; - bool bt_session_2; -}; - -/** - * struct iwl_dvm_cfg - DVM firmware specific device configuration - * @set_hw_params: set hardware parameters - * @set_channel_switch: send channel switch command - * @nic_config: apply device specific configuration - * @temperature: read temperature - * @adv_thermal_throttle: support advance thermal throttle - * @support_ct_kill_exit: support ct kill exit condition - * @plcp_delta_threshold: plcp error rate threshold used to trigger - * radio tuning when there is a high receiving plcp error rate - * @chain_noise_scale: default chain noise scale used for gain computation - * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up - * @no_idle_support: do not support idle mode - * @bt_params: pointer to BT parameters - * @need_temp_offset_calib: need to perform temperature offset calibration - * @no_xtal_calib: some devices do not need crystal calibration data, - * don't send it to those - * @temp_offset_v2: support v2 of temperature offset calibration - * @adv_pm: advanced power management - */ -struct iwl_dvm_cfg { - void (*set_hw_params)(struct iwl_priv *priv); - int (*set_channel_switch)(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch); - void (*nic_config)(struct iwl_priv *priv); - void (*temperature)(struct iwl_priv *priv); - - const struct iwl_dvm_bt_params *bt_params; - s32 chain_noise_scale; - u8 plcp_delta_threshold; - bool adv_thermal_throttle; - bool support_ct_kill_exit; - bool hd_v2; - bool no_idle_support; - bool need_temp_offset_calib; - bool no_xtal_calib; - bool temp_offset_v2; - bool adv_pm; -}; - -struct iwl_wipan_noa_data { - struct rcu_head rcu_head; - u32 length; - u8 data[]; -}; - -/* Calibration disabling bit mask */ -enum { - IWL_CALIB_ENABLE_ALL = 0, - - 
IWL_SENSITIVITY_CALIB_DISABLED = BIT(0), - IWL_CHAIN_NOISE_CALIB_DISABLED = BIT(1), - IWL_TX_POWER_CALIB_DISABLED = BIT(2), - - IWL_CALIB_DISABLE_ALL = 0xFFFFFFFF, -}; - -#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \ - ((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific)) - -#define IWL_MAC80211_GET_DVM(_hw) \ - ((struct iwl_priv *) ((struct iwl_op_mode *) \ - (_hw)->priv)->op_mode_specific) - -struct iwl_priv { - - struct iwl_trans *trans; - struct device *dev; /* for debug prints only */ - const struct iwl_cfg *cfg; - const struct iwl_fw *fw; - const struct iwl_dvm_cfg *lib; - unsigned long status; - - spinlock_t sta_lock; - struct mutex mutex; - - unsigned long transport_queue_stop; - bool passive_no_rx; -#define IWL_INVALID_MAC80211_QUEUE 0xff - u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; - atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; - - unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; - - /* ieee device used by generic ieee processing code */ - struct ieee80211_hw *hw; - - struct napi_struct *napi; - - struct list_head calib_results; - - struct workqueue_struct *workqueue; - - struct iwl_hw_params hw_params; - - enum ieee80211_band band; - u8 valid_contexts; - - void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb); - - struct iwl_notif_wait_data notif_wait; - - /* spectrum measurement report caching */ - struct iwl_spectrum_notification measure_report; - u8 measurement_status; - - /* ucode beacon time */ - u32 ucode_beacon_time; - int missed_beacon_threshold; - - /* track IBSS manager (last beacon) status */ - u32 ibss_manager; - - /* jiffies when last recovery from statistics was performed */ - unsigned long rx_statistics_jiffies; - - /*counters */ - u32 rx_handlers_stats[REPLY_MAX]; - - /* rf reset */ - struct iwl_rf_reset rf_reset; - - /* firmware reload counter and timestamp */ - unsigned long reload_jiffies; - int reload_count; - bool ucode_loaded; - - u8 plcp_delta_threshold; - - /* thermal calibration */ - s32 temperature; /* Celsius */ - s32 last_temperature; - - struct iwl_wipan_noa_data __rcu *noa_data; - - /* Scan related variables */ - unsigned long scan_start; - unsigned long scan_start_tsf; - void *scan_cmd; - enum ieee80211_band scan_band; - struct cfg80211_scan_request *scan_request; - struct ieee80211_vif *scan_vif; - enum iwl_scan_type scan_type; - u8 scan_tx_ant[IEEE80211_NUM_BANDS]; - u8 mgmt_tx_ant; - - /* max number of station keys */ - u8 sta_key_max_num; - - bool new_scan_threshold_behaviour; - - bool wowlan; - - /* EEPROM MAC addresses */ - struct mac_address addresses[2]; - - struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; - - __le16 switch_channel; - - u8 start_calib; - struct iwl_sensitivity_data sensitivity_data; - struct iwl_chain_noise_data chain_noise_data; - __le16 sensitivity_tbl[HD_TABLE_SIZE]; - __le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES]; - - struct iwl_ht_config current_ht_config; - - /* Rate scaling data */ - u8 retry_rate; - - int activity_timer_active; - - struct iwl_power_mgr power_data; - struct iwl_tt_mgmt thermal_throttle; - - /* station table variables */ - int num_stations; - struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; - unsigned long ucode_key_table; - struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; - atomic_t num_aux_in_flight; - - u8 mac80211_registered; - - /* Indication if ieee80211_ops->open has been called */ - u8 is_open; - - enum nl80211_iftype iw_mode; - - /* Last Rx'd beacon timestamp */ - u64 timestamp; - - struct { - __le32 
flag; - struct statistics_general_common common; - struct statistics_rx_non_phy rx_non_phy; - struct statistics_rx_phy rx_ofdm; - struct statistics_rx_ht_phy rx_ofdm_ht; - struct statistics_rx_phy rx_cck; - struct statistics_tx tx; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct statistics_bt_activity bt_activity; - __le32 num_bt_kills, accum_num_bt_kills; -#endif - spinlock_t lock; - } statistics; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct { - struct statistics_general_common common; - struct statistics_rx_non_phy rx_non_phy; - struct statistics_rx_phy rx_ofdm; - struct statistics_rx_ht_phy rx_ofdm_ht; - struct statistics_rx_phy rx_cck; - struct statistics_tx tx; - struct statistics_bt_activity bt_activity; - } accum_stats, delta_stats, max_delta_stats; -#endif - - /* - * reporting the number of tids has AGG on. 0 means - * no AGGREGATION - */ - u8 agg_tids_count; - - struct iwl_rx_phy_res last_phy_res; - u32 ampdu_ref; - bool last_phy_res_valid; - - /* - * chain noise reset and gain commands are the - * two extra calibration commands follows the standard - * phy calibration commands - */ - u8 phy_calib_chain_noise_reset_cmd; - u8 phy_calib_chain_noise_gain_cmd; - - /* counts reply_tx error */ - struct reply_tx_error_statistics reply_tx_stats; - struct reply_agg_tx_error_statistics reply_agg_tx_stats; - - /* bt coex */ - u8 bt_enable_flag; - u8 bt_status; - u8 bt_traffic_load, last_bt_traffic_load; - bool bt_ch_announce; - bool bt_full_concurrent; - bool bt_ant_couple_ok; - __le32 kill_ack_mask; - __le32 kill_cts_mask; - __le16 bt_valid; - bool reduced_txpower; - u16 bt_on_thresh; - u16 bt_duration; - u16 dynamic_frag_thresh; - u8 bt_ci_compliance; - struct work_struct bt_traffic_change_work; - bool bt_enable_pspoll; - struct iwl_rxon_context *cur_rssi_ctx; - bool bt_is_sco; - - struct work_struct restart; - struct work_struct scan_completed; - struct work_struct abort_scan; - - struct work_struct beacon_update; - struct iwl_rxon_context *beacon_ctx; - struct sk_buff *beacon_skb; - void *beacon_cmd; - - struct work_struct tt_work; - struct work_struct ct_enter; - struct work_struct ct_exit; - struct work_struct start_internal_scan; - struct work_struct tx_flush; - struct work_struct bt_full_concurrency; - struct work_struct bt_runtime_config; - - struct delayed_work scan_check; - - /* TX Power settings */ - s8 tx_power_user_lmt; - s8 tx_power_next; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* debugfs */ - struct dentry *debugfs_dir; - u32 dbgfs_sram_offset, dbgfs_sram_len; - bool disable_ht40; - void *wowlan_sram; -#endif /* CONFIG_IWLWIFI_DEBUGFS */ - - struct iwl_nvm_data *nvm_data; - /* eeprom blob for debugfs */ - u8 *eeprom_blob; - size_t eeprom_blob_size; - - struct work_struct txpower_work; - u32 calib_disabled; - struct work_struct run_time_calib_work; - struct timer_list statistics_periodic; - struct timer_list ucode_trace; - - struct iwl_event_log event_log; - -#ifdef CONFIG_IWLWIFI_LEDS - struct led_classdev led; - unsigned long blink_on, blink_off; - bool led_registered; -#endif - - /* WoWLAN GTK rekey data */ - u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; - __le64 replay_ctr; - __le16 last_seq_ctl; - bool have_rekey_data; -#ifdef CONFIG_PM_SLEEP - struct wiphy_wowlan_support wowlan_support; -#endif - - /* device_pointers: pointers to ucode event tables */ - struct { - u32 error_event_table; - u32 log_event_table; - } device_pointers; - - /* indicator of loaded ucode image */ - enum iwl_ucode_type cur_ucode; -}; /*iwl_priv */ - -static inline struct iwl_rxon_context * 
-iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) -{ - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - - return vif_priv->ctx; -} - -#define for_each_context(priv, ctx) \ - for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \ - ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ - if (priv->valid_contexts & BIT(ctx->ctxid)) - -static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx) -{ - return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; -} - -static inline int iwl_is_associated(struct iwl_priv *priv, - enum iwl_rxon_context_id ctxid) -{ - return iwl_is_associated_ctx(&priv->contexts[ctxid]); -} - -static inline int iwl_is_any_associated(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - for_each_context(priv, ctx) - if (iwl_is_associated_ctx(ctx)) - return true; - return false; -} - -#endif /* __iwl_dev_h__ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c deleted file mode 100644 index 34b41e5f7cfc..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/devices.c +++ /dev/null @@ -1,690 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -/* - * DVM device-specific data & functions - */ -#include "iwl-io.h" -#include "iwl-prph.h" -#include "iwl-eeprom-parse.h" - -#include "agn.h" -#include "dev.h" -#include "commands.h" - - -/* - * 1000 series - * =========== - */ - -/* - * For 1000, use advance thermal throttling critical temperature threshold, - * but legacy thermal management implementation for now. - * This is for the reason of 1000 uCode using advance thermal throttling API - * but not implement ct_kill_exit based on ct_kill exit temperature - * so the thermal throttling will still based on legacy thermal throttling - * management. 
- * The code here need to be modified once 1000 uCode has the advanced thermal - * throttling algorithm in place - */ -static void iwl1000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; - priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; -} - -/* NIC configuration for 1000 series */ -static void iwl1000_nic_config(struct iwl_priv *priv) -{ - /* Setting digital SVR for 1000 card to 1.32V */ - /* locking is acquired in iwl_set_bits_mask_prph() function */ - iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG, - APMG_SVR_DIGITAL_VOLTAGE_1_32, - ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK); -} - -/** - * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time - * @priv -- pointer to iwl_priv data structure - * @tsf_bits -- number of bits need to shift for masking) - */ -static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv, - u16 tsf_bits) -{ - return (1 << tsf_bits) - 1; -} - -/** - * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time - * @priv -- pointer to iwl_priv data structure - * @tsf_bits -- number of bits need to shift for masking) - */ -static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv, - u16 tsf_bits) -{ - return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; -} - -/* - * extended beacon time format - * time in usec will be changed into a 32-bit value in extended:internal format - * the extended part is the beacon counts - * the internal part is the time in usec within one beacon interval - */ -static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, - u32 beacon_interval) -{ - u32 quot; - u32 rem; - u32 interval = beacon_interval * TIME_UNIT; - - if (!interval || !usec) - return 0; - - quot = (usec / interval) & - (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >> - IWLAGN_EXT_BEACON_TIME_POS); - rem = (usec % interval) & iwl_beacon_time_mask_low(priv, - IWLAGN_EXT_BEACON_TIME_POS); - - return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem; -} - -/* base is usually what we get from ucode with each received frame, - * the same as HW timer counter counting down - */ -static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, - u32 addon, u32 beacon_interval) -{ - u32 base_low = base & iwl_beacon_time_mask_low(priv, - IWLAGN_EXT_BEACON_TIME_POS); - u32 addon_low = addon & iwl_beacon_time_mask_low(priv, - IWLAGN_EXT_BEACON_TIME_POS); - u32 interval = beacon_interval * TIME_UNIT; - u32 res = (base & iwl_beacon_time_mask_high(priv, - IWLAGN_EXT_BEACON_TIME_POS)) + - (addon & iwl_beacon_time_mask_high(priv, - IWLAGN_EXT_BEACON_TIME_POS)); - - if (base_low > addon_low) - res += base_low - addon_low; - else if (base_low < addon_low) { - res += interval + base_low - addon_low; - res += (1 << IWLAGN_EXT_BEACON_TIME_POS); - } else - res += (1 << IWLAGN_EXT_BEACON_TIME_POS); - - return cpu_to_le32(res); -} - -static const struct iwl_sensitivity_ranges iwl1000_sensitivity = { - .min_nrg_cck = 95, - .auto_corr_min_ofdm = 90, - .auto_corr_min_ofdm_mrc = 170, - .auto_corr_min_ofdm_x1 = 120, - .auto_corr_min_ofdm_mrc_x1 = 240, - - .auto_corr_max_ofdm = 120, - .auto_corr_max_ofdm_mrc = 210, - .auto_corr_max_ofdm_x1 = 155, - .auto_corr_max_ofdm_mrc_x1 = 290, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 200, - .auto_corr_min_cck_mrc = 170, - .auto_corr_max_cck_mrc = 400, - .nrg_th_cck = 95, - .nrg_th_ofdm = 95, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static void 
iwl1000_hw_set_hw_params(struct iwl_priv *priv) -{ - iwl1000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - priv->hw_params.sens = &iwl1000_sensitivity; -} - -const struct iwl_dvm_cfg iwl_dvm_1000_cfg = { - .set_hw_params = iwl1000_hw_set_hw_params, - .nic_config = iwl1000_nic_config, - .temperature = iwlagn_temperature, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, - .chain_noise_scale = 1000, -}; - - -/* - * 2000 series - * =========== - */ - -static void iwl2000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; - priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; -} - -/* NIC configuration for 2000 series */ -static void iwl2000_nic_config(struct iwl_priv *priv) -{ - iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); -} - -static const struct iwl_sensitivity_ranges iwl2000_sensitivity = { - .min_nrg_cck = 97, - .auto_corr_min_ofdm = 80, - .auto_corr_min_ofdm_mrc = 128, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 192, - - .auto_corr_max_ofdm = 145, - .auto_corr_max_ofdm_mrc = 232, - .auto_corr_max_ofdm_x1 = 110, - .auto_corr_max_ofdm_mrc_x1 = 232, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 175, - .auto_corr_min_cck_mrc = 160, - .auto_corr_max_cck_mrc = 310, - .nrg_th_cck = 97, - .nrg_th_ofdm = 100, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static void iwl2000_hw_set_hw_params(struct iwl_priv *priv) -{ - iwl2000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - priv->hw_params.sens = &iwl2000_sensitivity; -} - -const struct iwl_dvm_cfg iwl_dvm_2000_cfg = { - .set_hw_params = iwl2000_hw_set_hw_params, - .nic_config = iwl2000_nic_config, - .temperature = iwlagn_temperature, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .hd_v2 = true, - .need_temp_offset_calib = true, - .temp_offset_v2 = true, -}; - -const struct iwl_dvm_cfg iwl_dvm_105_cfg = { - .set_hw_params = iwl2000_hw_set_hw_params, - .nic_config = iwl2000_nic_config, - .temperature = iwlagn_temperature, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .hd_v2 = true, - .need_temp_offset_calib = true, - .temp_offset_v2 = true, - .adv_pm = true, -}; - -static const struct iwl_dvm_bt_params iwl2030_bt_params = { - /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ - .advanced_bt_coexist = true, - .agg_time_limit = BT_AGG_THRESHOLD_DEF, - .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, - .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32, - .bt_sco_disable = true, - .bt_session_2 = true, -}; - -const struct iwl_dvm_cfg iwl_dvm_2030_cfg = { - .set_hw_params = iwl2000_hw_set_hw_params, - .nic_config = iwl2000_nic_config, - .temperature = iwlagn_temperature, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .hd_v2 = true, - .bt_params = &iwl2030_bt_params, - .need_temp_offset_calib = true, - .temp_offset_v2 = true, - .adv_pm = true, -}; - -/* - * 5000 series - * =========== - */ - -/* NIC configuration for 5000 series */ -static const struct iwl_sensitivity_ranges iwl5000_sensitivity = { - .min_nrg_cck = 100, - 
.auto_corr_min_ofdm = 90, - .auto_corr_min_ofdm_mrc = 170, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 220, - - .auto_corr_max_ofdm = 120, - .auto_corr_max_ofdm_mrc = 210, - .auto_corr_max_ofdm_x1 = 120, - .auto_corr_max_ofdm_mrc_x1 = 240, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 200, - .auto_corr_min_cck_mrc = 200, - .auto_corr_max_cck_mrc = 400, - .nrg_th_cck = 100, - .nrg_th_ofdm = 100, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static const struct iwl_sensitivity_ranges iwl5150_sensitivity = { - .min_nrg_cck = 95, - .auto_corr_min_ofdm = 90, - .auto_corr_min_ofdm_mrc = 170, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 220, - - .auto_corr_max_ofdm = 120, - .auto_corr_max_ofdm_mrc = 210, - /* max = min for performance bug in 5150 DSP */ - .auto_corr_max_ofdm_x1 = 105, - .auto_corr_max_ofdm_mrc_x1 = 220, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 200, - .auto_corr_min_cck_mrc = 170, - .auto_corr_max_cck_mrc = 400, - .nrg_th_cck = 95, - .nrg_th_ofdm = 95, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) - -static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) -{ - u16 temperature, voltage; - - temperature = le16_to_cpu(priv->nvm_data->kelvin_temperature); - voltage = le16_to_cpu(priv->nvm_data->kelvin_voltage); - - /* offset = temp - volt / coeff */ - return (s32)(temperature - - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); -} - -static void iwl5150_set_ct_threshold(struct iwl_priv *priv) -{ - const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; - s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - - iwl_temp_calib_to_offset(priv); - - priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef; -} - -static void iwl5000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; -} - -static void iwl5000_hw_set_hw_params(struct iwl_priv *priv) -{ - iwl5000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - priv->hw_params.sens = &iwl5000_sensitivity; -} - -static void iwl5150_hw_set_hw_params(struct iwl_priv *priv) -{ - iwl5150_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - priv->hw_params.sens = &iwl5150_sensitivity; -} - -static void iwl5150_temperature(struct iwl_priv *priv) -{ - u32 vt = 0; - s32 offset = iwl_temp_calib_to_offset(priv); - - vt = le32_to_cpu(priv->statistics.common.temperature); - vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; - /* now vt hold the temperature in Kelvin */ - priv->temperature = KELVIN_TO_CELSIUS(vt); - iwl_tt_handler(priv); -} - -static int iwl5000_hw_channel_switch(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch) -{ - /* - * MULTI-FIXME - * See iwlagn_mac_channel_switch. 
- */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl5000_channel_switch_cmd cmd; - u32 switch_time_in_usec, ucode_switch_time; - u16 ch; - u32 tsf_low; - u8 switch_count; - u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); - struct ieee80211_vif *vif = ctx->vif; - struct iwl_host_cmd hcmd = { - .id = REPLY_CHANNEL_SWITCH, - .len = { sizeof(cmd), }, - .data = { &cmd, }, - }; - - cmd.band = priv->band == IEEE80211_BAND_2GHZ; - ch = ch_switch->chandef.chan->hw_value; - IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", - ctx->active.channel, ch); - cmd.channel = cpu_to_le16(ch); - cmd.rxon_flags = ctx->staging.flags; - cmd.rxon_filter_flags = ctx->staging.filter_flags; - switch_count = ch_switch->count; - tsf_low = ch_switch->timestamp & 0x0ffffffff; - /* - * calculate the ucode channel switch time - * adding TSF as one of the factor for when to switch - */ - if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { - if (switch_count > ((priv->ucode_beacon_time - tsf_low) / - beacon_interval)) { - switch_count -= (priv->ucode_beacon_time - - tsf_low) / beacon_interval; - } else - switch_count = 0; - } - if (switch_count <= 1) - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); - else { - switch_time_in_usec = - vif->bss_conf.beacon_int * switch_count * TIME_UNIT; - ucode_switch_time = iwl_usecs_to_beacons(priv, - switch_time_in_usec, - beacon_interval); - cmd.switch_time = iwl_add_beacon_time(priv, - priv->ucode_beacon_time, - ucode_switch_time, - beacon_interval); - } - IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", - cmd.switch_time); - cmd.expect_beacon = - ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR; - - return iwl_dvm_send_cmd(priv, &hcmd); -} - -const struct iwl_dvm_cfg iwl_dvm_5000_cfg = { - .set_hw_params = iwl5000_hw_set_hw_params, - .set_channel_switch = iwl5000_hw_channel_switch, - .temperature = iwlagn_temperature, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .no_idle_support = true, -}; - -const struct iwl_dvm_cfg iwl_dvm_5150_cfg = { - .set_hw_params = iwl5150_hw_set_hw_params, - .set_channel_switch = iwl5000_hw_channel_switch, - .temperature = iwl5150_temperature, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .no_idle_support = true, - .no_xtal_calib = true, -}; - - - -/* - * 6000 series - * =========== - */ - -static void iwl6000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; - priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; -} - -/* NIC configuration for 6000 series */ -static void iwl6000_nic_config(struct iwl_priv *priv) -{ - switch (priv->cfg->device_family) { - case IWL_DEVICE_FAMILY_6005: - case IWL_DEVICE_FAMILY_6030: - case IWL_DEVICE_FAMILY_6000: - break; - case IWL_DEVICE_FAMILY_6000i: - /* 2x2 IPA phy type */ - iwl_write32(priv->trans, CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); - break; - case IWL_DEVICE_FAMILY_6050: - /* Indicate calibration version to uCode. */ - if (priv->nvm_data->calib_version >= 6) - iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); - break; - case IWL_DEVICE_FAMILY_6150: - /* Indicate calibration version to uCode. 
*/ - if (priv->nvm_data->calib_version >= 6) - iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); - iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_6050_1x2); - break; - default: - WARN_ON(1); - } -} - -static const struct iwl_sensitivity_ranges iwl6000_sensitivity = { - .min_nrg_cck = 110, - .auto_corr_min_ofdm = 80, - .auto_corr_min_ofdm_mrc = 128, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 192, - - .auto_corr_max_ofdm = 145, - .auto_corr_max_ofdm_mrc = 232, - .auto_corr_max_ofdm_x1 = 110, - .auto_corr_max_ofdm_mrc_x1 = 232, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 175, - .auto_corr_min_cck_mrc = 160, - .auto_corr_max_cck_mrc = 310, - .nrg_th_cck = 110, - .nrg_th_ofdm = 110, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 336, - .nrg_th_cca = 62, -}; - -static void iwl6000_hw_set_hw_params(struct iwl_priv *priv) -{ - iwl6000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - priv->hw_params.sens = &iwl6000_sensitivity; - -} - -static int iwl6000_hw_channel_switch(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch) -{ - /* - * MULTI-FIXME - * See iwlagn_mac_channel_switch. - */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl6000_channel_switch_cmd *cmd; - u32 switch_time_in_usec, ucode_switch_time; - u16 ch; - u32 tsf_low; - u8 switch_count; - u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); - struct ieee80211_vif *vif = ctx->vif; - struct iwl_host_cmd hcmd = { - .id = REPLY_CHANNEL_SWITCH, - .len = { sizeof(*cmd), }, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - }; - int err; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - hcmd.data[0] = cmd; - - cmd->band = priv->band == IEEE80211_BAND_2GHZ; - ch = ch_switch->chandef.chan->hw_value; - IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", - ctx->active.channel, ch); - cmd->channel = cpu_to_le16(ch); - cmd->rxon_flags = ctx->staging.flags; - cmd->rxon_filter_flags = ctx->staging.filter_flags; - switch_count = ch_switch->count; - tsf_low = ch_switch->timestamp & 0x0ffffffff; - /* - * calculate the ucode channel switch time - * adding TSF as one of the factor for when to switch - */ - if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { - if (switch_count > ((priv->ucode_beacon_time - tsf_low) / - beacon_interval)) { - switch_count -= (priv->ucode_beacon_time - - tsf_low) / beacon_interval; - } else - switch_count = 0; - } - if (switch_count <= 1) - cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time); - else { - switch_time_in_usec = - vif->bss_conf.beacon_int * switch_count * TIME_UNIT; - ucode_switch_time = iwl_usecs_to_beacons(priv, - switch_time_in_usec, - beacon_interval); - cmd->switch_time = iwl_add_beacon_time(priv, - priv->ucode_beacon_time, - ucode_switch_time, - beacon_interval); - } - IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", - cmd->switch_time); - cmd->expect_beacon = - ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR; - - err = iwl_dvm_send_cmd(priv, &hcmd); - kfree(cmd); - return err; -} - -const struct iwl_dvm_cfg iwl_dvm_6000_cfg = { - .set_hw_params = iwl6000_hw_set_hw_params, - .set_channel_switch = iwl6000_hw_channel_switch, - .nic_config = iwl6000_nic_config, - .temperature = iwlagn_temperature, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, -}; - -const struct iwl_dvm_cfg 
iwl_dvm_6005_cfg = { - .set_hw_params = iwl6000_hw_set_hw_params, - .set_channel_switch = iwl6000_hw_channel_switch, - .nic_config = iwl6000_nic_config, - .temperature = iwlagn_temperature, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .need_temp_offset_calib = true, -}; - -const struct iwl_dvm_cfg iwl_dvm_6050_cfg = { - .set_hw_params = iwl6000_hw_set_hw_params, - .set_channel_switch = iwl6000_hw_channel_switch, - .nic_config = iwl6000_nic_config, - .temperature = iwlagn_temperature, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1500, -}; - -static const struct iwl_dvm_bt_params iwl6000_bt_params = { - /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ - .advanced_bt_coexist = true, - .agg_time_limit = BT_AGG_THRESHOLD_DEF, - .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, - .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, - .bt_sco_disable = true, -}; - -const struct iwl_dvm_cfg iwl_dvm_6030_cfg = { - .set_hw_params = iwl6000_hw_set_hw_params, - .set_channel_switch = iwl6000_hw_channel_switch, - .nic_config = iwl6000_nic_config, - .temperature = iwlagn_temperature, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .bt_params = &iwl6000_bt_params, - .need_temp_offset_calib = true, - .adv_pm = true, -}; diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c deleted file mode 100644 index ca4d6692cc4e..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/led.c +++ /dev/null @@ -1,223 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include "iwl-io.h" -#include "iwl-trans.h" -#include "iwl-modparams.h" -#include "dev.h" -#include "agn.h" - -/* Throughput OFF time(ms) ON time (ms) - * >300 25 25 - * >200 to 300 40 40 - * >100 to 200 55 55 - * >70 to 100 65 65 - * >50 to 70 75 75 - * >20 to 50 85 85 - * >10 to 20 95 95 - * >5 to 10 110 110 - * >1 to 5 130 130 - * >0 to 1 167 167 - * <=0 SOLID ON - */ -static const struct ieee80211_tpt_blink iwl_blink[] = { - { .throughput = 0, .blink_time = 334 }, - { .throughput = 1 * 1024 - 1, .blink_time = 260 }, - { .throughput = 5 * 1024 - 1, .blink_time = 220 }, - { .throughput = 10 * 1024 - 1, .blink_time = 190 }, - { .throughput = 20 * 1024 - 1, .blink_time = 170 }, - { .throughput = 50 * 1024 - 1, .blink_time = 150 }, - { .throughput = 70 * 1024 - 1, .blink_time = 130 }, - { .throughput = 100 * 1024 - 1, .blink_time = 110 }, - { .throughput = 200 * 1024 - 1, .blink_time = 80 }, - { .throughput = 300 * 1024 - 1, .blink_time = 50 }, -}; - -/* Set led register off */ -void iwlagn_led_enable(struct iwl_priv *priv) -{ - iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); -} - -/* - * Adjust led blink rate to compensate on a MAC Clock difference on every HW - * Led blink rate analysis showed an average deviation of 20% on 5000 series - * and up. - * Need to compensate on the led on/off time per HW according to the deviation - * to achieve the desired led frequency - * The calculation is: (100-averageDeviation)/100 * blinkTime - * For code efficiency the calculation will be: - * compensation = (100 - averageDeviation) * 64 / 100 - * NewBlinkTime = (compensation * BlinkTime) / 64 - */ -static inline u8 iwl_blink_compensation(struct iwl_priv *priv, - u8 time, u16 compensation) -{ - if (!compensation) { - IWL_ERR(priv, "undefined blink compensation: " - "use pre-defined blinking time\n"); - return time; - } - - return (u8)((time * compensation) >> 6); -} - -static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) -{ - struct iwl_host_cmd cmd = { - .id = REPLY_LEDS_CMD, - .len = { sizeof(struct iwl_led_cmd), }, - .data = { led_cmd, }, - .flags = CMD_ASYNC, - }; - u32 reg; - - reg = iwl_read32(priv->trans, CSR_LED_REG); - if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) - iwl_write32(priv->trans, CSR_LED_REG, - reg & CSR_LED_BSM_CTRL_MSK); - - return iwl_dvm_send_cmd(priv, &cmd); -} - -/* Set led pattern command */ -static int iwl_led_cmd(struct iwl_priv *priv, - unsigned long on, - unsigned long off) -{ - struct iwl_led_cmd led_cmd = { - .id = IWL_LED_LINK, - .interval = IWL_DEF_LED_INTRVL - }; - int ret; - - if (!test_bit(STATUS_READY, &priv->status)) - return -EBUSY; - - if (priv->blink_on == on && priv->blink_off == off) - return 0; - - if (off == 0) { - /* led is SOLID_ON */ - on = IWL_LED_SOLID; - } - - IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", - priv->cfg->base_params->led_compensation); - led_cmd.on = iwl_blink_compensation(priv, on, - priv->cfg->base_params->led_compensation); - led_cmd.off = iwl_blink_compensation(priv, off, - priv->cfg->base_params->led_compensation); - - ret = iwl_send_led_cmd(priv, &led_cmd); - if (!ret) { - priv->blink_on = on; - priv->blink_off = off; - } - return ret; -} - -static void iwl_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct iwl_priv *priv = 
container_of(led_cdev, struct iwl_priv, led); - unsigned long on = 0; - - if (brightness > 0) - on = IWL_LED_SOLID; - - iwl_led_cmd(priv, on, 0); -} - -static int iwl_led_blink_set(struct led_classdev *led_cdev, - unsigned long *delay_on, - unsigned long *delay_off) -{ - struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); - - return iwl_led_cmd(priv, *delay_on, *delay_off); -} - -void iwl_leds_init(struct iwl_priv *priv) -{ - int mode = iwlwifi_mod_params.led_mode; - int ret; - - if (mode == IWL_LED_DISABLE) { - IWL_INFO(priv, "Led disabled\n"); - return; - } - if (mode == IWL_LED_DEFAULT) - mode = priv->cfg->led_mode; - - priv->led.name = kasprintf(GFP_KERNEL, "%s-led", - wiphy_name(priv->hw->wiphy)); - priv->led.brightness_set = iwl_led_brightness_set; - priv->led.blink_set = iwl_led_blink_set; - priv->led.max_brightness = 1; - - switch (mode) { - case IWL_LED_DEFAULT: - WARN_ON(1); - break; - case IWL_LED_BLINK: - priv->led.default_trigger = - ieee80211_create_tpt_led_trigger(priv->hw, - IEEE80211_TPT_LEDTRIG_FL_CONNECTED, - iwl_blink, ARRAY_SIZE(iwl_blink)); - break; - case IWL_LED_RF_STATE: - priv->led.default_trigger = - ieee80211_get_radio_led_name(priv->hw); - break; - } - - ret = led_classdev_register(priv->trans->dev, &priv->led); - if (ret) { - kfree(priv->led.name); - return; - } - - priv->led_registered = true; -} - -void iwl_leds_exit(struct iwl_priv *priv) -{ - if (!priv->led_registered) - return; - - led_classdev_unregister(&priv->led); - kfree(priv->led.name); -} diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h deleted file mode 100644 index 1c6b2252d0f2..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/led.h +++ /dev/null @@ -1,55 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
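Returning to the blink-compensation formula in the led.c comment above: the per-hardware led_compensation value is precomputed as (100 - averageDeviation) * 64 / 100, so the runtime adjustment in iwl_blink_compensation() is just a multiply and a right shift by 6. A standalone worked example, not part of the patch; the 20% deviation comes from the comment, the other numbers are made up:

	#include <stdio.h>

	int main(void)
	{
		int avg_deviation = 20;		/* %, per the led.c comment */
		int compensation = (100 - avg_deviation) * 64 / 100;	/* 51 */
		int blink_time = 110;		/* ms, requested on/off time */

		/* same shape as iwl_blink_compensation(): (time * comp) >> 6 */
		printf("compensated blink time = %d ms\n",
		       (blink_time * compensation) >> 6);	/* prints 87 ms */
		return 0;
	}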
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_leds_h__ -#define __iwl_leds_h__ - - -struct iwl_priv; - -#define IWL_LED_SOLID 11 -#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) - -#define IWL_LED_ACTIVITY (0<<1) -#define IWL_LED_LINK (1<<1) - -#ifdef CONFIG_IWLWIFI_LEDS -void iwlagn_led_enable(struct iwl_priv *priv); -void iwl_leds_init(struct iwl_priv *priv); -void iwl_leds_exit(struct iwl_priv *priv); -#else -static inline void iwlagn_led_enable(struct iwl_priv *priv) -{ -} -static inline void iwl_leds_init(struct iwl_priv *priv) -{ -} -static inline void iwl_leds_exit(struct iwl_priv *priv) -{ -} -#endif - -#endif /* __iwl_leds_h__ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c deleted file mode 100644 index e18629a16fb0..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ /dev/null @@ -1,1300 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include -#include -#include - -#include "iwl-io.h" -#include "iwl-agn-hw.h" -#include "iwl-trans.h" -#include "iwl-modparams.h" - -#include "dev.h" -#include "agn.h" - -int iwlagn_hw_valid_rtc_data_addr(u32 addr) -{ - return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) && - (addr < IWLAGN_RTC_DATA_UPPER_BOUND); -} - -int iwlagn_send_tx_power(struct iwl_priv *priv) -{ - struct iwlagn_tx_power_dbm_cmd tx_power_cmd; - u8 tx_ant_cfg_cmd; - - if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), - "TX Power requested while scanning!\n")) - return -EAGAIN; - - /* half dBm need to multiply */ - tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); - - if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) { - /* - * For the newer devices which using enhanced/extend tx power - * table in EEPROM, the format is in half dBm. driver need to - * convert to dBm format before report to mac80211. - * By doing so, there is a possibility of 1/2 dBm resolution - * lost. driver will perform "round-up" operation before - * reporting, but it will cause 1/2 dBm tx power over the - * regulatory limit. 
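As a side note to the half-dBm handling described in the surrounding comment: iwlagn_send_tx_power() keeps the user limit in dBm, doubles it to get half-dBm units, and then clamps it against the NVM limit, which is already stored in half-dBm. A standalone sketch of that arithmetic, not part of the patch and with made-up values:

	#include <stdio.h>

	int main(void)
	{
		int tx_power_user_lmt = 16;	/* user request, dBm */
		int max_tx_pwr_half_dbm = 31;	/* NVM limit, half-dBm = 15.5 dBm */
		int global_lmt = 2 * tx_power_user_lmt;	/* dBm -> half-dBm */

		if (global_lmt > max_tx_pwr_half_dbm)	/* clamp, as done just below */
			global_lmt = max_tx_pwr_half_dbm;

		printf("global_lmt = %d half-dBm (%.1f dBm)\n",
		       global_lmt, global_lmt / 2.0);	/* 31 half-dBm = 15.5 dBm */
		return 0;
	}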
Perform the checking here, if the - * "tx_power_user_lmt" is higher than EEPROM value (in - * half-dBm format), lower the tx power based on EEPROM - */ - tx_power_cmd.global_lmt = - priv->nvm_data->max_tx_pwr_half_dbm; - } - tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED; - tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO; - - if (IWL_UCODE_API(priv->fw->ucode_ver) == 1) - tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; - else - tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; - - return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0, - sizeof(tx_power_cmd), &tx_power_cmd); -} - -void iwlagn_temperature(struct iwl_priv *priv) -{ - lockdep_assert_held(&priv->statistics.lock); - - /* store temperature from correct statistics (in Celsius) */ - priv->temperature = le32_to_cpu(priv->statistics.common.temperature); - iwl_tt_handler(priv); -} - -int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) -{ - int idx = 0; - int band_offset = 0; - - /* HT rate format: mac80211 wants an MCS number, which is just LSB */ - if (rate_n_flags & RATE_MCS_HT_MSK) { - idx = (rate_n_flags & 0xff); - return idx; - /* Legacy rate format, search for match in table */ - } else { - if (band == IEEE80211_BAND_5GHZ) - band_offset = IWL_FIRST_OFDM_RATE; - for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) - if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) - return idx - band_offset; - } - - return -1; -} - -int iwlagn_manage_ibss_station(struct iwl_priv *priv, - struct ieee80211_vif *vif, bool add) -{ - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - - if (add) - return iwlagn_add_bssid_station(priv, vif_priv->ctx, - vif->bss_conf.bssid, - &vif_priv->ibss_bssid_sta_id); - return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, - vif->bss_conf.bssid); -} - -/** - * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode - * - * pre-requirements: - * 1. acquire mutex before calling - * 2. 
make sure rf is on and not in exit state - */ -int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk) -{ - struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = { - .flush_control = cpu_to_le16(IWL_DROP_ALL), - }; - struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = { - .flush_control = cpu_to_le16(IWL_DROP_ALL), - }; - - u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | - IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK; - - if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))) - queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK | - IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK | - IWL_PAN_SCD_MGMT_MSK | - IWL_PAN_SCD_MULTICAST_MSK; - - if (priv->nvm_data->sku_cap_11n_enable) - queue_control |= IWL_AGG_TX_QUEUE_MSK; - - if (scd_q_msk) - queue_control = scd_q_msk; - - IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control); - flush_cmd_v3.queue_control = cpu_to_le32(queue_control); - flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control); - - if (IWL_UCODE_API(priv->fw->ucode_ver) > 2) - return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, - sizeof(flush_cmd_v3), - &flush_cmd_v3); - return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, - sizeof(flush_cmd_v2), &flush_cmd_v2); -} - -void iwlagn_dev_txfifo_flush(struct iwl_priv *priv) -{ - mutex_lock(&priv->mutex); - ieee80211_stop_queues(priv->hw); - if (iwlagn_txfifo_flush(priv, 0)) { - IWL_ERR(priv, "flush request fail\n"); - goto done; - } - IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); -done: - ieee80211_wake_queues(priv->hw); - mutex_unlock(&priv->mutex); -} - -/* - * BT coex - */ -/* Notmal TDM */ -static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xc0004000), - cpu_to_le32(0x00004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), -}; - - -/* Loose Coex */ -static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), -}; - -/* Full concurrency */ -static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), -}; - -void iwlagn_send_advance_bt_config(struct iwl_priv *priv) -{ - struct iwl_basic_bt_cmd basic = { - .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT, - .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT, - .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT, - .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT, - }; - struct iwl_bt_cmd_v1 bt_cmd_v1; - struct iwl_bt_cmd_v2 bt_cmd_v2; - int ret; - - BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != - sizeof(basic.bt3_lookup_table)); - - if (priv->lib->bt_params) { - /* - * newer generation of devices (2000 series and newer) - * use the version 2 of the bt command - * we need to make sure sending 
the host command - * with correct data structure to avoid uCode assert - */ - if (priv->lib->bt_params->bt_session_2) { - bt_cmd_v2.prio_boost = cpu_to_le32( - priv->lib->bt_params->bt_prio_boost); - bt_cmd_v2.tx_prio_boost = 0; - bt_cmd_v2.rx_prio_boost = 0; - } else { - /* older version only has 8 bits */ - WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF); - bt_cmd_v1.prio_boost = - priv->lib->bt_params->bt_prio_boost; - bt_cmd_v1.tx_prio_boost = 0; - bt_cmd_v1.rx_prio_boost = 0; - } - } else { - IWL_ERR(priv, "failed to construct BT Coex Config\n"); - return; - } - - /* - * Possible situations when BT needs to take over for receive, - * at the same time where STA needs to response to AP's frame(s), - * reduce the tx power of the required response frames, by that, - * allow the concurrent BT receive & WiFi transmit - * (BT - ANT A, WiFi -ANT B), without interference to one another - * - * Reduced tx power apply to control frames only (ACK/Back/CTS) - * when indicated by the BT config command - */ - basic.kill_ack_mask = priv->kill_ack_mask; - basic.kill_cts_mask = priv->kill_cts_mask; - if (priv->reduced_txpower) - basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR; - basic.valid = priv->bt_valid; - - /* - * Configure BT coex mode to "no coexistence" when the - * user disabled BT coexistence, we have no interface - * (might be in monitor mode), or the interface is in - * IBSS mode (no proper uCode support for coex then). - */ - if (!iwlwifi_mod_params.bt_coex_active || - priv->iw_mode == NL80211_IFTYPE_ADHOC) { - basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED; - } else { - basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << - IWLAGN_BT_FLAG_COEX_MODE_SHIFT; - - if (!priv->bt_enable_pspoll) - basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; - else - basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; - - if (priv->bt_ch_announce) - basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; - IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags); - } - priv->bt_enable_flag = basic.flags; - if (priv->bt_full_concurrent) - memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup, - sizeof(iwlagn_concurrent_lookup)); - else - memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup, - sizeof(iwlagn_def_3w_lookup)); - - IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n", - basic.flags ? "active" : "disabled", - priv->bt_full_concurrent ? - "full concurrency" : "3-wire"); - - if (priv->lib->bt_params->bt_session_2) { - memcpy(&bt_cmd_v2.basic, &basic, - sizeof(basic)); - ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, - 0, sizeof(bt_cmd_v2), &bt_cmd_v2); - } else { - memcpy(&bt_cmd_v1.basic, &basic, - sizeof(basic)); - ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, - 0, sizeof(bt_cmd_v1), &bt_cmd_v1); - } - if (ret) - IWL_ERR(priv, "failed to send BT Coex Config\n"); - -} - -void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena) -{ - struct iwl_rxon_context *ctx, *found_ctx = NULL; - bool found_ap = false; - - lockdep_assert_held(&priv->mutex); - - /* Check whether AP or GO mode is active. */ - if (rssi_ena) { - for_each_context(priv, ctx) { - if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP && - iwl_is_associated_ctx(ctx)) { - found_ap = true; - break; - } - } - } - - /* - * If disable was received or If GO/AP mode, disable RSSI - * measurements. 
- */ - if (!rssi_ena || found_ap) { - if (priv->cur_rssi_ctx) { - ctx = priv->cur_rssi_ctx; - ieee80211_disable_rssi_reports(ctx->vif); - priv->cur_rssi_ctx = NULL; - } - return; - } - - /* - * If rssi measurements need to be enabled, consider all cases now. - * Figure out how many contexts are active. - */ - for_each_context(priv, ctx) { - if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && - iwl_is_associated_ctx(ctx)) { - found_ctx = ctx; - break; - } - } - - /* - * rssi monitor already enabled for the correct interface...nothing - * to do. - */ - if (found_ctx == priv->cur_rssi_ctx) - return; - - /* - * Figure out if rssi monitor is currently enabled, and needs - * to be changed. If rssi monitor is already enabled, disable - * it first else just enable rssi measurements on the - * interface found above. - */ - if (priv->cur_rssi_ctx) { - ctx = priv->cur_rssi_ctx; - if (ctx->vif) - ieee80211_disable_rssi_reports(ctx->vif); - } - - priv->cur_rssi_ctx = found_ctx; - - if (!found_ctx) - return; - - ieee80211_enable_rssi_reports(found_ctx->vif, - IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD, - IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD); -} - -static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg) -{ - return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> - BT_UART_MSG_FRAME3SCOESCO_POS; -} - -static void iwlagn_bt_traffic_change_work(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, bt_traffic_change_work); - struct iwl_rxon_context *ctx; - int smps_request = -1; - - if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) { - /* bt coex disabled */ - return; - } - - /* - * Note: bt_traffic_load can be overridden by scan complete and - * coex profile notifications. Ignore that since only bad consequence - * can be not matching debug print with actual state. - */ - IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n", - priv->bt_traffic_load); - - switch (priv->bt_traffic_load) { - case IWL_BT_COEX_TRAFFIC_LOAD_NONE: - if (priv->bt_status) - smps_request = IEEE80211_SMPS_DYNAMIC; - else - smps_request = IEEE80211_SMPS_AUTOMATIC; - break; - case IWL_BT_COEX_TRAFFIC_LOAD_LOW: - smps_request = IEEE80211_SMPS_DYNAMIC; - break; - case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: - case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: - smps_request = IEEE80211_SMPS_STATIC; - break; - default: - IWL_ERR(priv, "Invalid BT traffic load: %d\n", - priv->bt_traffic_load); - break; - } - - mutex_lock(&priv->mutex); - - /* - * We can not send command to firmware while scanning. When the scan - * complete we will schedule this work again. We do check with mutex - * locked to prevent new scan request to arrive. We do not check - * STATUS_SCANNING to avoid race when queue_work two times from - * different notifications, but quit and not perform any work at all. - */ - if (test_bit(STATUS_SCAN_HW, &priv->status)) - goto out; - - iwl_update_chain_flags(priv); - - if (smps_request != -1) { - priv->current_ht_config.smps = smps_request; - for_each_context(priv, ctx) { - if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) - ieee80211_request_smps(ctx->vif, smps_request); - } - } - - /* - * Dynamic PS poll related functionality. Adjust RSSI measurements if - * necessary. - */ - iwlagn_bt_coex_rssi_monitor(priv); -out: - mutex_unlock(&priv->mutex); -} - -/* - * If BT sco traffic, and RSSI monitor is enabled, move measurements to the - * correct interface or disable it if this is the last interface to be - * removed. 
- */ -void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv) -{ - if (priv->bt_is_sco && - priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS) - iwlagn_bt_adjust_rssi_monitor(priv, true); - else - iwlagn_bt_adjust_rssi_monitor(priv, false); -} - -static void iwlagn_print_uartmsg(struct iwl_priv *priv, - struct iwl_bt_uart_msg *uart_msg) -{ - IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, " - "Update Req = 0x%X\n", - (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >> - BT_UART_MSG_FRAME1MSGTYPE_POS, - (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >> - BT_UART_MSG_FRAME1SSN_POS, - (BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >> - BT_UART_MSG_FRAME1UPDATEREQ_POS); - - IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, " - "Chl_SeqN = 0x%X, In band = 0x%X\n", - (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >> - BT_UART_MSG_FRAME2OPENCONNECTIONS_POS, - (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >> - BT_UART_MSG_FRAME2TRAFFICLOAD_POS, - (BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >> - BT_UART_MSG_FRAME2CHLSEQN_POS, - (BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >> - BT_UART_MSG_FRAME2INBAND_POS); - - IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, " - "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n", - (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> - BT_UART_MSG_FRAME3SCOESCO_POS, - (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >> - BT_UART_MSG_FRAME3SNIFF_POS, - (BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >> - BT_UART_MSG_FRAME3A2DP_POS, - (BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >> - BT_UART_MSG_FRAME3ACL_POS, - (BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >> - BT_UART_MSG_FRAME3MASTER_POS, - (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >> - BT_UART_MSG_FRAME3OBEX_POS); - - IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n", - (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >> - BT_UART_MSG_FRAME4IDLEDURATION_POS); - - IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, " - "eSCO Retransmissions = 0x%X\n", - (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >> - BT_UART_MSG_FRAME5TXACTIVITY_POS, - (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >> - BT_UART_MSG_FRAME5RXACTIVITY_POS, - (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >> - BT_UART_MSG_FRAME5ESCORETRANSMIT_POS); - - IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n", - (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >> - BT_UART_MSG_FRAME6SNIFFINTERVAL_POS, - (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> - BT_UART_MSG_FRAME6DISCOVERABLE_POS); - - IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = " - "0x%X, Inquiry = 0x%X, Connectable = 0x%X\n", - (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> - BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, - (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >> - BT_UART_MSG_FRAME7PAGE_POS, - (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >> - BT_UART_MSG_FRAME7INQUIRY_POS, - (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >> - BT_UART_MSG_FRAME7CONNECTABLE_POS); -} - -static bool iwlagn_set_kill_msk(struct iwl_priv *priv, - struct iwl_bt_uart_msg *uart_msg) -{ - bool need_update = false; - u8 kill_msk = IWL_BT_KILL_REDUCE; - static const __le32 bt_kill_ack_msg[3] = { - IWLAGN_BT_KILL_ACK_MASK_DEFAULT, - IWLAGN_BT_KILL_ACK_CTS_MASK_SCO, - IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE}; - static const __le32 bt_kill_cts_msg[3] = { - IWLAGN_BT_KILL_CTS_MASK_DEFAULT, - IWLAGN_BT_KILL_ACK_CTS_MASK_SCO, - 
IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE}; - - if (!priv->reduced_txpower) - kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) - ? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT; - if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] || - priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) { - priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK; - priv->kill_ack_mask = bt_kill_ack_msg[kill_msk]; - priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK; - priv->kill_cts_mask = bt_kill_cts_msg[kill_msk]; - need_update = true; - } - return need_update; -} - -/* - * Upon RSSI changes, sends a bt config command with following changes - * 1. enable/disable "reduced control frames tx power - * 2. update the "kill)ack_mask" and "kill_cts_mask" - * - * If "reduced tx power" is enabled, uCode shall - * 1. ACK/Back/CTS rate shall reduced to 6Mbps - * 2. not use duplciate 20/40MHz mode - */ -static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv, - struct iwl_bt_uart_msg *uart_msg) -{ - bool need_update = false; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - int ave_rssi; - - if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) { - IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n"); - return false; - } - - ave_rssi = ieee80211_ave_rssi(ctx->vif); - if (!ave_rssi) { - /* no rssi data, no changes to reduce tx power */ - IWL_DEBUG_COEX(priv, "no rssi data available\n"); - return need_update; - } - if (!priv->reduced_txpower && - !iwl_is_associated(priv, IWL_RXON_CTX_PAN) && - (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) && - (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK | - BT_UART_MSG_FRAME3OBEX_MSK)) && - !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK | - BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) { - /* enabling reduced tx power */ - priv->reduced_txpower = true; - priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR; - need_update = true; - } else if (priv->reduced_txpower && - (iwl_is_associated(priv, IWL_RXON_CTX_PAN) || - (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) || - (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK | - BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) || - !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK | - BT_UART_MSG_FRAME3OBEX_MSK)))) { - /* disable reduced tx power */ - priv->reduced_txpower = false; - priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR; - need_update = true; - } - - return need_update; -} - -static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data; - struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg; - - if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) { - /* bt coex disabled */ - return; - } - - IWL_DEBUG_COEX(priv, "BT Coex notification:\n"); - IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status); - IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load); - IWL_DEBUG_COEX(priv, " CI compliance: %d\n", - coex->bt_ci_compliance); - iwlagn_print_uartmsg(priv, uart_msg); - - priv->last_bt_traffic_load = priv->bt_traffic_load; - priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg); - - if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { - if (priv->bt_status != coex->bt_status || - priv->last_bt_traffic_load != coex->bt_traffic_load) { - if (coex->bt_status) { - /* BT on */ - if (!priv->bt_ch_announce) - priv->bt_traffic_load = - IWL_BT_COEX_TRAFFIC_LOAD_HIGH; - else - priv->bt_traffic_load = - 
coex->bt_traffic_load; - } else { - /* BT off */ - priv->bt_traffic_load = - IWL_BT_COEX_TRAFFIC_LOAD_NONE; - } - priv->bt_status = coex->bt_status; - queue_work(priv->workqueue, - &priv->bt_traffic_change_work); - } - } - - /* schedule to send runtime bt_config */ - /* check reduce power before change ack/cts kill mask */ - if (iwlagn_fill_txpower_mode(priv, uart_msg) || - iwlagn_set_kill_msk(priv, uart_msg)) - queue_work(priv->workqueue, &priv->bt_runtime_config); - - - /* FIXME: based on notification, adjust the prio_boost */ - - priv->bt_ci_compliance = coex->bt_ci_compliance; -} - -void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv) -{ - priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] = - iwlagn_bt_coex_profile_notif; -} - -void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv) -{ - INIT_WORK(&priv->bt_traffic_change_work, - iwlagn_bt_traffic_change_work); -} - -void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv) -{ - cancel_work_sync(&priv->bt_traffic_change_work); -} - -static bool is_single_rx_stream(struct iwl_priv *priv) -{ - return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || - priv->current_ht_config.single_chain_sufficient; -} - -#define IWL_NUM_RX_CHAINS_MULTIPLE 3 -#define IWL_NUM_RX_CHAINS_SINGLE 2 -#define IWL_NUM_IDLE_CHAINS_DUAL 2 -#define IWL_NUM_IDLE_CHAINS_SINGLE 1 - -/* - * Determine how many receiver/antenna chains to use. - * - * More provides better reception via diversity. Fewer saves power - * at the expense of throughput, but only when not in powersave to - * start with. - * - * MIMO (dual stream) requires at least 2, but works better with 3. - * This does not determine *which* chains to use, just how many. - */ -static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) -{ - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist && - (priv->bt_full_concurrent || - priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { - /* - * only use chain 'A' in bt high traffic load or - * full concurrency mode - */ - return IWL_NUM_RX_CHAINS_SINGLE; - } - /* # of Rx chains to use when expecting MIMO. */ - if (is_single_rx_stream(priv)) - return IWL_NUM_RX_CHAINS_SINGLE; - else - return IWL_NUM_RX_CHAINS_MULTIPLE; -} - -/* - * When we are in power saving mode, unless device support spatial - * multiplexing power save, use the active count for rx chain count. - */ -static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) -{ - /* # Rx chains when idling, depending on SMPS mode */ - switch (priv->current_ht_config.smps) { - case IEEE80211_SMPS_STATIC: - case IEEE80211_SMPS_DYNAMIC: - return IWL_NUM_IDLE_CHAINS_SINGLE; - case IEEE80211_SMPS_AUTOMATIC: - case IEEE80211_SMPS_OFF: - return active_cnt; - default: - WARN(1, "invalid SMPS mode %d", - priv->current_ht_config.smps); - return active_cnt; - } -} - -/* up to 4 chains */ -static u8 iwl_count_chain_bitmap(u32 chain_bitmap) -{ - u8 res; - res = (chain_bitmap & BIT(0)) >> 0; - res += (chain_bitmap & BIT(1)) >> 1; - res += (chain_bitmap & BIT(2)) >> 2; - res += (chain_bitmap & BIT(3)) >> 3; - return res; -} - -/** - * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image - * - * Selects how many and which Rx receivers/antennas/chains to use. - * This should not be used for scan command ... it puts data in wrong place. 
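iwl_count_chain_bitmap() above is simply a population count over the four possible antenna chains; iwlagn_set_rxon_chain(), which follows, uses the result to cap the active and idle RX chain counts to what is actually connected. A self-contained model of that count, not part of the patch, with a made-up antenna bitmap:

	#include <stdio.h>

	/* mirrors iwl_count_chain_bitmap(): one bit per antenna chain A..D */
	static unsigned count_chains(unsigned chain_bitmap)
	{
		unsigned res = 0, i;

		for (i = 0; i < 4; i++)
			res += (chain_bitmap >> i) & 1;
		return res;
	}

	int main(void)
	{
		unsigned valid_rx_ant = 0x7;	/* chains A, B and C connected */

		/* 3 valid chains: active/idle RX counts get capped to this */
		printf("valid rx chains: %u\n", count_chains(valid_rx_ant));
		return 0;
	}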
- */ -void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - bool is_single = is_single_rx_stream(priv); - bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); - u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; - u32 active_chains; - u16 rx_chain; - - /* Tell uCode which antennas are actually connected. - * Before first association, we assume all antennas are connected. - * Just after first association, iwl_chain_noise_calibration() - * checks which antennas actually *are* connected. */ - if (priv->chain_noise_data.active_chains) - active_chains = priv->chain_noise_data.active_chains; - else - active_chains = priv->nvm_data->valid_rx_ant; - - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist && - (priv->bt_full_concurrent || - priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { - /* - * only use chain 'A' in bt high traffic load or - * full concurrency mode - */ - active_chains = first_antenna(active_chains); - } - - rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; - - /* How many receivers should we use? */ - active_rx_cnt = iwl_get_active_rx_chain_count(priv); - idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt); - - - /* correct rx chain count according hw settings - * and chain noise calibration - */ - valid_rx_cnt = iwl_count_chain_bitmap(active_chains); - if (valid_rx_cnt < active_rx_cnt) - active_rx_cnt = valid_rx_cnt; - - if (valid_rx_cnt < idle_rx_cnt) - idle_rx_cnt = valid_rx_cnt; - - rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; - rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; - - ctx->staging.rx_chain = cpu_to_le16(rx_chain); - - if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) - ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; - else - ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; - - IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n", - ctx->staging.rx_chain, - active_rx_cnt, idle_rx_cnt); - - WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || - active_rx_cnt < idle_rx_cnt); -} - -u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) -{ - int i; - u8 ind = ant; - - if (priv->band == IEEE80211_BAND_2GHZ && - priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) - return 0; - - for (i = 0; i < RATE_ANT_NUM - 1; i++) { - ind = (ind + 1) < RATE_ANT_NUM ? 
ind + 1 : 0; - if (valid & BIT(ind)) - return ind; - } - return ant; -} - -#ifdef CONFIG_PM_SLEEP -static void iwlagn_convert_p1k(u16 *p1k, __le16 *out) -{ - int i; - - for (i = 0; i < IWLAGN_P1K_SIZE; i++) - out[i] = cpu_to_le16(p1k[i]); -} - -struct wowlan_key_data { - struct iwl_rxon_context *ctx; - struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; - struct iwlagn_wowlan_tkip_params_cmd *tkip; - const u8 *bssid; - bool error, use_rsc_tsc, use_tkip; -}; - - -static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - void *_data) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct wowlan_key_data *data = _data; - struct iwl_rxon_context *ctx = data->ctx; - struct aes_sc *aes_sc, *aes_tx_sc = NULL; - struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; - struct iwlagn_p1k_cache *rx_p1ks; - u8 *rx_mic_key; - struct ieee80211_key_seq seq; - u32 cur_rx_iv32 = 0; - u16 p1k[IWLAGN_P1K_SIZE]; - int ret, i; - - mutex_lock(&priv->mutex); - - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && - !sta && !ctx->key_mapping_keys) - ret = iwl_set_default_wep_key(priv, ctx, key); - else - ret = iwl_set_dynamic_key(priv, ctx, key, sta); - - if (ret) { - IWL_ERR(priv, "Error setting key during suspend!\n"); - data->error = true; - } - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - if (sta) { - tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; - tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; - - rx_p1ks = data->tkip->rx_uni; - - ieee80211_get_key_tx_seq(key, &seq); - tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); - - ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); - iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); - - memcpy(data->tkip->mic_keys.tx, - &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], - IWLAGN_MIC_KEY_SIZE); - - rx_mic_key = data->tkip->mic_keys.rx_unicast; - } else { - tkip_sc = - data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; - rx_p1ks = data->tkip->rx_multi; - rx_mic_key = data->tkip->mic_keys.rx_mcast; - } - - /* - * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 (as they need to to avoid replay attacks) - * for checking the IV in the frames. 
- */ - for (i = 0; i < IWLAGN_NUM_RSC; i++) { - ieee80211_get_key_rx_seq(key, i, &seq); - tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); - /* wrapping isn't allowed, AP must rekey */ - if (seq.tkip.iv32 > cur_rx_iv32) - cur_rx_iv32 = seq.tkip.iv32; - } - - ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k); - iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k); - ieee80211_get_tkip_rx_p1k(key, data->bssid, - cur_rx_iv32 + 1, p1k); - iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k); - - memcpy(rx_mic_key, - &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], - IWLAGN_MIC_KEY_SIZE); - - data->use_tkip = true; - data->use_rsc_tsc = true; - break; - case WLAN_CIPHER_SUITE_CCMP: - if (sta) { - u8 *pn = seq.ccmp.pn; - - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; - aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64( - (u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); - } else - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; - - /* - * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 for checking the IV in the frames. - */ - for (i = 0; i < IWLAGN_NUM_RSC; i++) { - u8 *pn = seq.ccmp.pn; - - ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc[i].pn = cpu_to_le64( - (u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); - } - data->use_rsc_tsc = true; - break; - } - - mutex_unlock(&priv->mutex); -} - -int iwlagn_send_patterns(struct iwl_priv *priv, - struct cfg80211_wowlan *wowlan) -{ - struct iwlagn_wowlan_patterns_cmd *pattern_cmd; - struct iwl_host_cmd cmd = { - .id = REPLY_WOWLAN_PATTERNS, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - }; - int i, err; - - if (!wowlan->n_patterns) - return 0; - - cmd.len[0] = sizeof(*pattern_cmd) + - wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern); - - pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); - if (!pattern_cmd) - return -ENOMEM; - - pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); - - for (i = 0; i < wowlan->n_patterns; i++) { - int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); - - memcpy(&pattern_cmd->patterns[i].mask, - wowlan->patterns[i].mask, mask_len); - memcpy(&pattern_cmd->patterns[i].pattern, - wowlan->patterns[i].pattern, - wowlan->patterns[i].pattern_len); - pattern_cmd->patterns[i].mask_size = mask_len; - pattern_cmd->patterns[i].pattern_size = - wowlan->patterns[i].pattern_len; - } - - cmd.data[0] = pattern_cmd; - err = iwl_dvm_send_cmd(priv, &cmd); - kfree(pattern_cmd); - return err; -} - -int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan) -{ - struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd; - struct iwl_rxon_cmd rxon; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd; - struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {}; - struct iwlagn_d3_config_cmd d3_cfg_cmd = { - /* - * Program the minimum sleep time to 10 seconds, as many - * platforms have issues processing a wakeup signal while - * still being in the process of suspending. 
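The CCMP branch earlier in this hunk packs the 6-byte packet number into a single 48-bit value before handing it to the firmware: pn[5] ends up in the low byte and pn[0] in bits 40-47. A standalone model of that packing, not part of the patch, with a made-up PN:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* made-up 6-byte CCMP packet number, pn[0] .. pn[5] */
		uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };
		uint64_t packed;

		/* same byte placement as the CCMP branch above: pn[5] lowest */
		packed = (uint64_t)pn[5] |
			 ((uint64_t)pn[4] << 8) |
			 ((uint64_t)pn[3] << 16) |
			 ((uint64_t)pn[2] << 24) |
			 ((uint64_t)pn[1] << 32) |
			 ((uint64_t)pn[0] << 40);

		printf("packed PN = 0x%012llx\n", (unsigned long long)packed);
		return 0;
	}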
- */ - .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), - }; - struct wowlan_key_data key_data = { - .ctx = ctx, - .bssid = ctx->active.bssid_addr, - .use_rsc_tsc = false, - .tkip = &tkip_cmd, - .use_tkip = false, - }; - int ret, i; - u16 seq; - - key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); - if (!key_data.rsc_tsc) - return -ENOMEM; - - memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd)); - - /* - * We know the last used seqno, and the uCode expects to know that - * one, it will increment before TX. - */ - seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ; - wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq); - - /* - * For QoS counters, we store the one to use next, so subtract 0x10 - * since the uCode will add 0x10 before using the value. - */ - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - seq = priv->tid_data[IWL_AP_ID][i].seq_number; - seq -= 0x10; - wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); - } - - if (wowlan->disconnect) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | - IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE); - if (wowlan->magic_pkt) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET); - if (wowlan->gtk_rekey_failure) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL); - if (wowlan->eap_identity_req) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ); - if (wowlan->four_way_handshake) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE); - if (wowlan->n_patterns) - wakeup_filter_cmd.enabled |= - cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH); - - if (wowlan->rfkill_release) - d3_cfg_cmd.wakeup_flags |= - cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL); - - iwl_scan_cancel_timeout(priv, 200); - - memcpy(&rxon, &ctx->active, sizeof(rxon)); - - priv->ucode_loaded = false; - iwl_trans_stop_device(priv->trans); - - priv->wowlan = true; - - ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN); - if (ret) - goto out; - - /* now configure WoWLAN ucode */ - ret = iwl_alive_start(priv); - if (ret) - goto out; - - memcpy(&ctx->staging, &rxon, sizeof(rxon)); - ret = iwlagn_commit_rxon(priv, ctx); - if (ret) - goto out; - - ret = iwl_power_update_mode(priv, true); - if (ret) - goto out; - - if (!iwlwifi_mod_params.sw_crypto) { - /* mark all keys clear */ - priv->ucode_key_table = 0; - ctx->key_mapping_keys = 0; - - /* - * This needs to be unlocked due to lock ordering - * constraints. Since we're in the suspend path - * that isn't really a problem though. 
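The sequence-number bookkeeping earlier in iwlagn_suspend() is easier to follow if you recall that in the 802.11 sequence-control field the fragment number occupies the low four bits and the sequence number bits 4-15, so one sequence-number step is 0x10. A standalone sketch of the masking and of the "store the next value, the uCode re-adds 0x10" adjustment, not part of the patch and with made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	#define IEEE80211_SCTL_SEQ 0xFFF0	/* value as in linux/ieee80211.h */

	int main(void)
	{
		uint16_t last_seq_ctl = 0x1234;	/* SN 0x123, fragment 4 */
		uint16_t non_qos, qos_next = 0x1240, qos_stored;

		/* non-QoS: keep the last used SN, drop the fragment bits */
		non_qos = last_seq_ctl & IEEE80211_SCTL_SEQ;	/* 0x1230 */

		/* QoS: the driver stores the next SN to use; the uCode adds
		 * 0x10 itself, so hand it the value minus one step */
		qos_stored = qos_next - 0x10;			/* 0x1230 */

		printf("non_qos=0x%04x qos_stored=0x%04x\n", non_qos, qos_stored);
		return 0;
	}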
- */ - mutex_unlock(&priv->mutex); - ieee80211_iter_keys(priv->hw, ctx->vif, - iwlagn_wowlan_program_keys, - &key_data); - mutex_lock(&priv->mutex); - if (key_data.error) { - ret = -EIO; - goto out; - } - - if (key_data.use_rsc_tsc) { - struct iwl_host_cmd rsc_tsc_cmd = { - .id = REPLY_WOWLAN_TSC_RSC_PARAMS, - .data[0] = key_data.rsc_tsc, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - .len[0] = sizeof(*key_data.rsc_tsc), - }; - - ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd); - if (ret) - goto out; - } - - if (key_data.use_tkip) { - ret = iwl_dvm_send_cmd_pdu(priv, - REPLY_WOWLAN_TKIP_PARAMS, - 0, sizeof(tkip_cmd), - &tkip_cmd); - if (ret) - goto out; - } - - if (priv->have_rekey_data) { - memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); - memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN); - kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); - memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN); - kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); - kek_kck_cmd.replay_ctr = priv->replay_ctr; - - ret = iwl_dvm_send_cmd_pdu(priv, - REPLY_WOWLAN_KEK_KCK_MATERIAL, - 0, sizeof(kek_kck_cmd), - &kek_kck_cmd); - if (ret) - goto out; - } - } - - ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0, - sizeof(d3_cfg_cmd), &d3_cfg_cmd); - if (ret) - goto out; - - ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER, - 0, sizeof(wakeup_filter_cmd), - &wakeup_filter_cmd); - if (ret) - goto out; - - ret = iwlagn_send_patterns(priv, wowlan); - out: - kfree(key_data.rsc_tsc); - return ret; -} -#endif - -int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) -{ - if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { - IWL_WARN(priv, "Not sending command - %s KILL\n", - iwl_is_rfkill(priv) ? "RF" : "CT"); - return -EIO; - } - - if (test_bit(STATUS_FW_ERROR, &priv->status)) { - IWL_ERR(priv, "Command %s failed: FW Error\n", - iwl_dvm_get_cmd_string(cmd->id)); - return -EIO; - } - - /* - * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag - * in iwl_down but cancel the workers only later. - */ - if (!priv->ucode_loaded) { - IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); - return -EIO; - } - - /* - * Synchronous commands from this op-mode must hold - * the mutex, this ensures we don't try to send two - * (or more) synchronous commands at a time. - */ - if (!(cmd->flags & CMD_ASYNC)) - lockdep_assert_held(&priv->mutex); - - return iwl_trans_send_cmd(priv->trans, cmd); -} - -int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id, - u32 flags, u16 len, const void *data) -{ - struct iwl_host_cmd cmd = { - .id = id, - .len = { len, }, - .data = { data, }, - .flags = flags, - }; - - return iwl_dvm_send_cmd(priv, &cmd); -} diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c deleted file mode 100644 index b3ad34e8bf5a..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ /dev/null @@ -1,1655 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include "iwl-io.h" -#include "iwl-trans.h" -#include "iwl-op-mode.h" -#include "iwl-modparams.h" - -#include "dev.h" -#include "calib.h" -#include "agn.h" - -/***************************************************************************** - * - * mac80211 entry point functions - * - *****************************************************************************/ - -static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_AP), - }, -}; - -static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { - { - .max = 2, - .types = BIT(NL80211_IFTYPE_STATION), - }, -}; - -static const struct ieee80211_iface_combination -iwlagn_iface_combinations_dualmode[] = { - { .num_different_channels = 1, - .max_interfaces = 2, - .beacon_int_infra_match = true, - .limits = iwlagn_sta_ap_limits, - .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits), - }, - { .num_different_channels = 1, - .max_interfaces = 2, - .limits = iwlagn_2sta_limits, - .n_limits = ARRAY_SIZE(iwlagn_2sta_limits), - }, -}; - -/* - * Not a mac80211 entry point function, but it fits in with all the - * other mac80211 functions grouped here. - */ -int iwlagn_mac_setup_register(struct iwl_priv *priv, - const struct iwl_ucode_capabilities *capa) -{ - int ret; - struct ieee80211_hw *hw = priv->hw; - struct iwl_rxon_context *ctx; - - hw->rate_control_algorithm = "iwl-agn-rs"; - - /* Tell mac80211 our characteristics */ - ieee80211_hw_set(hw, SIGNAL_DBM); - ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC); - ieee80211_hw_set(hw, SPECTRUM_MGMT); - ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(hw, QUEUE_CONTROL); - ieee80211_hw_set(hw, SUPPORTS_PS); - ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); - ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); - ieee80211_hw_set(hw, WANT_MONITOR_VIF); - - hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; - hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; - - /* - * Including the following line will crash some AP's. This - * workaround removes the stimulus which causes the crash until - * the AP software can be fixed. 
- hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - */ - - if (priv->nvm_data->sku_cap_11n_enable) - hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS | - NL80211_FEATURE_STATIC_SMPS; - - /* - * Enable 11w if advertised by firmware and software crypto - * is not enabled (as the firmware will interpret some mgmt - * packets, so enabling it with software crypto isn't safe) - */ - if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && - !iwlwifi_mod_params.sw_crypto) - ieee80211_hw_set(hw, MFP_CAPABLE); - - hw->sta_data_size = sizeof(struct iwl_station_priv); - hw->vif_data_size = sizeof(struct iwl_vif_priv); - - for_each_context(priv, ctx) { - hw->wiphy->interface_modes |= ctx->interface_modes; - hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; - } - - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - - if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { - hw->wiphy->iface_combinations = - iwlagn_iface_combinations_dualmode; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(iwlagn_iface_combinations_dualmode); - } - - hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; - hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | - REGULATORY_DISABLE_BEACON_HINTS; - -#ifdef CONFIG_PM_SLEEP - if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && - priv->trans->ops->d3_suspend && - priv->trans->ops->d3_resume && - device_can_wakeup(priv->trans->dev)) { - priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; - if (!iwlwifi_mod_params.sw_crypto) - priv->wowlan_support.flags |= - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE; - - priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; - priv->wowlan_support.pattern_min_len = - IWLAGN_WOWLAN_MIN_PATTERN_LEN; - priv->wowlan_support.pattern_max_len = - IWLAGN_WOWLAN_MAX_PATTERN_LEN; - hw->wiphy->wowlan = &priv->wowlan_support; - } -#endif - - if (iwlwifi_mod_params.power_save) - hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; - else - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - - hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; - /* we create the 802.11 header and a max-length SSID element */ - hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 34; - - /* - * We don't use all queues: 4 and 9 are unused and any - * aggregation queue gets mapped down to the AC queue. 
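The max_scan_ie_len arithmetic above reserves room in the probe request for what the driver builds itself: the 24-byte 802.11 management header and a maximum-length SSID element (2-byte element header plus up to 32 bytes of SSID, hence 34). A standalone illustration, not part of the patch, with an assumed firmware probe-length capability:

	#include <stdio.h>

	int main(void)
	{
		int max_probe_length = 200;	/* assumed uCode capability, bytes */
		int hdr_len = 24;		/* 802.11 management header */
		int ssid_ie_len = 2 + 32;	/* element id + length + max SSID */

		/* room left for caller-supplied IEs in the probe request */
		printf("max_scan_ie_len = %d\n",
		       max_probe_length - hdr_len - ssid_ie_len);	/* 142 */
		return 0;
	}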
- */ - hw->queues = IWLAGN_FIRST_AMPDU_QUEUE; - - hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - - if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &priv->nvm_data->bands[IEEE80211_BAND_2GHZ]; - if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &priv->nvm_data->bands[IEEE80211_BAND_5GHZ]; - - hw->wiphy->hw_version = priv->trans->hw_id; - - iwl_leds_init(priv); - - ret = ieee80211_register_hw(priv->hw); - if (ret) { - IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); - iwl_leds_exit(priv); - return ret; - } - priv->mac80211_registered = 1; - - return 0; -} - -void iwlagn_mac_unregister(struct iwl_priv *priv) -{ - if (!priv->mac80211_registered) - return; - iwl_leds_exit(priv); - ieee80211_unregister_hw(priv->hw); - priv->mac80211_registered = 0; -} - -static int __iwl_up(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - int ret; - - lockdep_assert_held(&priv->mutex); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { - IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); - return -EIO; - } - - for_each_context(priv, ctx) { - ret = iwlagn_alloc_bcast_station(priv, ctx); - if (ret) { - iwl_dealloc_bcast_stations(priv); - return ret; - } - } - - ret = iwl_trans_start_hw(priv->trans); - if (ret) { - IWL_ERR(priv, "Failed to start HW: %d\n", ret); - goto error; - } - - ret = iwl_run_init_ucode(priv); - if (ret) { - IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); - goto error; - } - - ret = iwl_trans_start_hw(priv->trans); - if (ret) { - IWL_ERR(priv, "Failed to start HW: %d\n", ret); - goto error; - } - - ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR); - if (ret) { - IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); - goto error; - } - - ret = iwl_alive_start(priv); - if (ret) - goto error; - return 0; - - error: - set_bit(STATUS_EXIT_PENDING, &priv->status); - iwl_down(priv); - clear_bit(STATUS_EXIT_PENDING, &priv->status); - - IWL_ERR(priv, "Unable to initialize device.\n"); - return ret; -} - -static int iwlagn_mac_start(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - /* we should be verifying the device is ready to be opened */ - mutex_lock(&priv->mutex); - ret = __iwl_up(priv); - mutex_unlock(&priv->mutex); - if (ret) - return ret; - - IWL_DEBUG_INFO(priv, "Start UP work done.\n"); - - /* Now we should be done, and the READY bit should be set. 
*/ - if (WARN_ON(!test_bit(STATUS_READY, &priv->status))) - ret = -EIO; - - iwlagn_led_enable(priv); - - priv->is_open = 1; - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; -} - -static void iwlagn_mac_stop(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!priv->is_open) - return; - - priv->is_open = 0; - - mutex_lock(&priv->mutex); - iwl_down(priv); - mutex_unlock(&priv->mutex); - - iwl_cancel_deferred_work(priv); - - flush_workqueue(priv->workqueue); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - if (iwlwifi_mod_params.sw_crypto) - return; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->mutex); - - if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) - goto out; - - memcpy(priv->kek, data->kek, NL80211_KEK_LEN); - memcpy(priv->kck, data->kck, NL80211_KCK_LEN); - priv->replay_ctr = - cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); - priv->have_rekey_data = true; - - out: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -#ifdef CONFIG_PM_SLEEP - -static int iwlagn_mac_suspend(struct ieee80211_hw *hw, - struct cfg80211_wowlan *wowlan) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - int ret; - - if (WARN_ON(!wowlan)) - return -EINVAL; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->mutex); - - /* Don't attempt WoWLAN when not associated, tear down instead. */ - if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || - !iwl_is_associated_ctx(ctx)) { - ret = 1; - goto out; - } - - ret = iwlagn_suspend(priv, wowlan); - if (ret) - goto error; - - /* let the ucode operate on its own */ - iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - - iwl_trans_d3_suspend(priv->trans, false); - - goto out; - - error: - priv->wowlan = false; - iwlagn_prepare_restart(priv); - ieee80211_restart_hw(priv->hw); - out: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -struct iwl_resume_data { - struct iwl_priv *priv; - struct iwlagn_wowlan_status *cmd; - bool valid; -}; - -static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_resume_data *resume_data = data; - struct iwl_priv *priv = resume_data->priv; - - if (iwl_rx_packet_payload_len(pkt) != sizeof(*resume_data->cmd)) { - IWL_ERR(priv, "rx wrong size data\n"); - return true; - } - memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd)); - resume_data->valid = true; - - return true; -} - -static int iwlagn_mac_resume(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct ieee80211_vif *vif; - u32 base; - int ret; - enum iwl_d3_status d3_status; - struct error_table_start { - /* cf. 
struct iwl_error_event_table */ - u32 valid; - u32 error_id; - } err_info; - struct iwl_notification_wait status_wait; - static const u16 status_cmd[] = { - REPLY_WOWLAN_GET_STATUS, - }; - struct iwlagn_wowlan_status status_data = {}; - struct iwl_resume_data resume_data = { - .priv = priv, - .cmd = &status_data, - .valid = false, - }; - struct cfg80211_wowlan_wakeup wakeup = { - .pattern_idx = -1, - }; -#ifdef CONFIG_IWLWIFI_DEBUGFS - const struct fw_img *img; -#endif - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->mutex); - - /* we'll clear ctx->vif during iwlagn_prepare_restart() */ - vif = ctx->vif; - - ret = iwl_trans_d3_resume(priv->trans, &d3_status, false); - if (ret) - goto out_unlock; - - if (d3_status != IWL_D3_STATUS_ALIVE) { - IWL_INFO(priv, "Device was reset during suspend\n"); - goto out_unlock; - } - - /* uCode is no longer operating by itself */ - iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - - base = priv->device_pointers.error_event_table; - if (!iwlagn_hw_valid_rtc_data_addr(base)) { - IWL_WARN(priv, "Invalid error table during resume!\n"); - goto out_unlock; - } - - iwl_trans_read_mem_bytes(priv->trans, base, - &err_info, sizeof(err_info)); - - if (err_info.valid) { - IWL_INFO(priv, "error table is valid (%d, 0x%x)\n", - err_info.valid, err_info.error_id); - if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { - wakeup.rfkill_release = true; - ieee80211_report_wowlan_wakeup(vif, &wakeup, - GFP_KERNEL); - } - goto out_unlock; - } - -#ifdef CONFIG_IWLWIFI_DEBUGFS - img = &priv->fw->img[IWL_UCODE_WOWLAN]; - if (!priv->wowlan_sram) - priv->wowlan_sram = - kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len, - GFP_KERNEL); - - if (priv->wowlan_sram) - iwl_trans_read_mem(priv->trans, 0x800000, - priv->wowlan_sram, - img->sec[IWL_UCODE_SECTION_DATA].len / 4); -#endif - - /* - * This is very strange. The GET_STATUS command is sent but the device - * doesn't reply properly, it seems it doesn't close the RBD so one is - * always left open ... As a result, we need to send another command - * and have to reset the driver afterwards. As we need to switch to - * runtime firmware again that'll happen. - */ - - iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd, - ARRAY_SIZE(status_cmd), iwl_resume_status_fn, - &resume_data); - - iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL); - iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL); - /* an RBD is left open in the firmware now! 
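The status read-back above relies on the driver's notification-wait helpers: register a waiter for the expected reply, fire the commands asynchronously, then sleep with a timeout until the handler has copied the payload. A rough userspace model of that pattern, using pthreads and invented structure and field names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

struct notif_wait {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool triggered;
	char payload[32];
};

static struct notif_wait nw = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
};

/* plays the role of the status handler: copy the packet, wake the waiter */
static void deliver_notification(const char *pkt)
{
	pthread_mutex_lock(&nw.lock);
	strncpy(nw.payload, pkt, sizeof(nw.payload) - 1);
	nw.triggered = true;
	pthread_cond_signal(&nw.cond);
	pthread_mutex_unlock(&nw.lock);
}

/* stands in for the firmware answering the asynchronous GET_STATUS command */
static void *fake_firmware(void *arg)
{
	(void)arg;
	deliver_notification("wakeup_reason=0x1");
	return NULL;
}

int main(void)
{
	pthread_t fw;
	struct timespec deadline;
	int err = 0;

	pthread_create(&fw, NULL, fake_firmware, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;	/* the driver waits about HZ/5; 1s here */

	pthread_mutex_lock(&nw.lock);
	while (!nw.triggered && err == 0)
		err = pthread_cond_timedwait(&nw.cond, &nw.lock, &deadline);
	pthread_mutex_unlock(&nw.lock);

	if (err)
		printf("timed out waiting for the status reply\n");
	else
		printf("got status: %s\n", nw.payload);

	pthread_join(fw, NULL);
	return 0;
}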
*/ - - ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5); - if (ret) - goto out_unlock; - - if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) { - u32 reasons = le32_to_cpu(status_data.wakeup_reason); - struct cfg80211_wowlan_wakeup *wakeup_report; - - IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons); - - if (reasons) { - if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET) - wakeup.magic_pkt = true; - if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH) - wakeup.pattern_idx = status_data.pattern_number; - if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | - IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE)) - wakeup.disconnect = true; - if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL) - wakeup.gtk_rekey_failure = true; - if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ) - wakeup.eap_identity_req = true; - if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE) - wakeup.four_way_handshake = true; - wakeup_report = &wakeup; - } else { - wakeup_report = NULL; - } - - ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); - } - - priv->wowlan = false; - - iwlagn_prepare_restart(priv); - - memset((void *)&ctx->active, 0, sizeof(ctx->active)); - iwl_connection_init_rx_config(priv, ctx); - iwlagn_set_rxon_chain(priv, ctx); - - out_unlock: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - ieee80211_resume_disconnect(vif); - - return 1; -} - -static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - device_set_wakeup_enable(priv->trans->dev, enabled); -} -#endif - -static void iwlagn_mac_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - if (iwlagn_tx_skb(priv, control->sta, skb)) - ieee80211_free_txskb(hw, skb); -} - -static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); -} - -static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - int ret; - bool is_default_wep_key = false; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (iwlwifi_mod_params.sw_crypto) { - IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - /* fall through */ - case WLAN_CIPHER_SUITE_CCMP: - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - break; - default: - break; - } - - /* - * We could program these keys into the hardware as well, but we - * don't expect much multicast traffic in IBSS and having keys - * for more stations is probably more useful. - * - * Mark key TX-only and return 0. 
- */ - if (vif->type == NL80211_IFTYPE_ADHOC && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { - key->hw_key_idx = WEP_INVALID_OFFSET; - return 0; - } - - /* If they key was TX-only, accept deletion */ - if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) - return 0; - - mutex_lock(&priv->mutex); - iwl_scan_cancel_timeout(priv, 100); - - BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); - - /* - * If we are getting WEP group key and we didn't receive any key mapping - * so far, we are in legacy wep mode (group key only), otherwise we are - * in 1X mode. - * In legacy wep mode, we use another host command to the uCode. - */ - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { - if (cmd == SET_KEY) - is_default_wep_key = !ctx->key_mapping_keys; - else - is_default_wep_key = - key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT; - } - - - switch (cmd) { - case SET_KEY: - if (is_default_wep_key) { - ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key); - break; - } - ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta); - if (ret) { - /* - * can't add key for RX, but we don't need it - * in the device for TX so still return 0 - */ - ret = 0; - key->hw_key_idx = WEP_INVALID_OFFSET; - } - - IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); - break; - case DISABLE_KEY: - if (is_default_wep_key) - ret = iwl_remove_default_wep_key(priv, ctx, key); - else - ret = iwl_remove_dynamic_key(priv, ctx, key, sta); - - IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) -{ - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) - return false; - return true; -} - -static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) -{ - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) - return false; - if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) - return true; - - /* disabled by default */ - return false; -} - -static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - int ret = -EINVAL; - struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; - - IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", - sta->addr, tid); - - if (!(priv->nvm_data->sku_cap_11n_enable)) - return -EACCES; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->mutex); - - switch (action) { - case IEEE80211_AMPDU_RX_START: - if (!iwl_enable_rx_ampdu(priv->cfg)) - break; - IWL_DEBUG_HT(priv, "start Rx\n"); - ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); - break; - case IEEE80211_AMPDU_RX_STOP: - IWL_DEBUG_HT(priv, "stop Rx\n"); - ret = iwl_sta_rx_agg_stop(priv, sta, tid); - break; - case IEEE80211_AMPDU_TX_START: - if (!priv->trans->ops->txq_enable) - break; - if (!iwl_enable_tx_ampdu(priv->cfg)) - break; - IWL_DEBUG_HT(priv, "start Tx\n"); - ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); - break; - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - IWL_DEBUG_HT(priv, "Flush Tx\n"); - ret = iwlagn_tx_agg_flush(priv, vif, sta, tid); - break; - case IEEE80211_AMPDU_TX_STOP_CONT: - IWL_DEBUG_HT(priv, "stop Tx\n"); - ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); - if ((ret == 0) && 
(priv->agg_tids_count > 0)) { - priv->agg_tids_count--; - IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", - priv->agg_tids_count); - } - if (!priv->agg_tids_count && - priv->hw_params.use_rts_for_aggregation) { - /* - * switch off RTS/CTS if it was previously enabled - */ - sta_priv->lq_sta.lq.general_params.flags &= - ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; - iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), - &sta_priv->lq_sta.lq, CMD_ASYNC, false); - } - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size); - break; - } - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - return ret; -} - -static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - bool is_ap = vif->type == NL80211_IFTYPE_STATION; - int ret; - u8 sta_id; - - IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", - sta->addr); - sta_priv->sta_id = IWL_INVALID_STATION; - - atomic_set(&sta_priv->pending_frames, 0); - if (vif->type == NL80211_IFTYPE_AP) - sta_priv->client = true; - - ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr, - is_ap, sta, &sta_id); - if (ret) { - IWL_ERR(priv, "Unable to add station %pM (%d)\n", - sta->addr, ret); - /* Should we return success if return code is EEXIST ? */ - return ret; - } - - sta_priv->sta_id = sta_id; - - return 0; -} - -static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - int ret; - - IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", sta->addr); - - if (vif->type == NL80211_IFTYPE_STATION) { - /* - * Station will be removed from device when the RXON - * is set to unassociated -- just deactivate it here - * to avoid re-programming it. 
- */ - ret = 0; - iwl_deactivate_station(priv, sta_priv->sta_id, sta->addr); - } else { - ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr); - if (ret) - IWL_DEBUG_QUIET_RFKILL(priv, - "Error removing station %pM\n", sta->addr); - } - return ret; -} - -static int iwlagn_mac_sta_state(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - enum ieee80211_sta_state old_state, - enum ieee80211_sta_state new_state) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - enum { - NONE, ADD, REMOVE, HT_RATE_INIT, ADD_RATE_INIT, - } op = NONE; - int ret; - - IWL_DEBUG_MAC80211(priv, "station %pM state change %d->%d\n", - sta->addr, old_state, new_state); - - mutex_lock(&priv->mutex); - if (vif->type == NL80211_IFTYPE_STATION) { - if (old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE) - op = ADD; - else if (old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST) - op = REMOVE; - else if (old_state == IEEE80211_STA_AUTH && - new_state == IEEE80211_STA_ASSOC) - op = HT_RATE_INIT; - } else { - if (old_state == IEEE80211_STA_AUTH && - new_state == IEEE80211_STA_ASSOC) - op = ADD_RATE_INIT; - else if (old_state == IEEE80211_STA_ASSOC && - new_state == IEEE80211_STA_AUTH) - op = REMOVE; - } - - switch (op) { - case ADD: - ret = iwlagn_mac_sta_add(hw, vif, sta); - if (ret) - break; - /* - * Clear the in-progress flag, the AP station entry was added - * but we'll initialize LQ only when we've associated (which - * would also clear the in-progress flag). This is necessary - * in case we never initialize LQ because association fails. - */ - spin_lock_bh(&priv->sta_lock); - priv->stations[iwl_sta_id(sta)].used &= - ~IWL_STA_UCODE_INPROGRESS; - spin_unlock_bh(&priv->sta_lock); - break; - case REMOVE: - ret = iwlagn_mac_sta_remove(hw, vif, sta); - break; - case ADD_RATE_INIT: - ret = iwlagn_mac_sta_add(hw, vif, sta); - if (ret) - break; - /* Initialize rate scaling */ - IWL_DEBUG_INFO(priv, - "Initializing rate scaling for station %pM\n", - sta->addr); - iwl_rs_rate_init(priv, sta, iwl_sta_id(sta)); - ret = 0; - break; - case HT_RATE_INIT: - /* Initialize rate scaling */ - ret = iwl_sta_update_ht(priv, vif_priv->ctx, sta); - if (ret) - break; - IWL_DEBUG_INFO(priv, - "Initializing rate scaling for station %pM\n", - sta->addr); - iwl_rs_rate_init(priv, sta, iwl_sta_id(sta)); - ret = 0; - break; - default: - ret = 0; - break; - } - - /* - * mac80211 might WARN if we fail, but due the way we - * (badly) handle hard rfkill, we might fail here - */ - if (iwl_is_rfkill(priv)) - ret = 0; - - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *ch_switch) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = ch_switch->chandef.chan; - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - /* - * MULTI-FIXME - * When we add support for multiple interfaces, we need to - * revisit this. The channel switch command in the device - * only affects the BSS context, but what does that really - * mean? And what if we get a CSA on the second interface? - * This needs a lot of work. 
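The sta_state handler above condenses mac80211's per-station ladder (NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED) into a handful of driver operations, and which transition triggers which operation depends on the interface type. A standalone sketch of that decision table, with simplified names:

#include <stdio.h>

enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };
enum sta_op { OP_IGNORE, OP_ADD, OP_REMOVE, OP_HT_RATE_INIT, OP_ADD_RATE_INIT };

static enum sta_op map_transition(int vif_is_station,
				  enum sta_state old_state,
				  enum sta_state new_state)
{
	if (vif_is_station) {
		if (old_state == NOTEXIST && new_state == NONE)
			return OP_ADD;
		if (old_state == NONE && new_state == NOTEXIST)
			return OP_REMOVE;
		if (old_state == AUTH && new_state == ASSOC)
			return OP_HT_RATE_INIT;
	} else {
		if (old_state == AUTH && new_state == ASSOC)
			return OP_ADD_RATE_INIT;
		if (old_state == ASSOC && new_state == AUTH)
			return OP_REMOVE;
	}
	return OP_IGNORE;
}

int main(void)
{
	/* station vif: the peer AP is added as soon as mac80211 creates it */
	printf("%d\n", map_transition(1, NOTEXIST, NONE));	/* OP_ADD */
	/* AP/IBSS vif: clients are added only once they associate */
	printf("%d\n", map_transition(0, AUTH, ASSOC));		/* OP_ADD_RATE_INIT */
	return 0;
}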
- */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - u16 ch; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - mutex_lock(&priv->mutex); - - if (iwl_is_rfkill(priv)) - goto out; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status) || - test_bit(STATUS_SCANNING, &priv->status) || - test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - goto out; - - if (!iwl_is_associated_ctx(ctx)) - goto out; - - if (!priv->lib->set_channel_switch) - goto out; - - ch = channel->hw_value; - if (le16_to_cpu(ctx->active.channel) == ch) - goto out; - - priv->current_ht_config.smps = conf->smps_mode; - - /* Configure HT40 channels */ - switch (cfg80211_get_chandef_type(&ch_switch->chandef)) { - case NL80211_CHAN_NO_HT: - case NL80211_CHAN_HT20: - ctx->ht.is_40mhz = false; - ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; - break; - case NL80211_CHAN_HT40MINUS: - ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - break; - case NL80211_CHAN_HT40PLUS: - ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - break; - } - - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - iwl_set_rxon_channel(priv, channel, ctx); - iwl_set_rxon_ht(priv, ht_conf); - iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); - - /* - * at this point, staging_rxon has the - * configuration for channel switch - */ - set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); - priv->switch_channel = cpu_to_le16(ch); - if (priv->lib->set_channel_switch(priv, ch_switch)) { - clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); - priv->switch_channel = 0; - ieee80211_chswitch_done(ctx->vif, false); - } - -out: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) -{ - /* - * MULTI-FIXME - * See iwlagn_mac_channel_switch. - */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - return; - - if (ctx->vif) - ieee80211_chswitch_done(ctx->vif, is_success); -} - -static void iwlagn_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - __le32 filter_or = 0, filter_nand = 0; - struct iwl_rxon_context *ctx; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", - changed_flags, *total_flags); - - CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK); - /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - mutex_lock(&priv->mutex); - - for_each_context(priv, ctx) { - ctx->staging.filter_flags &= ~filter_nand; - ctx->staging.filter_flags |= filter_or; - - /* - * Not committing directly because hardware can perform a scan, - * but we'll eventually commit the filter flags change anyway. - */ - } - - mutex_unlock(&priv->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in iwl_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. 
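The CHK() macro above folds the requested mac80211 filter flags into one set-mask and one clear-mask, which are then applied to every context's staging filter flags (clear first, then set, so a flag present in both masks ends up set). A small userspace model of that bookkeeping, with invented bit values:

#include <stdint.h>
#include <stdio.h>

#define FIF_OTHER_BSS           (1u << 0)
#define FIF_CONTROL             (1u << 1)
#define FIF_BCN_PRBRESP_PROMISC (1u << 2)

#define FILTER_PROMISC          (1u << 0)
#define FILTER_CTL2HOST         (1u << 1)
#define FILTER_BCON_AWARE       (1u << 2)

int main(void)
{
	uint32_t total_flags = FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
	uint32_t filter_or = 0, filter_nand = 0;
	uint32_t staging = FILTER_CTL2HOST;	/* pretend current context state */

#define CHK(test, flag) do {			\
	if (total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
} while (0)

	CHK(FIF_OTHER_BSS, FILTER_PROMISC);
	CHK(FIF_CONTROL, FILTER_CTL2HOST | FILTER_PROMISC);
	CHK(FIF_BCN_PRBRESP_PROMISC, FILTER_BCON_AWARE);
#undef CHK

	staging &= ~filter_nand;	/* clear everything not requested */
	staging |= filter_or;		/* then set what was requested    */

	printf("or=0x%x nand=0x%x staging=0x%x\n",
	       filter_or, filter_nand, staging);
	return 0;
}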
- */ - *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} - -static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u32 queues, bool drop) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - u32 scd_queues; - - mutex_lock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { - IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); - goto done; - } - if (iwl_is_rfkill(priv)) { - IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); - goto done; - } - - scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1; - scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | - BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); - - if (drop) { - IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", - scd_queues); - if (iwlagn_txfifo_flush(priv, scd_queues)) { - IWL_ERR(priv, "flush request fail\n"); - goto done; - } - } - - IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues); -done: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static void iwlagn_mac_event_callback(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - if (event->type != RSSI_EVENT) - return; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist) { - if (event->u.rssi.data == RSSI_EVENT_LOW) - priv->bt_enable_pspoll = true; - else if (event->u.rssi.data == RSSI_EVENT_HIGH) - priv->bt_enable_pspoll = false; - - queue_work(priv->workqueue, &priv->bt_runtime_config); - } else { - IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled," - "ignoring RSSI callback\n"); - } - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, bool set) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - queue_work(priv->workqueue, &priv->beacon_update); - - return 0; -} - -static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *ctx = vif_priv->ctx; - int q; - - if (WARN_ON(!ctx)) - return -EINVAL; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!iwl_is_ready_rf(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); - return -EIO; - } - - if (queue >= AC_NUM) { - IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); - return 0; - } - - q = AC_NUM - 1 - queue; - - mutex_lock(&priv->mutex); - - ctx->qos_data.def_qos_parm.ac[q].cw_min = - cpu_to_le16(params->cw_min); - ctx->qos_data.def_qos_parm.ac[q].cw_max = - cpu_to_le16(params->cw_max); - ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; - ctx->qos_data.def_qos_parm.ac[q].edca_txop = - cpu_to_le16((params->txop * 32)); - - ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; - - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; -} - -static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - return priv->ibss_manager == IWL_IBSS_MANAGER; -} - -static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - iwl_connection_init_rx_config(priv, ctx); - - iwlagn_set_rxon_chain(priv, ctx); - - return 
iwlagn_commit_rxon(priv, ctx); -} - -static int iwl_setup_interface(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct ieee80211_vif *vif = ctx->vif; - int err, ac; - - lockdep_assert_held(&priv->mutex); - - /* - * This variable will be correct only when there's just - * a single context, but all code using it is for hardware - * that supports only one context. - */ - priv->iw_mode = vif->type; - - ctx->is_active = true; - - err = iwl_set_mode(priv, ctx); - if (err) { - if (!ctx->always_active) - ctx->is_active = false; - return err; - } - - if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist && - vif->type == NL80211_IFTYPE_ADHOC) { - /* - * pretend to have high BT traffic as long as we - * are operating in IBSS mode, as this will cause - * the rate scaling etc. to behave as intended. - */ - priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; - } - - /* set up queue mappings */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - vif->hw_queue[ac] = ctx->ac_to_queue[ac]; - - if (vif->type == NL80211_IFTYPE_AP) - vif->cab_queue = ctx->mcast_queue; - else - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; - - return 0; -} - -static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - struct iwl_rxon_context *tmp, *ctx = NULL; - int err; - enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); - bool reset = false; - - IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", - viftype, vif->addr); - - mutex_lock(&priv->mutex); - - if (!iwl_is_ready_rf(priv)) { - IWL_WARN(priv, "Try to add interface when device not ready\n"); - err = -EINVAL; - goto out; - } - - for_each_context(priv, tmp) { - u32 possible_modes = - tmp->interface_modes | tmp->exclusive_interface_modes; - - if (tmp->vif) { - /* On reset we need to add the same interface again */ - if (tmp->vif == vif) { - reset = true; - ctx = tmp; - break; - } - - /* check if this busy context is exclusive */ - if (tmp->exclusive_interface_modes & - BIT(tmp->vif->type)) { - err = -EINVAL; - goto out; - } - continue; - } - - if (!(possible_modes & BIT(viftype))) - continue; - - /* have maybe usable context w/o interface */ - ctx = tmp; - break; - } - - if (!ctx) { - err = -EOPNOTSUPP; - goto out; - } - - vif_priv->ctx = ctx; - ctx->vif = vif; - - /* - * In SNIFFER device type, the firmware reports the FCS to - * the host, rather than snipping it off. Unfortunately, - * mac80211 doesn't (yet) provide a per-packet flag for - * this, so that we have to set the hardware flag based - * on the interfaces added. As the monitor interface can - * only be present by itself, and will be removed before - * other interfaces are added, this is safe. 
- */ - if (vif->type == NL80211_IFTYPE_MONITOR) - ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS); - else - __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, priv->hw->flags); - - err = iwl_setup_interface(priv, ctx); - if (!err || reset) - goto out; - - ctx->vif = NULL; - priv->iw_mode = NL80211_IFTYPE_STATION; - out: - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - return err; -} - -static void iwl_teardown_interface(struct iwl_priv *priv, - struct ieee80211_vif *vif, - bool mode_change) -{ - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - lockdep_assert_held(&priv->mutex); - - if (priv->scan_vif == vif) { - iwl_scan_cancel_timeout(priv, 200); - iwl_force_scan_end(priv); - } - - if (!mode_change) { - iwl_set_mode(priv, ctx); - if (!ctx->always_active) - ctx->is_active = false; - } - - /* - * When removing the IBSS interface, overwrite the - * BT traffic load with the stored one from the last - * notification, if any. If this is a device that - * doesn't implement this, this has no effect since - * both values are the same and zero. - */ - if (vif->type == NL80211_IFTYPE_ADHOC) - priv->bt_traffic_load = priv->last_bt_traffic_load; -} - -static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - mutex_lock(&priv->mutex); - - if (WARN_ON(ctx->vif != vif)) { - struct iwl_rxon_context *tmp; - IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif); - for_each_context(priv, tmp) - IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n", - tmp->ctxid, tmp, tmp->vif); - } - ctx->vif = NULL; - - iwl_teardown_interface(priv, vif, false); - - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - -} - -static int iwlagn_mac_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype newtype, bool newp2p) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_rxon_context *ctx, *tmp; - enum nl80211_iftype newviftype = newtype; - u32 interface_modes; - int err; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - newtype = ieee80211_iftype_p2p(newtype, newp2p); - - mutex_lock(&priv->mutex); - - ctx = iwl_rxon_ctx_from_vif(vif); - - /* - * To simplify this code, only support changes on the - * BSS context. The PAN context is usually reassigned - * by creating/removing P2P interfaces anyway. - */ - if (ctx->ctxid != IWL_RXON_CTX_BSS) { - err = -EBUSY; - goto out; - } - - if (!ctx->vif || !iwl_is_ready_rf(priv)) { - /* - * Huh? But wait ... this can maybe happen when - * we're in the middle of a firmware restart! - */ - err = -EBUSY; - goto out; - } - - /* Check if the switch is supported in the same context */ - interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; - if (!(interface_modes & BIT(newtype))) { - err = -EBUSY; - goto out; - } - - if (ctx->exclusive_interface_modes & BIT(newtype)) { - for_each_context(priv, tmp) { - if (ctx == tmp) - continue; - - if (!tmp->is_active) - continue; - - /* - * The current mode switch would be exclusive, but - * another context is active ... refuse the switch. - */ - err = -EBUSY; - goto out; - } - } - - /* success */ - iwl_teardown_interface(priv, vif, true); - vif->type = newviftype; - vif->p2p = newp2p; - err = iwl_setup_interface(priv, ctx); - WARN_ON(err); - /* - * We've switched internally, but submitting to the - * device may have failed for some reason. 
Mask this - * error, because otherwise mac80211 will not switch - * (and set the interface type back) and we'll be - * out of sync with it. - */ - err = 0; - - out: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return err; -} - -static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_scan_request *hw_req) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct cfg80211_scan_request *req = &hw_req->req; - int ret; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (req->n_channels == 0) - return -EINVAL; - - mutex_lock(&priv->mutex); - - /* - * If an internal scan is in progress, just set - * up the scan_request as per above. - */ - if (priv->scan_type != IWL_SCAN_NORMAL) { - IWL_DEBUG_SCAN(priv, - "SCAN request during internal scan - defer\n"); - priv->scan_request = req; - priv->scan_vif = vif; - ret = 0; - } else { - priv->scan_request = req; - priv->scan_vif = vif; - /* - * mac80211 will only ask for one band at a time - * so using channels[0] here is ok - */ - ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, - req->channels[0]->band); - if (ret) { - priv->scan_request = NULL; - priv->scan_vif = NULL; - } - } - - IWL_DEBUG_MAC80211(priv, "leave\n"); - - mutex_unlock(&priv->mutex); - - return ret; -} - -static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) -{ - struct iwl_addsta_cmd cmd = { - .mode = STA_CONTROL_MODIFY_MSK, - .station_flags_msk = STA_FLG_PWR_SAVE_MSK, - .sta.sta_id = sta_id, - }; - - iwl_send_add_sta(priv, &cmd, CMD_ASYNC); -} - -static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - int sta_id; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - switch (cmd) { - case STA_NOTIFY_SLEEP: - WARN_ON(!sta_priv->client); - sta_priv->asleep = true; - if (atomic_read(&sta_priv->pending_frames) > 0) - ieee80211_sta_block_awake(hw, sta, true); - break; - case STA_NOTIFY_AWAKE: - WARN_ON(!sta_priv->client); - if (!sta_priv->asleep) - break; - sta_priv->asleep = false; - sta_id = iwl_sta_id(sta); - if (sta_id != IWL_INVALID_STATION) - iwl_sta_modify_ps_wake(priv, sta_id); - break; - default: - break; - } - IWL_DEBUG_MAC80211(priv, "leave\n"); -} - -const struct ieee80211_ops iwlagn_hw_ops = { - .tx = iwlagn_mac_tx, - .start = iwlagn_mac_start, - .stop = iwlagn_mac_stop, -#ifdef CONFIG_PM_SLEEP - .suspend = iwlagn_mac_suspend, - .resume = iwlagn_mac_resume, - .set_wakeup = iwlagn_mac_set_wakeup, -#endif - .add_interface = iwlagn_mac_add_interface, - .remove_interface = iwlagn_mac_remove_interface, - .change_interface = iwlagn_mac_change_interface, - .config = iwlagn_mac_config, - .configure_filter = iwlagn_configure_filter, - .set_key = iwlagn_mac_set_key, - .update_tkip_key = iwlagn_mac_update_tkip_key, - .set_rekey_data = iwlagn_mac_set_rekey_data, - .conf_tx = iwlagn_mac_conf_tx, - .bss_info_changed = iwlagn_bss_info_changed, - .ampdu_action = iwlagn_mac_ampdu_action, - .hw_scan = iwlagn_mac_hw_scan, - .sta_notify = iwlagn_mac_sta_notify, - .sta_state = iwlagn_mac_sta_state, - .channel_switch = iwlagn_mac_channel_switch, - .flush = iwlagn_mac_flush, - .tx_last_beacon = iwlagn_mac_tx_last_beacon, - .event_callback = iwlagn_mac_event_callback, - .set_tim = iwlagn_mac_set_tim, -}; - -/* This function both allocates and initializes hw and priv. 
*/ -struct ieee80211_hw *iwl_alloc_all(void) -{ - struct iwl_priv *priv; - struct iwl_op_mode *op_mode; - /* mac80211 allocates memory for this device instance, including - * space for this driver's private structure */ - struct ieee80211_hw *hw; - - hw = ieee80211_alloc_hw(sizeof(struct iwl_priv) + - sizeof(struct iwl_op_mode), &iwlagn_hw_ops); - if (!hw) - goto out; - - op_mode = hw->priv; - priv = IWL_OP_MODE_GET_DVM(op_mode); - priv->hw = hw; - -out: - return hw; -} diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c deleted file mode 100644 index e7616f0ee6e8..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ /dev/null @@ -1,2077 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include "iwl-eeprom-read.h" -#include "iwl-eeprom-parse.h" -#include "iwl-io.h" -#include "iwl-trans.h" -#include "iwl-op-mode.h" -#include "iwl-drv.h" -#include "iwl-modparams.h" -#include "iwl-prph.h" - -#include "dev.h" -#include "calib.h" -#include "agn.h" - - -/****************************************************************************** - * - * module boiler plate - * - ******************************************************************************/ - -#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); - -static const struct iwl_op_mode_ops iwl_dvm_ops; - -void iwl_update_chain_flags(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - - for_each_context(priv, ctx) { - iwlagn_set_rxon_chain(priv, ctx); - if (ctx->active.rx_chain != ctx->staging.rx_chain) - iwlagn_commit_rxon(priv, ctx); - } -} - -/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ -static void iwl_set_beacon_tim(struct iwl_priv *priv, - struct iwl_tx_beacon_cmd *tx_beacon_cmd, - u8 *beacon, u32 frame_size) -{ - u16 tim_idx; - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; - - /* - * The index is relative to frame start but we start looking at the - * variable-length part of the beacon. 
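The TIM lookup above starts past the 802.11 header and the fixed beacon fields (8-byte timestamp, 2-byte beacon interval, 2-byte capability, so offset 36) and then walks the information elements, each encoded as an ID byte, a length byte and a payload. A standalone sketch of that walk with made-up frame contents:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define WLAN_EID_TIM 5
#define BEACON_FIXED_OFFSET (24 + 8 + 2 + 2)	/* header + fixed fields = 36 */

static int find_tim(const uint8_t *beacon, size_t frame_size)
{
	size_t idx = BEACON_FIXED_OFFSET;

	while (idx + 2 <= frame_size && beacon[idx] != WLAN_EID_TIM)
		idx += 2 + beacon[idx + 1];	/* id + len bytes + payload */

	if (idx + 2 <= frame_size && beacon[idx] == WLAN_EID_TIM)
		return (int)idx;
	return -1;	/* no TIM element found */
}

int main(void)
{
	/* 36 bytes of header/fixed fields, then an SSID IE (id 0), then TIM */
	uint8_t frame[64] = { 0 };
	frame[36] = 0;  frame[37] = 4;			/* SSID, 4 bytes */
	frame[42] = WLAN_EID_TIM;  frame[43] = 4;	/* TIM,  4 bytes */

	printf("TIM element at offset %d\n", find_tim(frame, sizeof(frame)));
	return 0;
}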
- */ - tim_idx = mgmt->u.beacon.variable - beacon; - - /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ - while ((tim_idx < (frame_size - 2)) && - (beacon[tim_idx] != WLAN_EID_TIM)) - tim_idx += beacon[tim_idx+1] + 2; - - /* If TIM field was found, set variables */ - if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { - tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); - tx_beacon_cmd->tim_size = beacon[tim_idx+1]; - } else - IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); -} - -int iwlagn_send_beacon_cmd(struct iwl_priv *priv) -{ - struct iwl_tx_beacon_cmd *tx_beacon_cmd; - struct iwl_host_cmd cmd = { - .id = REPLY_TX_BEACON, - }; - struct ieee80211_tx_info *info; - u32 frame_size; - u32 rate_flags; - u32 rate; - - /* - * We have to set up the TX command, the TX Beacon command, and the - * beacon contents. - */ - - lockdep_assert_held(&priv->mutex); - - if (!priv->beacon_ctx) { - IWL_ERR(priv, "trying to build beacon w/o beacon context!\n"); - return 0; - } - - if (WARN_ON(!priv->beacon_skb)) - return -EINVAL; - - /* Allocate beacon command */ - if (!priv->beacon_cmd) - priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL); - tx_beacon_cmd = priv->beacon_cmd; - if (!tx_beacon_cmd) - return -ENOMEM; - - frame_size = priv->beacon_skb->len; - - /* Set up TX command fields */ - tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); - tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id; - tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | - TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; - - /* Set up TX beacon command fields */ - iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data, - frame_size); - - /* Set up packet rate and flags */ - info = IEEE80211_SKB_CB(priv->beacon_skb); - - /* - * Let's set up the rate at least somewhat correctly; - * it will currently not actually be used by the uCode, - * it uses the broadcast station's rate instead. - */ - if (info->control.rates[0].idx < 0 || - info->control.rates[0].flags & IEEE80211_TX_RC_MCS) - rate = 0; - else - rate = info->control.rates[0].idx; - - priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, - priv->nvm_data->valid_tx_ant); - rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); - - /* In mac80211, rates for 5 GHz start at 0 */ - if (info->band == IEEE80211_BAND_5GHZ) - rate += IWL_FIRST_OFDM_RATE; - else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE) - rate_flags |= RATE_MCS_CCK_MSK; - - tx_beacon_cmd->tx.rate_n_flags = - iwl_hw_set_rate_n_flags(rate, rate_flags); - - /* Submit command */ - cmd.len[0] = sizeof(*tx_beacon_cmd); - cmd.data[0] = tx_beacon_cmd; - cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - cmd.len[1] = frame_size; - cmd.data[1] = priv->beacon_skb->data; - cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; - - return iwl_dvm_send_cmd(priv, &cmd); -} - -static void iwl_bg_beacon_update(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, beacon_update); - struct sk_buff *beacon; - - mutex_lock(&priv->mutex); - if (!priv->beacon_ctx) { - IWL_ERR(priv, "updating beacon w/o beacon context!\n"); - goto out; - } - - if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) { - /* - * The ucode will send beacon notifications even in - * IBSS mode, but we don't want to process them. But - * we need to defer the type check to here due to - * requiring locking around the beacon_ctx access. 
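The rate fix-up above accounts for the driver's rate table listing the CCK rates ahead of the OFDM rates, while mac80211's 5 GHz band table contains only the OFDM rates and starts at index 0. A tiny sketch of that offset, assuming the OFDM block begins right after the four CCK entries:

#include <stdio.h>

#define FIRST_CCK_RATE   0	/* 1M, 2M, 5.5M, 11M */
#define LAST_CCK_RATE    3
#define FIRST_OFDM_RATE  4	/* 6M and up         */

static int driver_rate_index(int mac80211_idx, int band_is_5ghz)
{
	int rate = mac80211_idx;

	/* the 5 GHz band table has no CCK entries, so its index 0
	 * corresponds to the first OFDM entry of the driver's table */
	if (band_is_5ghz)
		rate += FIRST_OFDM_RATE;
	return rate;
}

int main(void)
{
	printf("2.4 GHz idx 0 -> %d, 5 GHz idx 0 -> %d\n",
	       driver_rate_index(0, 0), driver_rate_index(0, 1));
	return 0;
}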
- */ - goto out; - } - - /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ - beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif); - if (!beacon) { - IWL_ERR(priv, "update beacon failed -- keeping old\n"); - goto out; - } - - /* new beacon skb is allocated every time; dispose previous.*/ - dev_kfree_skb(priv->beacon_skb); - - priv->beacon_skb = beacon; - - iwlagn_send_beacon_cmd(priv); - out: - mutex_unlock(&priv->mutex); -} - -static void iwl_bg_bt_runtime_config(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, bt_runtime_config); - - mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - goto out; - - /* dont send host command if rf-kill is on */ - if (!iwl_is_ready_rf(priv)) - goto out; - - iwlagn_send_advance_bt_config(priv); -out: - mutex_unlock(&priv->mutex); -} - -static void iwl_bg_bt_full_concurrency(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, bt_full_concurrency); - struct iwl_rxon_context *ctx; - - mutex_lock(&priv->mutex); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - goto out; - - /* dont send host command if rf-kill is on */ - if (!iwl_is_ready_rf(priv)) - goto out; - - IWL_DEBUG_INFO(priv, "BT coex in %s mode\n", - priv->bt_full_concurrent ? - "full concurrency" : "3-wire"); - - /* - * LQ & RXON updated cmds must be sent before BT Config cmd - * to avoid 3-wire collisions - */ - for_each_context(priv, ctx) { - iwlagn_set_rxon_chain(priv, ctx); - iwlagn_commit_rxon(priv, ctx); - } - - iwlagn_send_advance_bt_config(priv); -out: - mutex_unlock(&priv->mutex); -} - -int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) -{ - struct iwl_statistics_cmd statistics_cmd = { - .configuration_flags = - clear ? IWL_STATS_CONF_CLEAR_STATS : 0, - }; - - if (flags & CMD_ASYNC) - return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, - CMD_ASYNC, - sizeof(struct iwl_statistics_cmd), - &statistics_cmd); - else - return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0, - sizeof(struct iwl_statistics_cmd), - &statistics_cmd); -} - -/** - * iwl_bg_statistics_periodic - Timer callback to queue statistics - * - * This callback is provided in order to send a statistics request. - * - * This timer function is continually reset to execute within - * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION - * was received. We need to ensure we receive the statistics in order - * to update the temperature used for calibrating the TXPOWER. 
- */ -static void iwl_bg_statistics_periodic(unsigned long data) -{ - struct iwl_priv *priv = (struct iwl_priv *)data; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - /* dont send host command if rf-kill is on */ - if (!iwl_is_ready_rf(priv)) - return; - - iwl_send_statistics_request(priv, CMD_ASYNC, false); -} - - -static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, - u32 start_idx, u32 num_events, - u32 capacity, u32 mode) -{ - u32 i; - u32 ptr; /* SRAM byte address of log data */ - u32 ev, time, data; /* event log data */ - unsigned long reg_flags; - - if (mode == 0) - ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32)); - else - ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); - - /* Make sure device is powered up for SRAM reads */ - if (!iwl_trans_grab_nic_access(priv->trans, false, ®_flags)) - return; - - /* Set starting address; reads will auto-increment */ - iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr); - - /* - * Refuse to read more than would have fit into the log from - * the current start_idx. This used to happen due to the race - * described below, but now WARN because the code below should - * prevent it from happening here. - */ - if (WARN_ON(num_events > capacity - start_idx)) - num_events = capacity - start_idx; - - /* - * "time" is actually "data" for mode 0 (no timestamp). - * place event id # at far right for easier visual parsing. - */ - for (i = 0; i < num_events; i++) { - ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); - time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); - if (mode == 0) { - trace_iwlwifi_dev_ucode_cont_event( - priv->trans->dev, 0, time, ev); - } else { - data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); - trace_iwlwifi_dev_ucode_cont_event( - priv->trans->dev, time, data, ev); - } - } - /* Allow device to power down */ - iwl_trans_release_nic_access(priv->trans, ®_flags); -} - -static void iwl_continuous_event_trace(struct iwl_priv *priv) -{ - u32 capacity; /* event log capacity in # entries */ - struct { - u32 capacity; - u32 mode; - u32 wrap_counter; - u32 write_counter; - } __packed read; - u32 base; /* SRAM byte address of event log header */ - u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ - u32 num_wraps; /* # times uCode wrapped to top of log */ - u32 next_entry; /* index of next entry to be written by uCode */ - - base = priv->device_pointers.log_event_table; - if (iwlagn_hw_valid_rtc_data_addr(base)) { - iwl_trans_read_mem_bytes(priv->trans, base, - &read, sizeof(read)); - capacity = read.capacity; - mode = read.mode; - num_wraps = read.wrap_counter; - next_entry = read.write_counter; - } else - return; - - /* - * Unfortunately, the uCode doesn't use temporary variables. - * Therefore, it can happen that we read next_entry == capacity, - * which really means next_entry == 0. - */ - if (unlikely(next_entry == capacity)) - next_entry = 0; - /* - * Additionally, the uCode increases the write pointer before - * the wraps counter, so if the write pointer is smaller than - * the old write pointer (wrap occurred) but we read that no - * wrap occurred, we actually read between the next_entry and - * num_wraps update (this does happen in practice!!) -- take - * that into account by increasing num_wraps. 
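The wrap handling above compensates for the uCode updating its write pointer before its wrap counter: if the pointer moved backwards while the wrap count looks unchanged, the reader counts the wrap itself and dumps the log in two ranges. A minimal model of that bookkeeping, with invented sizes:

#include <stdio.h>

struct log_pos {
	unsigned int next_entry;	/* next slot the uCode will write */
	unsigned int num_wraps;		/* times the log wrapped around   */
};

static void dump_new_events(struct log_pos *seen, struct log_pos now,
			    unsigned int capacity)
{
	/* the writer bumps next_entry before num_wraps: if the pointer went
	 * backwards while the wrap count looks unchanged, we raced with the
	 * writer -- count the wrap ourselves */
	if (now.next_entry < seen->next_entry &&
	    now.num_wraps == seen->num_wraps)
		now.num_wraps++;

	if (now.num_wraps == seen->num_wraps)
		printf("dump [%u, %u)\n", seen->next_entry, now.next_entry);
	else
		printf("dump [%u, %u) then [0, %u)\n",
		       seen->next_entry, capacity, now.next_entry);

	*seen = now;
}

int main(void)
{
	struct log_pos seen = { .next_entry = 120, .num_wraps = 3 };

	dump_new_events(&seen, (struct log_pos){ 200, 3 }, 256);	/* no wrap    */
	dump_new_events(&seen, (struct log_pos){ 40, 3 }, 256);		/* raced wrap */
	return 0;
}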
- */ - if (unlikely(next_entry < priv->event_log.next_entry && - num_wraps == priv->event_log.num_wraps)) - num_wraps++; - - if (num_wraps == priv->event_log.num_wraps) { - iwl_print_cont_event_trace( - priv, base, priv->event_log.next_entry, - next_entry - priv->event_log.next_entry, - capacity, mode); - - priv->event_log.non_wraps_count++; - } else { - if (num_wraps - priv->event_log.num_wraps > 1) - priv->event_log.wraps_more_count++; - else - priv->event_log.wraps_once_count++; - - trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev, - num_wraps - priv->event_log.num_wraps, - next_entry, priv->event_log.next_entry); - - if (next_entry < priv->event_log.next_entry) { - iwl_print_cont_event_trace( - priv, base, priv->event_log.next_entry, - capacity - priv->event_log.next_entry, - capacity, mode); - - iwl_print_cont_event_trace( - priv, base, 0, next_entry, capacity, mode); - } else { - iwl_print_cont_event_trace( - priv, base, next_entry, - capacity - next_entry, - capacity, mode); - - iwl_print_cont_event_trace( - priv, base, 0, next_entry, capacity, mode); - } - } - - priv->event_log.num_wraps = num_wraps; - priv->event_log.next_entry = next_entry; -} - -/** - * iwl_bg_ucode_trace - Timer callback to log ucode event - * - * The timer is continually set to execute every - * UCODE_TRACE_PERIOD milliseconds after the last timer expired - * this function is to perform continuous uCode event logging operation - * if enabled - */ -static void iwl_bg_ucode_trace(unsigned long data) -{ - struct iwl_priv *priv = (struct iwl_priv *)data; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (priv->event_log.ucode_trace) { - iwl_continuous_event_trace(priv); - /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */ - mod_timer(&priv->ucode_trace, - jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); - } -} - -static void iwl_bg_tx_flush(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, tx_flush); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - /* do nothing if rf-kill is on */ - if (!iwl_is_ready_rf(priv)) - return; - - IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n"); - iwlagn_dev_txfifo_flush(priv); -} - -/* - * queue/FIFO/AC mapping definitions - */ - -static const u8 iwlagn_bss_ac_to_fifo[] = { - IWL_TX_FIFO_VO, - IWL_TX_FIFO_VI, - IWL_TX_FIFO_BE, - IWL_TX_FIFO_BK, -}; - -static const u8 iwlagn_bss_ac_to_queue[] = { - 0, 1, 2, 3, -}; - -static const u8 iwlagn_pan_ac_to_fifo[] = { - IWL_TX_FIFO_VO_IPAN, - IWL_TX_FIFO_VI_IPAN, - IWL_TX_FIFO_BE_IPAN, - IWL_TX_FIFO_BK_IPAN, -}; - -static const u8 iwlagn_pan_ac_to_queue[] = { - 7, 6, 5, 4, -}; - -static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) -{ - int i; - - /* - * The default context is always valid, - * the PAN context depends on uCode. 
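The tables above pair each access category with a hardware queue and FIFO; mac80211 hands the driver the four ACs in the order VO, VI, BE, BK, the BSS context owns queues 0-3 and the PAN context owns queues 7-4. A compact, purely illustrative print-out of that mapping:

#include <stdio.h>

static const char *ac_name[] = { "VO", "VI", "BE", "BK" };
static const int bss_ac_to_queue[] = { 0, 1, 2, 3 };
static const int pan_ac_to_queue[] = { 7, 6, 5, 4 };

int main(void)
{
	for (int ac = 0; ac < 4; ac++)
		printf("%s: BSS queue %d, PAN queue %d\n",
		       ac_name[ac], bss_ac_to_queue[ac], pan_ac_to_queue[ac]);
	return 0;
}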
- */ - priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); - if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) - priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN); - - for (i = 0; i < NUM_IWL_RXON_CTX; i++) - priv->contexts[i].ctxid = i; - - priv->contexts[IWL_RXON_CTX_BSS].always_active = true; - priv->contexts[IWL_RXON_CTX_BSS].is_active = true; - priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; - priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; - priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; - priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; - priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; - priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; - priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; - priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes = - BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR); - priv->contexts[IWL_RXON_CTX_BSS].interface_modes = - BIT(NL80211_IFTYPE_STATION); - priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP; - priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; - priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; - priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; - memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue, - iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue)); - memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo, - iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo)); - - priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON; - priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = - REPLY_WIPAN_RXON_TIMING; - priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd = - REPLY_WIPAN_RXON_ASSOC; - priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM; - priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN; - priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY; - priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID; - priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION; - priv->contexts[IWL_RXON_CTX_PAN].interface_modes = - BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); - - priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; - priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; - priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; - memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue, - iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue)); - memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo, - iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo)); - priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE; - - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); -} - -static void iwl_rf_kill_ct_config(struct iwl_priv *priv) -{ - struct iwl_ct_kill_config cmd; - struct iwl_ct_kill_throttling_config adv_cmd; - int ret = 0; - - iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); - - priv->thermal_throttle.ct_kill_toggle = false; - - if (priv->lib->support_ct_kill_exit) { - adv_cmd.critical_temperature_enter = - cpu_to_le32(priv->hw_params.ct_kill_threshold); - adv_cmd.critical_temperature_exit = - cpu_to_le32(priv->hw_params.ct_kill_exit_threshold); - - ret = iwl_dvm_send_cmd_pdu(priv, - REPLY_CT_KILL_CONFIG_CMD, - 0, sizeof(adv_cmd), &adv_cmd); - if (ret) - IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); - else - IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " - "succeeded, critical temperature enter is %d," - "exit is %d\n", - 
priv->hw_params.ct_kill_threshold, - priv->hw_params.ct_kill_exit_threshold); - } else { - cmd.critical_temperature_R = - cpu_to_le32(priv->hw_params.ct_kill_threshold); - - ret = iwl_dvm_send_cmd_pdu(priv, - REPLY_CT_KILL_CONFIG_CMD, - 0, sizeof(cmd), &cmd); - if (ret) - IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); - else - IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " - "succeeded, " - "critical temperature is %d\n", - priv->hw_params.ct_kill_threshold); - } -} - -static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg) -{ - struct iwl_calib_cfg_cmd calib_cfg_cmd; - struct iwl_host_cmd cmd = { - .id = CALIBRATION_CFG_CMD, - .len = { sizeof(struct iwl_calib_cfg_cmd), }, - .data = { &calib_cfg_cmd, }, - }; - - memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); - calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL; - calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg); - - return iwl_dvm_send_cmd(priv, &cmd); -} - - -static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) -{ - struct iwl_tx_ant_config_cmd tx_ant_cmd = { - .valid = cpu_to_le32(valid_tx_ant), - }; - - if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) { - IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); - return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0, - sizeof(struct iwl_tx_ant_config_cmd), - &tx_ant_cmd); - } else { - IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n"); - return -EOPNOTSUPP; - } -} - -static void iwl_send_bt_config(struct iwl_priv *priv) -{ - struct iwl_bt_cmd bt_cmd = { - .lead_time = BT_LEAD_TIME_DEF, - .max_kill = BT_MAX_KILL_DEF, - .kill_ack_mask = 0, - .kill_cts_mask = 0, - }; - - if (!iwlwifi_mod_params.bt_coex_active) - bt_cmd.flags = BT_COEX_DISABLE; - else - bt_cmd.flags = BT_COEX_ENABLE; - - priv->bt_enable_flag = bt_cmd.flags; - IWL_DEBUG_INFO(priv, "BT coex %s\n", - (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); - - if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, - 0, sizeof(struct iwl_bt_cmd), &bt_cmd)) - IWL_ERR(priv, "failed to send BT Coex Config\n"); -} - -/** - * iwl_alive_start - called after REPLY_ALIVE notification received - * from protocol/runtime uCode (initialization uCode's - * Alive gets handled by iwl_init_alive_start()). 
- */ -int iwl_alive_start(struct iwl_priv *priv) -{ - int ret = 0; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); - - /* After the ALIVE response, we can send host commands to the uCode */ - set_bit(STATUS_ALIVE, &priv->status); - - if (iwl_is_rfkill(priv)) - return -ERFKILL; - - if (priv->event_log.ucode_trace) { - /* start collecting data now */ - mod_timer(&priv->ucode_trace, jiffies); - } - - /* download priority table before any calibration request */ - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist) { - /* Configure Bluetooth device coexistence support */ - if (priv->lib->bt_params->bt_sco_disable) - priv->bt_enable_pspoll = false; - else - priv->bt_enable_pspoll = true; - - priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; - priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; - priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; - iwlagn_send_advance_bt_config(priv); - priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS; - priv->cur_rssi_ctx = NULL; - - iwl_send_prio_tbl(priv); - - /* FIXME: w/a to force change uCode BT state machine */ - ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, - BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); - if (ret) - return ret; - ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE, - BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); - if (ret) - return ret; - } else if (priv->lib->bt_params) { - /* - * default is 2-wire BT coexexistence support - */ - iwl_send_bt_config(priv); - } - - /* - * Perform runtime calibrations, including DC calibration. - */ - iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX); - - ieee80211_wake_queues(priv->hw); - - /* Configure Tx antenna selection based on H/W config */ - iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant); - - if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { - struct iwl_rxon_cmd *active_rxon = - (struct iwl_rxon_cmd *)&ctx->active; - /* apply any changes in staging */ - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - } else { - struct iwl_rxon_context *tmp; - /* Initialize our rx_config data */ - for_each_context(priv, tmp) - iwl_connection_init_rx_config(priv, tmp); - - iwlagn_set_rxon_chain(priv, ctx); - } - - if (!priv->wowlan) { - /* WoWLAN ucode will not reply in the same way, skip it */ - iwl_reset_run_time_calib(priv); - } - - set_bit(STATUS_READY, &priv->status); - - /* Configure the adapter for unassociated operation */ - ret = iwlagn_commit_rxon(priv, ctx); - if (ret) - return ret; - - /* At this point, the NIC is initialized and operational */ - iwl_rf_kill_ct_config(priv); - - IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); - - return iwl_power_update_mode(priv, true); -} - -/** - * iwl_clear_driver_stations - clear knowledge of all stations from driver - * @priv: iwl priv struct - * - * This is called during iwl_down() to make sure that in the case - * we're coming there from a hardware restart mac80211 will be - * able to reconfigure stations -- if we're getting there in the - * normal down flow then the stations will already be cleared. 
- */ -static void iwl_clear_driver_stations(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - - spin_lock_bh(&priv->sta_lock); - memset(priv->stations, 0, sizeof(priv->stations)); - priv->num_stations = 0; - - priv->ucode_key_table = 0; - - for_each_context(priv, ctx) { - /* - * Remove all key information that is not stored as part - * of station information since mac80211 may not have had - * a chance to remove all the keys. When device is - * reconfigured by mac80211 after an error all keys will - * be reconfigured. - */ - memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys)); - ctx->key_mapping_keys = 0; - } - - spin_unlock_bh(&priv->sta_lock); -} - -void iwl_down(struct iwl_priv *priv) -{ - int exit_pending; - - IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); - - lockdep_assert_held(&priv->mutex); - - iwl_scan_cancel_timeout(priv, 200); - - exit_pending = - test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); - - iwl_clear_ucode_stations(priv, NULL); - iwl_dealloc_bcast_stations(priv); - iwl_clear_driver_stations(priv); - - /* reset BT coex data */ - priv->bt_status = 0; - priv->cur_rssi_ctx = NULL; - priv->bt_is_sco = 0; - if (priv->lib->bt_params) - priv->bt_traffic_load = - priv->lib->bt_params->bt_init_traffic_load; - else - priv->bt_traffic_load = 0; - priv->bt_full_concurrent = false; - priv->bt_ci_compliance = 0; - - /* Wipe out the EXIT_PENDING status bit if we are not actually - * exiting the module */ - if (!exit_pending) - clear_bit(STATUS_EXIT_PENDING, &priv->status); - - if (priv->mac80211_registered) - ieee80211_stop_queues(priv->hw); - - priv->ucode_loaded = false; - iwl_trans_stop_device(priv->trans); - - /* Set num_aux_in_flight must be done after the transport is stopped */ - atomic_set(&priv->num_aux_in_flight, 0); - - /* Clear out all status bits but a few that are stable across reset */ - priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << - STATUS_RF_KILL_HW | - test_bit(STATUS_FW_ERROR, &priv->status) << - STATUS_FW_ERROR | - test_bit(STATUS_EXIT_PENDING, &priv->status) << - STATUS_EXIT_PENDING; - - dev_kfree_skb(priv->beacon_skb); - priv->beacon_skb = NULL; -} - -/***************************************************************************** - * - * Workqueue callbacks - * - *****************************************************************************/ - -static void iwl_bg_run_time_calib_work(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, - run_time_calib_work); - - mutex_lock(&priv->mutex); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status) || - test_bit(STATUS_SCANNING, &priv->status)) { - mutex_unlock(&priv->mutex); - return; - } - - if (priv->start_calib) { - iwl_chain_noise_calibration(priv); - iwl_sensitivity_calibration(priv); - } - - mutex_unlock(&priv->mutex); -} - -void iwlagn_prepare_restart(struct iwl_priv *priv) -{ - bool bt_full_concurrent; - u8 bt_ci_compliance; - u8 bt_load; - u8 bt_status; - bool bt_is_sco; - int i; - - lockdep_assert_held(&priv->mutex); - - priv->is_open = 0; - - /* - * __iwl_down() will clear the BT status variables, - * which is correct, but when we restart we really - * want to keep them so restore them afterwards. - * - * The restart process will later pick them up and - * re-configure the hw when we reconfigure the BT - * command. 
- */ - bt_full_concurrent = priv->bt_full_concurrent; - bt_ci_compliance = priv->bt_ci_compliance; - bt_load = priv->bt_traffic_load; - bt_status = priv->bt_status; - bt_is_sco = priv->bt_is_sco; - - iwl_down(priv); - - priv->bt_full_concurrent = bt_full_concurrent; - priv->bt_ci_compliance = bt_ci_compliance; - priv->bt_traffic_load = bt_load; - priv->bt_status = bt_status; - priv->bt_is_sco = bt_is_sco; - - /* reset aggregation queues */ - for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++) - priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; - /* and stop counts */ - for (i = 0; i < IWL_MAX_HW_QUEUES; i++) - atomic_set(&priv->queue_stop_count[i], 0); - - memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc)); -} - -static void iwl_bg_restart(struct work_struct *data) -{ - struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { - mutex_lock(&priv->mutex); - iwlagn_prepare_restart(priv); - mutex_unlock(&priv->mutex); - iwl_cancel_deferred_work(priv); - if (priv->mac80211_registered) - ieee80211_restart_hw(priv->hw); - else - IWL_ERR(priv, - "Cannot request restart before registrating with mac80211\n"); - } else { - WARN_ON(1); - } -} - -/***************************************************************************** - * - * driver setup and teardown - * - *****************************************************************************/ - -static void iwl_setup_deferred_work(struct iwl_priv *priv) -{ - priv->workqueue = create_singlethread_workqueue(DRV_NAME); - - INIT_WORK(&priv->restart, iwl_bg_restart); - INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); - INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); - INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); - INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency); - INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); - - iwl_setup_scan_deferred_work(priv); - - if (priv->lib->bt_params) - iwlagn_bt_setup_deferred_work(priv); - - setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic, - (unsigned long)priv); - - setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace, - (unsigned long)priv); -} - -void iwl_cancel_deferred_work(struct iwl_priv *priv) -{ - if (priv->lib->bt_params) - iwlagn_bt_cancel_deferred_work(priv); - - cancel_work_sync(&priv->run_time_calib_work); - cancel_work_sync(&priv->beacon_update); - - iwl_cancel_scan_deferred_work(priv); - - cancel_work_sync(&priv->bt_full_concurrency); - cancel_work_sync(&priv->bt_runtime_config); - - del_timer_sync(&priv->statistics_periodic); - del_timer_sync(&priv->ucode_trace); -} - -static int iwl_init_drv(struct iwl_priv *priv) -{ - spin_lock_init(&priv->sta_lock); - - mutex_init(&priv->mutex); - - INIT_LIST_HEAD(&priv->calib_results); - - priv->band = IEEE80211_BAND_2GHZ; - - priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold; - - priv->iw_mode = NL80211_IFTYPE_STATION; - priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; - priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; - priv->agg_tids_count = 0; - - priv->rx_statistics_jiffies = jiffies; - - /* Choose which receivers/antennas to use */ - iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]); - - iwl_init_scan_params(priv); - - /* init bt coex */ - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist) { - priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; - priv->kill_cts_mask = 
IWLAGN_BT_KILL_CTS_MASK_DEFAULT; - priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; - priv->bt_on_thresh = BT_ON_THRESHOLD_DEF; - priv->bt_duration = BT_DURATION_LIMIT_DEF; - priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; - } - - return 0; -} - -static void iwl_uninit_drv(struct iwl_priv *priv) -{ - kfree(priv->scan_cmd); - kfree(priv->beacon_cmd); - kfree(rcu_dereference_raw(priv->noa_data)); - iwl_calib_free_results(priv); -#ifdef CONFIG_IWLWIFI_DEBUGFS - kfree(priv->wowlan_sram); -#endif -} - -static void iwl_set_hw_params(struct iwl_priv *priv) -{ - if (priv->cfg->ht_params) - priv->hw_params.use_rts_for_aggregation = - priv->cfg->ht_params->use_rts_for_aggregation; - - /* Device-specific setup */ - priv->lib->set_hw_params(priv); -} - - - -/* show what optional capabilities we have */ -static void iwl_option_config(struct iwl_priv *priv) -{ -#ifdef CONFIG_IWLWIFI_DEBUG - IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n"); -#else - IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n"); -#endif - -#ifdef CONFIG_IWLWIFI_DEBUGFS - IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n"); -#else - IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n"); -#endif - -#ifdef CONFIG_IWLWIFI_DEVICE_TRACING - IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n"); -#else - IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n"); -#endif -} - -static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) -{ - struct iwl_nvm_data *data = priv->nvm_data; - - if (data->sku_cap_11n_enable && - !priv->cfg->ht_params) { - IWL_ERR(priv, "Invalid 11n configuration\n"); - return -EINVAL; - } - - if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable && - !data->sku_cap_band_52GHz_enable) { - IWL_ERR(priv, "Invalid device sku\n"); - return -EINVAL; - } - - IWL_DEBUG_INFO(priv, - "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n", - data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled", - data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled", - data->sku_cap_11n_enable ? "" : "NOT", "enabled"); - - priv->hw_params.tx_chains_num = - num_of_ant(data->valid_tx_ant); - if (priv->cfg->rx_with_siso_diversity) - priv->hw_params.rx_chains_num = 1; - else - priv->hw_params.rx_chains_num = - num_of_ant(data->valid_rx_ant); - - IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n", - data->valid_tx_ant, - data->valid_rx_ant); - - return 0; -} - -static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, - const struct iwl_cfg *cfg, - const struct iwl_fw *fw, - struct dentry *dbgfs_dir) -{ - struct iwl_priv *priv; - struct ieee80211_hw *hw; - struct iwl_op_mode *op_mode; - u16 num_mac; - u32 ucode_flags; - struct iwl_trans_config trans_cfg = {}; - static const u8 no_reclaim_cmds[] = { - REPLY_RX_PHY_CMD, - REPLY_RX_MPDU_CMD, - REPLY_COMPRESSED_BA, - STATISTICS_NOTIFICATION, - REPLY_TX, - }; - int i; - - /************************ - * 1. 
Allocating HW data - ************************/ - hw = iwl_alloc_all(); - if (!hw) { - pr_err("%s: Cannot allocate network device\n", cfg->name); - goto out; - } - - op_mode = hw->priv; - op_mode->ops = &iwl_dvm_ops; - priv = IWL_OP_MODE_GET_DVM(op_mode); - priv->trans = trans; - priv->dev = trans->dev; - priv->cfg = cfg; - priv->fw = fw; - - switch (priv->cfg->device_family) { - case IWL_DEVICE_FAMILY_1000: - case IWL_DEVICE_FAMILY_100: - priv->lib = &iwl_dvm_1000_cfg; - break; - case IWL_DEVICE_FAMILY_2000: - priv->lib = &iwl_dvm_2000_cfg; - break; - case IWL_DEVICE_FAMILY_105: - priv->lib = &iwl_dvm_105_cfg; - break; - case IWL_DEVICE_FAMILY_2030: - case IWL_DEVICE_FAMILY_135: - priv->lib = &iwl_dvm_2030_cfg; - break; - case IWL_DEVICE_FAMILY_5000: - priv->lib = &iwl_dvm_5000_cfg; - break; - case IWL_DEVICE_FAMILY_5150: - priv->lib = &iwl_dvm_5150_cfg; - break; - case IWL_DEVICE_FAMILY_6000: - case IWL_DEVICE_FAMILY_6000i: - priv->lib = &iwl_dvm_6000_cfg; - break; - case IWL_DEVICE_FAMILY_6005: - priv->lib = &iwl_dvm_6005_cfg; - break; - case IWL_DEVICE_FAMILY_6050: - case IWL_DEVICE_FAMILY_6150: - priv->lib = &iwl_dvm_6050_cfg; - break; - case IWL_DEVICE_FAMILY_6030: - priv->lib = &iwl_dvm_6030_cfg; - break; - default: - break; - } - - if (WARN_ON(!priv->lib)) - goto out_free_hw; - - /* - * Populate the state variables that the transport layer needs - * to know about. - */ - trans_cfg.op_mode = op_mode; - trans_cfg.no_reclaim_cmds = no_reclaim_cmds; - trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; - trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED; - - trans_cfg.command_names = iwl_dvm_cmd_strings; - trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM; - - WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE < - priv->cfg->base_params->num_of_queues); - - ucode_flags = fw->ucode_capa.flags; - - if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { - priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; - trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; - } else { - priv->sta_key_max_num = STA_KEY_MAX_NUM; - trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; - } - - /* Configure transport layer */ - iwl_trans_configure(priv->trans, &trans_cfg); - - trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; - trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); - - /* At this point both hw and priv are allocated. */ - - SET_IEEE80211_DEV(priv->hw, priv->trans->dev); - - iwl_option_config(priv); - - IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); - - /* is antenna coupling more than 35dB ? */ - priv->bt_ant_couple_ok = - (iwlwifi_mod_params.ant_coupling > - IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? - true : false; - - /* bt channel inhibition enabled*/ - priv->bt_ch_announce = true; - IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n", - (priv->bt_ch_announce) ? "On" : "Off"); - - /* these spin locks will be used in apm_ops.init and EEPROM access - * we should init now - */ - spin_lock_init(&priv->statistics.lock); - - /*********************** - * 2. Read REV register - ***********************/ - IWL_INFO(priv, "Detected %s, REV=0x%X\n", - priv->cfg->name, priv->trans->hw_rev); - - if (iwl_trans_start_hw(priv->trans)) - goto out_free_hw; - - /* Read the EEPROM */ - if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob, - &priv->eeprom_blob_size)) { - IWL_ERR(priv, "Unable to init EEPROM\n"); - goto out_free_hw; - } - - /* Reset chip to save power until we load uCode during "up". 
*/ - iwl_trans_stop_device(priv->trans); - - priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg, - priv->eeprom_blob, - priv->eeprom_blob_size); - if (!priv->nvm_data) - goto out_free_eeprom_blob; - - if (iwl_nvm_check_version(priv->nvm_data, priv->trans)) - goto out_free_eeprom; - - if (iwl_eeprom_init_hw_params(priv)) - goto out_free_eeprom; - - /* extract MAC Address */ - memcpy(priv->addresses[0].addr, priv->nvm_data->hw_addr, ETH_ALEN); - IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); - priv->hw->wiphy->addresses = priv->addresses; - priv->hw->wiphy->n_addresses = 1; - num_mac = priv->nvm_data->n_hw_addrs; - if (num_mac > 1) { - memcpy(priv->addresses[1].addr, priv->addresses[0].addr, - ETH_ALEN); - priv->addresses[1].addr[5]++; - priv->hw->wiphy->n_addresses++; - } - - /************************ - * 4. Setup HW constants - ************************/ - iwl_set_hw_params(priv); - - if (!(priv->nvm_data->sku_cap_ipan_enable)) { - IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n"); - ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; - /* - * if not PAN, then don't support P2P -- might be a uCode - * packaging bug or due to the eeprom check above - */ - priv->sta_key_max_num = STA_KEY_MAX_NUM; - trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; - - /* Configure transport layer again*/ - iwl_trans_configure(priv->trans, &trans_cfg); - } - - /******************* - * 5. Setup priv - *******************/ - for (i = 0; i < IWL_MAX_HW_QUEUES; i++) { - priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; - if (i < IWLAGN_FIRST_AMPDU_QUEUE && - i != IWL_DEFAULT_CMD_QUEUE_NUM && - i != IWL_IPAN_CMD_QUEUE_NUM) - priv->queue_to_mac80211[i] = i; - atomic_set(&priv->queue_stop_count[i], 0); - } - - if (iwl_init_drv(priv)) - goto out_free_eeprom; - - /* At this point both hw and priv are initialized. */ - - /******************** - * 6. Setup services - ********************/ - iwl_setup_deferred_work(priv); - iwl_setup_rx_handlers(priv); - - iwl_power_initialize(priv); - iwl_tt_initialize(priv); - - snprintf(priv->hw->wiphy->fw_version, - sizeof(priv->hw->wiphy->fw_version), - "%s", fw->fw_version); - - priv->new_scan_threshold_behaviour = - !!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN); - - priv->phy_calib_chain_noise_reset_cmd = - fw->ucode_capa.standard_phy_calibration_size; - priv->phy_calib_chain_noise_gain_cmd = - fw->ucode_capa.standard_phy_calibration_size + 1; - - /* initialize all valid contexts */ - iwl_init_context(priv, ucode_flags); - - /************************************************** - * This is still part of probe() in a sense... - * - * 7. 
Setup and register with mac80211 and debugfs - **************************************************/ - if (iwlagn_mac_setup_register(priv, &fw->ucode_capa)) - goto out_destroy_workqueue; - - if (iwl_dbgfs_register(priv, dbgfs_dir)) - goto out_mac80211_unregister; - - return op_mode; - -out_mac80211_unregister: - iwlagn_mac_unregister(priv); -out_destroy_workqueue: - iwl_tt_exit(priv); - iwl_cancel_deferred_work(priv); - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; - iwl_uninit_drv(priv); -out_free_eeprom_blob: - kfree(priv->eeprom_blob); -out_free_eeprom: - iwl_free_nvm_data(priv->nvm_data); -out_free_hw: - ieee80211_free_hw(priv->hw); -out: - op_mode = NULL; - return op_mode; -} - -static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); - - iwlagn_mac_unregister(priv); - - iwl_tt_exit(priv); - - kfree(priv->eeprom_blob); - iwl_free_nvm_data(priv->nvm_data); - - /*netif_stop_queue(dev); */ - flush_workqueue(priv->workqueue); - - /* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes - * priv->workqueue... so we can't take down the workqueue - * until now... */ - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; - - iwl_uninit_drv(priv); - - dev_kfree_skb(priv->beacon_skb); - - iwl_trans_op_mode_leave(priv->trans); - ieee80211_free_hw(priv->hw); -} - -static const char * const desc_lookup_text[] = { - "OK", - "FAIL", - "BAD_PARAM", - "BAD_CHECKSUM", - "NMI_INTERRUPT_WDG", - "SYSASSERT", - "FATAL_ERROR", - "BAD_COMMAND", - "HW_ERROR_TUNE_LOCK", - "HW_ERROR_TEMPERATURE", - "ILLEGAL_CHAN_FREQ", - "VCC_NOT_STABLE", - "FH_ERROR", - "NMI_INTERRUPT_HOST", - "NMI_INTERRUPT_ACTION_PT", - "NMI_INTERRUPT_UNKNOWN", - "UCODE_VERSION_MISMATCH", - "HW_ERROR_ABS_LOCK", - "HW_ERROR_CAL_LOCK_FAIL", - "NMI_INTERRUPT_INST_ACTION_PT", - "NMI_INTERRUPT_DATA_ACTION_PT", - "NMI_TRM_HW_ER", - "NMI_INTERRUPT_TRM", - "NMI_INTERRUPT_BREAK_POINT", - "DEBUG_0", - "DEBUG_1", - "DEBUG_2", - "DEBUG_3", -}; - -static struct { char *name; u8 num; } advanced_lookup[] = { - { "NMI_INTERRUPT_WDG", 0x34 }, - { "SYSASSERT", 0x35 }, - { "UCODE_VERSION_MISMATCH", 0x37 }, - { "BAD_COMMAND", 0x38 }, - { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, - { "FATAL_ERROR", 0x3D }, - { "NMI_TRM_HW_ERR", 0x46 }, - { "NMI_INTERRUPT_TRM", 0x4C }, - { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, - { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, - { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, - { "NMI_INTERRUPT_HOST", 0x66 }, - { "NMI_INTERRUPT_ACTION_PT", 0x7C }, - { "NMI_INTERRUPT_UNKNOWN", 0x84 }, - { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, - { "ADVANCED_SYSASSERT", 0 }, -}; - -static const char *desc_lookup(u32 num) -{ - int i; - int max = ARRAY_SIZE(desc_lookup_text); - - if (num < max) - return desc_lookup_text[num]; - - max = ARRAY_SIZE(advanced_lookup) - 1; - for (i = 0; i < max; i++) { - if (advanced_lookup[i].num == num) - break; - } - return advanced_lookup[i].name; -} - -#define ERROR_START_OFFSET (1 * sizeof(u32)) -#define ERROR_ELEM_SIZE (7 * sizeof(u32)) - -static void iwl_dump_nic_error_log(struct iwl_priv *priv) -{ - struct iwl_trans *trans = priv->trans; - u32 base; - struct iwl_error_event_table table; - - base = priv->device_pointers.error_event_table; - if (priv->cur_ucode == IWL_UCODE_INIT) { - if (!base) - base = priv->fw->init_errlog_ptr; - } else { - if (!base) - base = priv->fw->inst_errlog_ptr; - } - - if (!iwlagn_hw_valid_rtc_data_addr(base)) { - IWL_ERR(priv, - "Not valid error log 
pointer 0x%08X for %s uCode\n", - base, - (priv->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); - return; - } - - /*TODO: Update dbgfs with ISR error stats obtained below */ - iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); - - if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { - IWL_ERR(trans, "Start IWL Error Log Dump:\n"); - IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", - priv->status, table.valid); - } - - trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, - table.data1, table.data2, table.line, - table.blink1, table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.ucode_ver, - table.hw_ver, 0, table.brd_ver); - IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, - desc_lookup(table.error_id)); - IWL_ERR(priv, "0x%08X | uPc\n", table.pc); - IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1); - IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2); - IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1); - IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2); - IWL_ERR(priv, "0x%08X | data1\n", table.data1); - IWL_ERR(priv, "0x%08X | data2\n", table.data2); - IWL_ERR(priv, "0x%08X | line\n", table.line); - IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time); - IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low); - IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi); - IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1); - IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2); - IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3); - IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver); - IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver); - IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver); - IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd); - IWL_ERR(priv, "0x%08X | isr0\n", table.isr0); - IWL_ERR(priv, "0x%08X | isr1\n", table.isr1); - IWL_ERR(priv, "0x%08X | isr2\n", table.isr2); - IWL_ERR(priv, "0x%08X | isr3\n", table.isr3); - IWL_ERR(priv, "0x%08X | isr4\n", table.isr4); - IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref); - IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event); - IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control); - IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration); - IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); - IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); - IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); - IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp); - IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler); -} - -#define EVENT_START_OFFSET (4 * sizeof(u32)) - -/** - * iwl_print_event_log - Dump error event log to syslog - * - */ -static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, - u32 num_events, u32 mode, - int pos, char **buf, size_t bufsz) -{ - u32 i; - u32 base; /* SRAM byte address of event log header */ - u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ - u32 ptr; /* SRAM byte address of log data */ - u32 ev, time, data; /* event log data */ - unsigned long reg_flags; - - struct iwl_trans *trans = priv->trans; - - if (num_events == 0) - return pos; - - base = priv->device_pointers.log_event_table; - if (priv->cur_ucode == IWL_UCODE_INIT) { - if (!base) - base = priv->fw->init_evtlog_ptr; - } else { - if (!base) - base = priv->fw->inst_evtlog_ptr; - } - - if (mode == 0) - event_size = 2 * sizeof(u32); - else - event_size = 3 * sizeof(u32); - - ptr = base + EVENT_START_OFFSET + (start_idx * event_size); - - /* Make sure device is 
powered up for SRAM reads */ - if (!iwl_trans_grab_nic_access(trans, false, &reg_flags)) - return pos; - - /* Set starting address; reads will auto-increment */ - iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr); - - /* "time" is actually "data" for mode 0 (no timestamp). - * place event id # at far right for easier visual parsing. */ - for (i = 0; i < num_events; i++) { - ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - time = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - if (mode == 0) { - /* data, ev */ - if (bufsz) { - pos += scnprintf(*buf + pos, bufsz - pos, - "EVT_LOG:0x%08x:%04u\n", - time, ev); - } else { - trace_iwlwifi_dev_ucode_event(trans->dev, 0, - time, ev); - IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", - time, ev); - } - } else { - data = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - if (bufsz) { - pos += scnprintf(*buf + pos, bufsz - pos, - "EVT_LOGT:%010u:0x%08x:%04u\n", - time, data, ev); - } else { - IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", - time, data, ev); - trace_iwlwifi_dev_ucode_event(trans->dev, time, - data, ev); - } - } - } - - /* Allow device to power down */ - iwl_trans_release_nic_access(trans, &reg_flags); - return pos; -} - -/** - * iwl_print_last_event_logs - Dump the newest # of event log to syslog - */ -static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, - u32 num_wraps, u32 next_entry, - u32 size, u32 mode, - int pos, char **buf, size_t bufsz) -{ - /* - * display the newest DEFAULT_LOG_ENTRIES entries - * i.e the entries just before the next ont that uCode would fill. - */ - if (num_wraps) { - if (next_entry < size) { - pos = iwl_print_event_log(priv, - capacity - (size - next_entry), - size - next_entry, mode, - pos, buf, bufsz); - pos = iwl_print_event_log(priv, 0, - next_entry, mode, - pos, buf, bufsz); - } else - pos = iwl_print_event_log(priv, next_entry - size, - size, mode, pos, buf, bufsz); - } else { - if (next_entry < size) { - pos = iwl_print_event_log(priv, 0, next_entry, - mode, pos, buf, bufsz); - } else { - pos = iwl_print_event_log(priv, next_entry - size, - size, mode, pos, buf, bufsz); - } - } - return pos; -} - -#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) - -int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, - char **buf) -{ - u32 base; /* SRAM byte address of event log header */ - u32 capacity; /* event log capacity in # entries */ - u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ - u32 num_wraps; /* # times uCode wrapped to top of log */ - u32 next_entry; /* index of next entry to be written by uCode */ - u32 size; /* # entries that we'll print */ - u32 logsize; - int pos = 0; - size_t bufsz = 0; - struct iwl_trans *trans = priv->trans; - - base = priv->device_pointers.log_event_table; - if (priv->cur_ucode == IWL_UCODE_INIT) { - logsize = priv->fw->init_evtlog_size; - if (!base) - base = priv->fw->init_evtlog_ptr; - } else { - logsize = priv->fw->inst_evtlog_size; - if (!base) - base = priv->fw->inst_evtlog_ptr; - } - - if (!iwlagn_hw_valid_rtc_data_addr(base)) { - IWL_ERR(priv, - "Invalid event log pointer 0x%08X for %s uCode\n", - base, - (priv->cur_ucode == IWL_UCODE_INIT) - ? 
"Init" : "RT"); - return -EINVAL; - } - - /* event log header */ - capacity = iwl_trans_read_mem32(trans, base); - mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32))); - num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32))); - next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32))); - - if (capacity > logsize) { - IWL_ERR(priv, "Log capacity %d is bogus, limit to %d " - "entries\n", capacity, logsize); - capacity = logsize; - } - - if (next_entry > logsize) { - IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", - next_entry, logsize); - next_entry = logsize; - } - - size = num_wraps ? capacity : next_entry; - - /* bail out if nothing in log */ - if (size == 0) { - IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n"); - return pos; - } - - if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) - size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) - ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; - IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", - size); - -#ifdef CONFIG_IWLWIFI_DEBUG - if (buf) { - if (full_log) - bufsz = capacity * 48; - else - bufsz = size * 48; - *buf = kmalloc(bufsz, GFP_KERNEL); - if (!*buf) - return -ENOMEM; - } - if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) { - /* - * if uCode has wrapped back to top of log, - * start at the oldest entry, - * i.e the next one that uCode would fill. - */ - if (num_wraps) - pos = iwl_print_event_log(priv, next_entry, - capacity - next_entry, mode, - pos, buf, bufsz); - /* (then/else) start at top of log */ - pos = iwl_print_event_log(priv, 0, - next_entry, mode, pos, buf, bufsz); - } else - pos = iwl_print_last_event_logs(priv, capacity, num_wraps, - next_entry, size, mode, - pos, buf, bufsz); -#else - pos = iwl_print_last_event_logs(priv, capacity, num_wraps, - next_entry, size, mode, - pos, buf, bufsz); -#endif - return pos; -} - -static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) -{ - unsigned int reload_msec; - unsigned long reload_jiffies; - - if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) - iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); - - /* uCode is no longer loaded. */ - priv->ucode_loaded = false; - - /* Set the FW error flag -- cleared on iwl_down */ - set_bit(STATUS_FW_ERROR, &priv->status); - - iwl_abort_notification_waits(&priv->notif_wait); - - /* Keep the restart process from trying to send host - * commands by clearing the ready bit */ - clear_bit(STATUS_READY, &priv->status); - - if (!ondemand) { - /* - * If firmware keep reloading, then it indicate something - * serious wrong and firmware having problem to recover - * from it. 
Instead of keep trying which will fill the syslog - * and hang the system, let's just stop it - */ - reload_jiffies = jiffies; - reload_msec = jiffies_to_msecs((long) reload_jiffies - - (long) priv->reload_jiffies); - priv->reload_jiffies = reload_jiffies; - if (reload_msec <= IWL_MIN_RELOAD_DURATION) { - priv->reload_count++; - if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) { - IWL_ERR(priv, "BUG_ON, Stop restarting\n"); - return; - } - } else - priv->reload_count = 0; - } - - if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { - if (iwlwifi_mod_params.restart_fw) { - IWL_DEBUG_FW_ERRORS(priv, - "Restarting adapter due to uCode error.\n"); - queue_work(priv->workqueue, &priv->restart); - } else - IWL_DEBUG_FW_ERRORS(priv, - "Detected FW error, but not restarting\n"); - } -} - -static void iwl_nic_error(struct iwl_op_mode *op_mode) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - IWL_ERR(priv, "Loaded firmware version: %s\n", - priv->fw->fw_version); - - iwl_dump_nic_error_log(priv); - iwl_dump_nic_event_log(priv, false, NULL); - - iwlagn_fw_error(priv, false); -} - -static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - if (!iwl_check_for_ct_kill(priv)) { - IWL_ERR(priv, "Restarting adapter queue is full\n"); - iwlagn_fw_error(priv, false); - } -} - -#define EEPROM_RF_CONFIG_TYPE_MAX 0x3 - -static void iwl_nic_config(struct iwl_op_mode *op_mode) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - /* SKU Control */ - iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | - CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP, - (CSR_HW_REV_STEP(priv->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) | - (CSR_HW_REV_DASH(priv->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_DASH)); - - /* write radio config values to register */ - if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) { - u32 reg_val = - priv->nvm_data->radio_cfg_type << - CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE | - priv->nvm_data->radio_cfg_step << - CSR_HW_IF_CONFIG_REG_POS_PHY_STEP | - priv->nvm_data->radio_cfg_dash << - CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; - - iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | - CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | - CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, - reg_val); - - IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n", - priv->nvm_data->radio_cfg_type, - priv->nvm_data->radio_cfg_step, - priv->nvm_data->radio_cfg_dash); - } else { - WARN_ON(1); - } - - /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); - - /* W/A : NIC is stuck in a reset state after Early PCIe power off - * (PCIe power is lost before PERST# is asserted), - * causing ME FW to lose ownership and not being able to obtain it back. 
- */ - iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, - ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); - - if (priv->lib->nic_config) - priv->lib->nic_config(priv); -} - -static void iwl_wimax_active(struct iwl_op_mode *op_mode) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - clear_bit(STATUS_READY, &priv->status); - IWL_ERR(priv, "RF is used by WiMAX\n"); -} - -static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - int mq = priv->queue_to_mac80211[queue]; - - if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) - return; - - if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) { - IWL_DEBUG_TX_QUEUES(priv, - "queue %d (mac80211 %d) already stopped\n", - queue, mq); - return; - } - - set_bit(mq, &priv->transport_queue_stop); - ieee80211_stop_queue(priv->hw, mq); -} - -static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - int mq = priv->queue_to_mac80211[queue]; - - if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) - return; - - if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) { - IWL_DEBUG_TX_QUEUES(priv, - "queue %d (mac80211 %d) already awake\n", - queue, mq); - return; - } - - clear_bit(mq, &priv->transport_queue_stop); - - if (!priv->passive_no_rx) - ieee80211_wake_queue(priv->hw, mq); -} - -void iwlagn_lift_passive_no_rx(struct iwl_priv *priv) -{ - int mq; - - if (!priv->passive_no_rx) - return; - - for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) { - if (!test_bit(mq, &priv->transport_queue_stop)) { - IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq); - ieee80211_wake_queue(priv->hw, mq); - } else { - IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq); - } - } - - priv->passive_no_rx = false; -} - -static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - struct ieee80211_tx_info *info; - - info = IEEE80211_SKB_CB(skb); - iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); - ieee80211_free_txskb(priv->hw, skb); -} - -static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - if (state) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - - wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); - - return false; -} - -static const struct iwl_op_mode_ops iwl_dvm_ops = { - .start = iwl_op_mode_dvm_start, - .stop = iwl_op_mode_dvm_stop, - .rx = iwl_rx_dispatch, - .queue_full = iwl_stop_sw_queue, - .queue_not_full = iwl_wake_sw_queue, - .hw_rf_kill = iwl_set_hw_rfkill_state, - .free_skb = iwl_free_skb, - .nic_error = iwl_nic_error, - .cmd_queue_full = iwl_cmd_queue_full, - .nic_config = iwl_nic_config, - .wimax_active = iwl_wimax_active, -}; - -/***************************************************************************** - * - * driver and module entry point - * - *****************************************************************************/ -static int __init iwl_init(void) -{ - - int ret; - - ret = iwlagn_rate_control_register(); - if (ret) { - pr_err("Unable to register rate control algorithm: %d\n", ret); - return ret; - } - - ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops); - if (ret) { - pr_err("Unable to register op_mode: %d\n", ret); - iwlagn_rate_control_unregister(); - } - - return ret; -} -module_init(iwl_init); - -static void __exit iwl_exit(void) -{ 
- iwl_opmode_deregister("iwldvm"); - iwlagn_rate_control_unregister(); -} -module_exit(iwl_exit); diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c deleted file mode 100644 index 1513dbc79c14..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ /dev/null @@ -1,395 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - - -#include -#include -#include -#include -#include "iwl-io.h" -#include "iwl-debug.h" -#include "iwl-trans.h" -#include "iwl-modparams.h" -#include "dev.h" -#include "agn.h" -#include "commands.h" -#include "power.h" - -static bool force_cam = true; -module_param(force_cam, bool, 0644); -MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); - -/* - * Setting power level allows the card to go to sleep when not busy. - * - * We calculate a sleep command based on the required latency, which - * we get from mac80211. In order to handle thermal throttling, we can - * also use pre-defined power levels. - */ - -/* - * This defines the old power levels. 
They are still used by default - * (level 1) and for thermal throttle (levels 3 through 5) - */ - -struct iwl_power_vec_entry { - struct iwl_powertable_cmd cmd; - u8 no_dtim; /* number of skip dtim */ -}; - -#define IWL_DTIM_RANGE_0_MAX 2 -#define IWL_DTIM_RANGE_1_MAX 10 - -#define NOSLP cpu_to_le16(0), 0, 0 -#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 -#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK | \ - IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \ - IWL_POWER_ADVANCE_PM_ENA_MSK) -#define ASLP_TOUT(T) cpu_to_le32(T) -#define TU_TO_USEC 1024 -#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC) -#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \ - cpu_to_le32(X1), \ - cpu_to_le32(X2), \ - cpu_to_le32(X3), \ - cpu_to_le32(X4)} -/* default power management (not Tx power) table values */ -/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */ -/* DTIM 0 - 2 */ -static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = { - {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0}, - {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1}, - {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2} -}; - - -/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ -/* DTIM 3 - 10 */ -static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = { - {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, - {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1}, - {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2} -}; - -/* for DTIM period > IWL_DTIM_RANGE_1_MAX */ -/* DTIM 11 - */ -static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = { - {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, - {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, - {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} -}; - -/* advance power management */ -/* DTIM 0 - 2 */ -static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = { - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2} -}; - - -/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ -/* DTIM 3 - 10 */ -static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = { - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2} -}; - -/* for DTIM period > IWL_DTIM_RANGE_1_MAX */ -/* DTIM 11 - */ -static const struct iwl_power_vec_entry 
apm_range_2[IWL_POWER_NUM] = { - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, - {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), - SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2} -}; - -static void iwl_static_sleep_cmd(struct iwl_priv *priv, - struct iwl_powertable_cmd *cmd, - enum iwl_power_level lvl, int period) -{ - const struct iwl_power_vec_entry *table; - int max_sleep[IWL_POWER_VEC_SIZE] = { 0 }; - int i; - u8 skip; - u32 slp_itrvl; - - if (priv->lib->adv_pm) { - table = apm_range_2; - if (period <= IWL_DTIM_RANGE_1_MAX) - table = apm_range_1; - if (period <= IWL_DTIM_RANGE_0_MAX) - table = apm_range_0; - } else { - table = range_2; - if (period <= IWL_DTIM_RANGE_1_MAX) - table = range_1; - if (period <= IWL_DTIM_RANGE_0_MAX) - table = range_0; - } - - if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM)) - memset(cmd, 0, sizeof(*cmd)); - else - *cmd = table[lvl].cmd; - - if (period == 0) { - skip = 0; - period = 1; - for (i = 0; i < IWL_POWER_VEC_SIZE; i++) - max_sleep[i] = 1; - - } else { - skip = table[lvl].no_dtim; - for (i = 0; i < IWL_POWER_VEC_SIZE; i++) - max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]); - max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1; - } - - slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); - /* figure out the listen interval based on dtim period and skip */ - if (slp_itrvl == 0xFF) - cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = - cpu_to_le32(period * (skip + 1)); - - slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); - if (slp_itrvl > period) - cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = - cpu_to_le32((slp_itrvl / period) * period); - - if (skip) - cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; - else - cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; - - if (priv->cfg->base_params->shadow_reg_enable) - cmd->flags |= IWL_POWER_SHADOW_REG_ENA; - else - cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; - - if (iwl_advanced_bt_coexist(priv)) { - if (!priv->lib->bt_params->bt_sco_disable) - cmd->flags |= IWL_POWER_BT_SCO_ENA; - else - cmd->flags &= ~IWL_POWER_BT_SCO_ENA; - } - - - slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); - if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL) - cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = - cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL); - - /* enforce max sleep interval */ - for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) { - if (le32_to_cpu(cmd->sleep_interval[i]) > - (max_sleep[i] * period)) - cmd->sleep_interval[i] = - cpu_to_le32(max_sleep[i] * period); - if (i != (IWL_POWER_VEC_SIZE - 1)) { - if (le32_to_cpu(cmd->sleep_interval[i]) > - le32_to_cpu(cmd->sleep_interval[i+1])) - cmd->sleep_interval[i] = - cmd->sleep_interval[i+1]; - } - } - - if (priv->power_data.bus_pm) - cmd->flags |= IWL_POWER_PCI_PM_MSK; - else - cmd->flags &= ~IWL_POWER_PCI_PM_MSK; - - IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n", - skip, period); - /* The power level here is 0-4 (used as array index), but user expects - to see 1-5 (according to spec). 
*/ - IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1); -} - -static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv, - struct iwl_powertable_cmd *cmd) -{ - memset(cmd, 0, sizeof(*cmd)); - - if (priv->power_data.bus_pm) - cmd->flags |= IWL_POWER_PCI_PM_MSK; - - IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); -} - -static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) -{ - IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); - IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags); - IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); - IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); - IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n", - le32_to_cpu(cmd->sleep_interval[0]), - le32_to_cpu(cmd->sleep_interval[1]), - le32_to_cpu(cmd->sleep_interval[2]), - le32_to_cpu(cmd->sleep_interval[3]), - le32_to_cpu(cmd->sleep_interval[4])); - - return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0, - sizeof(struct iwl_powertable_cmd), cmd); -} - -static void iwl_power_build_cmd(struct iwl_priv *priv, - struct iwl_powertable_cmd *cmd) -{ - bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS; - int dtimper; - - if (force_cam) { - iwl_power_sleep_cam_cmd(priv, cmd); - return; - } - - dtimper = priv->hw->conf.ps_dtim_period ?: 1; - - if (priv->wowlan) - iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); - else if (!priv->lib->no_idle_support && - priv->hw->conf.flags & IEEE80211_CONF_IDLE) - iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); - else if (iwl_tt_is_low_power_state(priv)) { - /* in thermal throttling low power state */ - iwl_static_sleep_cmd(priv, cmd, - iwl_tt_current_power_mode(priv), dtimper); - } else if (!enabled) - iwl_power_sleep_cam_cmd(priv, cmd); - else if (priv->power_data.debug_sleep_level_override >= 0) - iwl_static_sleep_cmd(priv, cmd, - priv->power_data.debug_sleep_level_override, - dtimper); - else { - /* Note that the user parameter is 1-5 (according to spec), - but we pass 0-4 because it acts as an array index. 
*/ - if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 && - iwlwifi_mod_params.power_level <= IWL_POWER_NUM) - iwl_static_sleep_cmd(priv, cmd, - iwlwifi_mod_params.power_level - 1, dtimper); - else - iwl_static_sleep_cmd(priv, cmd, - IWL_POWER_INDEX_1, dtimper); - } -} - -int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, - bool force) -{ - int ret; - bool update_chains; - - lockdep_assert_held(&priv->mutex); - - /* Don't update the RX chain when chain noise calibration is running */ - update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || - priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; - - if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) - return 0; - - if (!iwl_is_ready_rf(priv)) - return -EIO; - - /* scan complete use sleep_power_next, need to be updated */ - memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); - if (test_bit(STATUS_SCANNING, &priv->status) && !force) { - IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n"); - return 0; - } - - if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) - iwl_dvm_set_pmi(priv, true); - - ret = iwl_set_power(priv, cmd); - if (!ret) { - if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) - iwl_dvm_set_pmi(priv, false); - - if (update_chains) - iwl_update_chain_flags(priv); - else - IWL_DEBUG_POWER(priv, - "Cannot update the power, chain noise " - "calibration running: %d\n", - priv->chain_noise_data.state); - - memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); - } else - IWL_ERR(priv, "set power fail, ret = %d\n", ret); - - return ret; -} - -int iwl_power_update_mode(struct iwl_priv *priv, bool force) -{ - struct iwl_powertable_cmd cmd; - - iwl_power_build_cmd(priv, &cmd); - return iwl_power_set_mode(priv, &cmd, force); -} - -/* initialize to default */ -void iwl_power_initialize(struct iwl_priv *priv) -{ - priv->power_data.bus_pm = priv->trans->pm_support; - - priv->power_data.debug_sleep_level_override = -1; - - memset(&priv->power_data.sleep_cmd, 0, - sizeof(priv->power_data.sleep_cmd)); -} diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h deleted file mode 100644 index 570d3a5e4670..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/power.h +++ /dev/null @@ -1,47 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ -#ifndef __iwl_power_setting_h__ -#define __iwl_power_setting_h__ - -#include "commands.h" - -struct iwl_power_mgr { - struct iwl_powertable_cmd sleep_cmd; - struct iwl_powertable_cmd sleep_cmd_next; - int debug_sleep_level_override; - bool bus_pm; -}; - -int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, - bool force); -int iwl_power_update_mode(struct iwl_priv *priv, bool force); -void iwl_power_initialize(struct iwl_priv *priv); - -extern bool no_sleep_autoadjust; - -#endif /* __iwl_power_setting_h__ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c deleted file mode 100644 index cef921c1a623..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ /dev/null @@ -1,3338 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include "dev.h" -#include "agn.h" - -#define RS_NAME "iwl-agn-rs" - -#define NUM_TRY_BEFORE_ANT_TOGGLE 1 -#define IWL_NUMBER_TRY 1 -#define IWL_HT_NUMBER_TRY 3 - -#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ -#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ -#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ - -/* max allowed rate miss before sync LQ cmd */ -#define IWL_MISSED_RATE_MAX 15 -/* max time to accum history 2 seconds */ -#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) - -static u8 rs_ht_to_legacy[] = { - IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, - IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, - IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, - IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX -}; - -static const u8 ant_toggle_lookup[] = { - /*ANT_NONE -> */ ANT_NONE, - /*ANT_A -> */ ANT_B, - /*ANT_B -> */ ANT_C, - /*ANT_AB -> */ ANT_BC, - /*ANT_C -> */ ANT_A, - /*ANT_AC -> */ ANT_AB, - /*ANT_BC -> */ ANT_AC, - /*ANT_ABC -> */ ANT_ABC, -}; - -#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ - [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ - IWL_RATE_SISO_##s##M_PLCP, \ - IWL_RATE_MIMO2_##s##M_PLCP,\ - IWL_RATE_MIMO3_##s##M_PLCP,\ - IWL_RATE_##r##M_IEEE, \ - IWL_RATE_##ip##M_INDEX, \ - IWL_RATE_##in##M_INDEX, \ - IWL_RATE_##rp##M_INDEX, \ - IWL_RATE_##rn##M_INDEX, \ - IWL_RATE_##pp##M_INDEX, \ - IWL_RATE_##np##M_INDEX } - -/* - * Parameter order: - * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate - * - * If there isn't a valid next or previous rate then INV is used which - * maps to IWL_RATE_INVALID - * - */ -const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { - IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ - IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ - IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ - IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ - IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ - IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ - IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ - IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ - IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ - IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ - IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ - IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ - IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ - /* FIXME:RS: ^^ should be INV (legacy) */ -}; - -static inline u8 rs_extract_rate(u32 rate_n_flags) -{ - return (u8)(rate_n_flags & RATE_MCS_RATE_MSK); -} - -static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) -{ - int idx = 0; - - /* HT rate format */ - if (rate_n_flags & RATE_MCS_HT_MSK) { - idx = rs_extract_rate(rate_n_flags); - - if (idx >= IWL_RATE_MIMO3_6M_PLCP) - idx = idx - IWL_RATE_MIMO3_6M_PLCP; - else if (idx >= IWL_RATE_MIMO2_6M_PLCP) - idx = idx - IWL_RATE_MIMO2_6M_PLCP; - - idx += IWL_FIRST_OFDM_RATE; - /* skip 9M not supported in ht*/ - if (idx >= IWL_RATE_9M_INDEX) - idx += 1; - if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) - return idx; - - /* legacy 
rate format, search for match in table */ - } else { - for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) - if (iwl_rates[idx].plcp == - rs_extract_rate(rate_n_flags)) - return idx; - } - - return -1; -} - -static void rs_rate_scale_perform(struct iwl_priv *priv, - struct sk_buff *skb, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta); -static void rs_fill_link_cmd(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, u32 rate_n_flags); -static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); - - -#ifdef CONFIG_MAC80211_DEBUGFS -static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index); -#else -static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index) -{} -#endif - -/** - * The following tables contain the expected throughput metrics for all rates - * - * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits - * - * where invalid entries are zeros. - * - * CCK rates are only valid in legacy table and will only be used in G - * (2.4 GHz) band. - */ - -static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = { - 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 -}; - -static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */ - {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */ - {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */ - {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */ -}; - -static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ - {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ - {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */ - {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */ -}; - -static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */ - {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */ - {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */ - {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/ -}; - -static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ - {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ - {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */ - {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */ -}; - -static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */ - {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */ - {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */ - {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */ -}; - -static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */ - {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */ - {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */ - {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */ -}; - -/* mbps, mcs */ -static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { - { "1", "BPSK DSSS"}, - { "2", "QPSK DSSS"}, - {"5.5", "BPSK CCK"}, - { "11", "QPSK CCK"}, - { "6", "BPSK 1/2"}, - { "9", "BPSK 1/2"}, - { "12", "QPSK 1/2"}, - { "18", "QPSK 3/4"}, - { "24", 
"16QAM 1/2"}, - { "36", "16QAM 3/4"}, - { "48", "64QAM 2/3"}, - { "54", "64QAM 3/4"}, - { "60", "64QAM 5/6"}, -}; - -#define MCS_INDEX_PER_STREAM (8) - -static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) -{ - window->data = 0; - window->success_counter = 0; - window->success_ratio = IWL_INVALID_VALUE; - window->counter = 0; - window->average_tpt = IWL_INVALID_VALUE; - window->stamp = 0; -} - -static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) -{ - return (ant_type & valid_antenna) == ant_type; -} - -/* - * removes the old data from the statistics. All data that is older than - * TID_MAX_TIME_DIFF, will be deleted. - */ -static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) -{ - /* The oldest age we want to keep */ - u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; - - while (tl->queue_count && - (tl->time_stamp < oldest_time)) { - tl->total -= tl->packet_count[tl->head]; - tl->packet_count[tl->head] = 0; - tl->time_stamp += TID_QUEUE_CELL_SPACING; - tl->queue_count--; - tl->head++; - if (tl->head >= TID_QUEUE_MAX_SIZE) - tl->head = 0; - } -} - -/* - * increment traffic load value for tid and also remove - * any old values if passed the certain time period - */ -static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, - struct ieee80211_hdr *hdr) -{ - u32 curr_time = jiffies_to_msecs(jiffies); - u32 time_diff; - s32 index; - struct iwl_traffic_load *tl = NULL; - u8 tid; - - if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & 0xf; - } else - return IWL_MAX_TID_COUNT; - - if (unlikely(tid >= IWL_MAX_TID_COUNT)) - return IWL_MAX_TID_COUNT; - - tl = &lq_data->load[tid]; - - curr_time -= curr_time % TID_ROUND_VALUE; - - /* Happens only for the first packet. Initialize the data */ - if (!(tl->queue_count)) { - tl->total = 1; - tl->time_stamp = curr_time; - tl->queue_count = 1; - tl->head = 0; - tl->packet_count[0] = 1; - return IWL_MAX_TID_COUNT; - } - - time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); - index = time_diff / TID_QUEUE_CELL_SPACING; - - /* The history is too long: remove data that is older than */ - /* TID_MAX_TIME_DIFF */ - if (index >= TID_QUEUE_MAX_SIZE) - rs_tl_rm_old_stats(tl, curr_time); - - index = (tl->head + index) % TID_QUEUE_MAX_SIZE; - tl->packet_count[index] = tl->packet_count[index] + 1; - tl->total = tl->total + 1; - - if ((index + 1) > tl->queue_count) - tl->queue_count = index + 1; - - return tid; -} - -#ifdef CONFIG_MAC80211_DEBUGFS -/** - * Program the device to use fixed rate for frame transmit - * This is for debugging/testing only - * once the device start use fixed rate, we need to reload the module - * to being back the normal operation. 
- */ -static void rs_program_fix_rate(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta) -{ - struct iwl_station_priv *sta_priv = - container_of(lq_sta, struct iwl_station_priv, lq_sta); - struct iwl_rxon_context *ctx = sta_priv->ctx; - - lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ - lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - - IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", - lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); - - if (lq_sta->dbg_fixed_rate) { - rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); - iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, - false); - } -} -#endif - -/* - get the traffic load value for tid -*/ -static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) -{ - u32 curr_time = jiffies_to_msecs(jiffies); - u32 time_diff; - s32 index; - struct iwl_traffic_load *tl = NULL; - - if (tid >= IWL_MAX_TID_COUNT) - return 0; - - tl = &(lq_data->load[tid]); - - curr_time -= curr_time % TID_ROUND_VALUE; - - if (!(tl->queue_count)) - return 0; - - time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); - index = time_diff / TID_QUEUE_CELL_SPACING; - - /* The history is too long: remove data that is older than */ - /* TID_MAX_TIME_DIFF */ - if (index >= TID_QUEUE_MAX_SIZE) - rs_tl_rm_old_stats(tl, curr_time); - - return tl->total; -} - -static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, - struct iwl_lq_sta *lq_data, u8 tid, - struct ieee80211_sta *sta) -{ - int ret = -EAGAIN; - u32 load; - - /* - * Don't create TX aggregation sessions when in high - * BT traffic, as they would just be disrupted by BT. - */ - if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) { - IWL_DEBUG_COEX(priv, - "BT traffic (%d), no aggregation allowed\n", - priv->bt_traffic_load); - return ret; - } - - load = rs_tl_get_load(lq_data, tid); - - IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", - sta->addr, tid); - ret = ieee80211_start_tx_ba_session(sta, tid, 5000); - if (ret == -EAGAIN) { - /* - * driver and mac80211 is out of sync - * this might be cause by reloading firmware - * stop the tx ba session here - */ - IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", - tid); - ieee80211_stop_tx_ba_session(sta, tid); - } - return ret; -} - -static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, - struct iwl_lq_sta *lq_data, - struct ieee80211_sta *sta) -{ - if (tid < IWL_MAX_TID_COUNT) - rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); - else - IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n", - tid, IWL_MAX_TID_COUNT); -} - -static inline int get_num_of_ant_from_rate(u32 rate_n_flags) -{ - return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_C_MSK); -} - -/* - * Static function to get the expected throughput from an iwl_scale_tbl_info - * that wraps a NULL pointer check - */ -static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) -{ - if (tbl->expected_tpt) - return tbl->expected_tpt[rs_index]; - return 0; -} - -/** - * rs_collect_tx_data - Update the success/failure sliding window - * - * We keep a sliding window of the last 62 packets transmitted - * at this rate. window->data contains the bitmask of successful - * packets. 
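
The rs_tl_rm_old_stats()/rs_tl_add_packet() pair above, together with rs_tl_get_load(), keeps a per-TID traffic-load history in a short ring of fixed-width time cells, so the driver can later ask "how many frames went out on this TID recently" before starting a TX aggregation session. The stand-alone sketch below shows the same bookkeeping; it is illustrative only and not part of this patch, and CELL_MS/MAX_CELLS are made-up stand-ins for TID_QUEUE_CELL_SPACING and TID_QUEUE_MAX_SIZE.

#include <stdio.h>
#include <stdint.h>

#define CELL_MS   1000        /* width of one history cell, ms (assumed) */
#define MAX_CELLS 20          /* cells kept in the window (assumed) */

struct traffic_load {
        uint32_t packet_count[MAX_CELLS];
        uint32_t total;       /* sum over all live cells */
        uint32_t time_stamp;  /* time of the oldest cell, ms */
        uint32_t queue_count; /* number of live cells */
        uint32_t head;        /* ring index of the oldest cell */
};

/* Drop cells that fell out of the window, keeping "total" in sync. */
static void tl_trim(struct traffic_load *tl, uint32_t now)
{
        uint32_t oldest = now - (MAX_CELLS - 1) * CELL_MS;

        while (tl->queue_count && tl->time_stamp < oldest) {
                tl->total -= tl->packet_count[tl->head];
                tl->packet_count[tl->head] = 0;
                tl->time_stamp += CELL_MS;
                tl->queue_count--;
                tl->head = (tl->head + 1) % MAX_CELLS;
        }
}

/* Account one transmitted frame at time "now" (ms since some epoch). */
static void tl_add_packet(struct traffic_load *tl, uint32_t now)
{
        uint32_t off, idx;

        now -= now % CELL_MS;                    /* snap to a cell boundary */
        if (!tl->queue_count)
                goto seed;
        off = (now - tl->time_stamp) / CELL_MS;
        if (off >= MAX_CELLS) {
                tl_trim(tl, now);                /* history too old, purge */
                if (!tl->queue_count)
                        goto seed;               /* everything aged out */
                off = (now - tl->time_stamp) / CELL_MS;
        }
        idx = (tl->head + off) % MAX_CELLS;
        tl->packet_count[idx]++;
        tl->total++;
        if (off + 1 > tl->queue_count)
                tl->queue_count = off + 1;
        return;
seed:                                            /* first frame: start over */
        tl->total = 1;
        tl->time_stamp = now;
        tl->queue_count = 1;
        tl->head = 0;
        tl->packet_count[0] = 1;
}

int main(void)
{
        struct traffic_load tl = { {0} };
        uint32_t t;

        for (t = 0; t < 60000; t += 500)         /* two frames per cell */
                tl_add_packet(&tl, t);
        printf("frames seen in the last window: %u\n", tl.total);
        return 0;
}
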
- */ -static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, - int scale_index, int attempts, int successes) -{ - struct iwl_rate_scale_data *window = NULL; - static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); - s32 fail_count, tpt; - - if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) - return -EINVAL; - - /* Select window for current tx bit rate */ - window = &(tbl->win[scale_index]); - - /* Get expected throughput */ - tpt = get_expected_tpt(tbl, scale_index); - - /* - * Keep track of only the latest 62 tx frame attempts in this rate's - * history window; anything older isn't really relevant any more. - * If we have filled up the sliding window, drop the oldest attempt; - * if the oldest attempt (highest bit in bitmap) shows "success", - * subtract "1" from the success counter (this is the main reason - * we keep these bitmaps!). - */ - while (attempts > 0) { - if (window->counter >= IWL_RATE_MAX_WINDOW) { - - /* remove earliest */ - window->counter = IWL_RATE_MAX_WINDOW - 1; - - if (window->data & mask) { - window->data &= ~mask; - window->success_counter--; - } - } - - /* Increment frames-attempted counter */ - window->counter++; - - /* Shift bitmap by one frame to throw away oldest history */ - window->data <<= 1; - - /* Mark the most recent #successes attempts as successful */ - if (successes > 0) { - window->success_counter++; - window->data |= 0x1; - successes--; - } - - attempts--; - } - - /* Calculate current success ratio, avoid divide-by-0! */ - if (window->counter > 0) - window->success_ratio = 128 * (100 * window->success_counter) - / window->counter; - else - window->success_ratio = IWL_INVALID_VALUE; - - fail_count = window->counter - window->success_counter; - - /* Calculate average throughput, if we have enough history. */ - if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || - (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) - window->average_tpt = (window->success_ratio * tpt + 64) / 128; - else - window->average_tpt = IWL_INVALID_VALUE; - - /* Tag this window as having been updated */ - window->stamp = jiffies; - - return 0; -} - -/* - * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
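
rs_collect_tx_data() above maintains a 62-frame sliding window per rate: a bitmask of ACK results plus counters, from which it derives a fixed-point success ratio (128 * percent) and, once there is enough history, an average throughput equal to the rate's expected throughput scaled by that ratio. The compilable sketch below repeats the same arithmetic with the window size and thresholds copied from the constants near the top of the file; it is illustrative only and not part of the patch.

#include <stdio.h>
#include <stdint.h>

#define RATE_MAX_WINDOW 62
#define MIN_FAILURE_TH   6
#define MIN_SUCCESS_TH   8
#define INVALID_VALUE  (-1)

struct rs_window {
        uint64_t data;            /* bit i set => frame i was ACKed */
        int32_t counter;          /* frames accounted in the window */
        int32_t success_counter;  /* ACKed frames in the window */
        int32_t success_ratio;    /* 128 * percent, or INVALID_VALUE */
        int32_t average_tpt;      /* expected tpt scaled by success, or INVALID_VALUE */
};

static void collect_tx_data(struct rs_window *win, int32_t expected_tpt,
                            int attempts, int successes)
{
        const uint64_t oldest = 1ULL << (RATE_MAX_WINDOW - 1);

        while (attempts-- > 0) {
                if (win->counter >= RATE_MAX_WINDOW) {
                        /* window full: retire the oldest attempt */
                        win->counter = RATE_MAX_WINDOW - 1;
                        if (win->data & oldest) {
                                win->data &= ~oldest;
                                win->success_counter--;
                        }
                }
                win->counter++;
                win->data <<= 1;          /* shift history by one frame */
                if (successes > 0) {      /* mark the ACKed attempts */
                        win->success_counter++;
                        win->data |= 1;
                        successes--;
                }
        }

        win->success_ratio = win->counter ?
                128 * (100 * win->success_counter) / win->counter : INVALID_VALUE;

        /* only trust the average once there is enough history */
        if (win->counter - win->success_counter >= MIN_FAILURE_TH ||
            win->success_counter >= MIN_SUCCESS_TH)
                win->average_tpt = (win->success_ratio * expected_tpt + 64) / 128;
        else
                win->average_tpt = INVALID_VALUE;
}

int main(void)
{
        struct rs_window win = { 0 };

        /* 20 attempts, 15 ACKed, at a rate whose expected tpt is 102
         * (units as in the expected_tpt_* tables above) */
        collect_tx_data(&win, 102, 20, 15);
        printf("ratio=%d (128*percent) avg_tpt=%d\n",
               win.success_ratio, win.average_tpt);
        return 0;
}
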
- */ -/* FIXME:RS:remove this function and put the flags statically in the table */ -static u32 rate_n_flags_from_tbl(struct iwl_priv *priv, - struct iwl_scale_tbl_info *tbl, - int index, u8 use_green) -{ - u32 rate_n_flags = 0; - - if (is_legacy(tbl->lq_type)) { - rate_n_flags = iwl_rates[index].plcp; - if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) - rate_n_flags |= RATE_MCS_CCK_MSK; - - } else if (is_Ht(tbl->lq_type)) { - if (index > IWL_LAST_OFDM_RATE) { - IWL_ERR(priv, "Invalid HT rate index %d\n", index); - index = IWL_LAST_OFDM_RATE; - } - rate_n_flags = RATE_MCS_HT_MSK; - - if (is_siso(tbl->lq_type)) - rate_n_flags |= iwl_rates[index].plcp_siso; - else if (is_mimo2(tbl->lq_type)) - rate_n_flags |= iwl_rates[index].plcp_mimo2; - else - rate_n_flags |= iwl_rates[index].plcp_mimo3; - } else { - IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type); - } - - rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & - RATE_MCS_ANT_ABC_MSK); - - if (is_Ht(tbl->lq_type)) { - if (tbl->is_ht40) { - if (tbl->is_dup) - rate_n_flags |= RATE_MCS_DUP_MSK; - else - rate_n_flags |= RATE_MCS_HT40_MSK; - } - if (tbl->is_SGI) - rate_n_flags |= RATE_MCS_SGI_MSK; - - if (use_green) { - rate_n_flags |= RATE_MCS_GF_MSK; - if (is_siso(tbl->lq_type) && tbl->is_SGI) { - rate_n_flags &= ~RATE_MCS_SGI_MSK; - IWL_ERR(priv, "GF was set with SGI:SISO\n"); - } - } - } - return rate_n_flags; -} - -/* - * Interpret uCode API's rate_n_flags format, - * fill "search" or "active" tx mode table. - */ -static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, - enum ieee80211_band band, - struct iwl_scale_tbl_info *tbl, - int *rate_idx) -{ - u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); - u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags); - u8 mcs; - - memset(tbl, 0, sizeof(struct iwl_scale_tbl_info)); - *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); - - if (*rate_idx == IWL_RATE_INVALID) { - *rate_idx = -1; - return -EINVAL; - } - tbl->is_SGI = 0; /* default legacy setup */ - tbl->is_ht40 = 0; - tbl->is_dup = 0; - tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); - tbl->lq_type = LQ_NONE; - tbl->max_search = IWL_MAX_SEARCH; - - /* legacy rate format */ - if (!(rate_n_flags & RATE_MCS_HT_MSK)) { - if (num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) - tbl->lq_type = LQ_A; - else - tbl->lq_type = LQ_G; - } - /* HT rate format */ - } else { - if (rate_n_flags & RATE_MCS_SGI_MSK) - tbl->is_SGI = 1; - - if ((rate_n_flags & RATE_MCS_HT40_MSK) || - (rate_n_flags & RATE_MCS_DUP_MSK)) - tbl->is_ht40 = 1; - - if (rate_n_flags & RATE_MCS_DUP_MSK) - tbl->is_dup = 1; - - mcs = rs_extract_rate(rate_n_flags); - - /* SISO */ - if (mcs <= IWL_RATE_SISO_60M_PLCP) { - if (num_of_ant == 1) - tbl->lq_type = LQ_SISO; /*else NONE*/ - /* MIMO2 */ - } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) { - if (num_of_ant == 2) - tbl->lq_type = LQ_MIMO2; - /* MIMO3 */ - } else { - if (num_of_ant == 3) { - tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; - tbl->lq_type = LQ_MIMO3; - } - } - } - return 0; -} - -/* switch to another antenna/antennas and return 1 */ -/* if no other valid antenna found, return 0 */ -static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, - struct iwl_scale_tbl_info *tbl) -{ - u8 new_ant_type; - - if (!tbl->ant_type || tbl->ant_type > ANT_ABC) - return 0; - - if (!rs_is_valid_ant(valid_ant, tbl->ant_type)) - return 0; - - new_ant_type = ant_toggle_lookup[tbl->ant_type]; - - while ((new_ant_type != tbl->ant_type) && - !rs_is_valid_ant(valid_ant, new_ant_type)) - new_ant_type = 
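
rate_n_flags_from_tbl() and rs_get_tbl_info_from_mcs() above are the two directions of the same conversion: table state (legacy vs. HT, SGI, HT40/duplicate, antenna mask) packed into the uCode's 32-bit rate_n_flags word, and back again. The toy below shows only the shape of that conversion; every EX_* bit position is invented for the example and does not correspond to the real RATE_MCS_* layout, and the CCK/OFDM PLCP handling, MIMO PLCP offsets and greenfield flag are deliberately omitted.

#include <stdio.h>
#include <stdint.h>

#define EX_RATE_MSK   0x00ffu          /* PLCP / MCS value (assumed) */
#define EX_HT_FLAG    (1u << 8)        /* HT rate format (assumed) */
#define EX_SGI_FLAG   (1u << 9)        /* short guard interval (assumed) */
#define EX_HT40_FLAG  (1u << 10)       /* 40 MHz channel (assumed) */
#define EX_ANT_POS    12
#define EX_ANT_MSK    (0x7u << EX_ANT_POS)

struct ex_tbl {
        int is_ht, is_sgi, is_ht40;
        unsigned ant;                  /* ANT_A=1, ANT_B=2, ANT_C=4, ORed */
};

static uint32_t ex_pack(const struct ex_tbl *t, unsigned mcs)
{
        uint32_t r = mcs & EX_RATE_MSK;

        if (t->is_ht)
                r |= EX_HT_FLAG;
        if (t->is_sgi)
                r |= EX_SGI_FLAG;
        if (t->is_ht40)
                r |= EX_HT40_FLAG;
        r |= (t->ant << EX_ANT_POS) & EX_ANT_MSK;
        return r;
}

static void ex_unpack(uint32_t r, struct ex_tbl *t, unsigned *mcs)
{
        *mcs = r & EX_RATE_MSK;
        t->is_ht = !!(r & EX_HT_FLAG);
        t->is_sgi = !!(r & EX_SGI_FLAG);
        t->is_ht40 = !!(r & EX_HT40_FLAG);
        t->ant = (r & EX_ANT_MSK) >> EX_ANT_POS;
}

int main(void)
{
        struct ex_tbl in = { 1, 1, 0, 0x3 /* antennas A+B */ }, out;
        unsigned mcs;
        uint32_t r = ex_pack(&in, 7);

        ex_unpack(r, &out, &mcs);
        printf("word=0x%04x mcs=%u ht=%d sgi=%d ht40=%d ant=0x%x\n",
               r, mcs, out.is_ht, out.is_sgi, out.is_ht40, out.ant);
        return 0;
}
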
ant_toggle_lookup[new_ant_type]; - - if (new_ant_type == tbl->ant_type) - return 0; - - tbl->ant_type = new_ant_type; - *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; - *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; - return 1; -} - -/** - * Green-field mode is valid if the station supports it and - * there are no non-GF stations present in the BSS. - */ -static bool rs_use_green(struct ieee80211_sta *sta) -{ - /* - * There's a bug somewhere in this code that causes the - * scaling to get stuck because GF+SGI can't be combined - * in SISO rates. Until we find that bug, disable GF, it - * has only limited benefit and we still interoperate with - * GF APs since we can always receive GF transmissions. - */ - return false; -} - -/** - * rs_get_supported_rates - get the available rates - * - * if management frame or broadcast frame only return - * basic available rates. - * - */ -static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta, - struct ieee80211_hdr *hdr, - enum iwl_table_type rate_type) -{ - if (is_legacy(rate_type)) { - return lq_sta->active_legacy_rate; - } else { - if (is_siso(rate_type)) - return lq_sta->active_siso_rate; - else if (is_mimo2(rate_type)) - return lq_sta->active_mimo2_rate; - else - return lq_sta->active_mimo3_rate; - } -} - -static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask, - int rate_type) -{ - u8 high = IWL_RATE_INVALID; - u8 low = IWL_RATE_INVALID; - - /* 802.11A or ht walks to the next literal adjacent rate in - * the rate table */ - if (is_a_band(rate_type) || !is_legacy(rate_type)) { - int i; - u32 mask; - - /* Find the previous rate that is in the rate mask */ - i = index - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { - if (rate_mask & mask) { - low = i; - break; - } - } - - /* Find the next rate that is in the rate mask */ - i = index + 1; - for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { - if (rate_mask & mask) { - high = i; - break; - } - } - - return (high << 8) | low; - } - - low = index; - while (low != IWL_RATE_INVALID) { - low = iwl_rates[low].prev_rs; - if (low == IWL_RATE_INVALID) - break; - if (rate_mask & (1 << low)) - break; - IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); - } - - high = index; - while (high != IWL_RATE_INVALID) { - high = iwl_rates[high].next_rs; - if (high == IWL_RATE_INVALID) - break; - if (rate_mask & (1 << high)) - break; - IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); - } - - return (high << 8) | low; -} - -static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - u8 scale_index, u8 ht_possible) -{ - s32 low; - u16 rate_mask; - u16 high_low; - u8 switch_to_legacy = 0; - u8 is_green = lq_sta->is_green; - struct iwl_priv *priv = lq_sta->drv; - - /* check if we need to switch from HT to legacy rates. 
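
rs_get_adjacent_rate() above returns both neighbours of the current rate packed into one u16: high byte = next-higher valid index, low byte = next-lower, with the supported-rate bitmap deciding validity. A small stand-alone version of that bitmap scan follows (EX_RATE_COUNT and EX_INVALID are local stand-ins for IWL_RATE_COUNT and IWL_RATE_INVALID; the legacy prev_rs/next_rs walk is omitted).

#include <stdio.h>
#include <stdint.h>

#define EX_RATE_COUNT 13
#define EX_INVALID    0xff

static uint16_t adjacent_rates(uint16_t rate_mask, int index)
{
        uint8_t low = EX_INVALID, high = EX_INVALID;
        int i;

        for (i = index - 1; i >= 0; i--)            /* nearest lower rate */
                if (rate_mask & (1 << i)) {
                        low = i;
                        break;
                }
        for (i = index + 1; i < EX_RATE_COUNT; i++) /* nearest higher rate */
                if (rate_mask & (1 << i)) {
                        high = i;
                        break;
                }
        return (high << 8) | low;
}

int main(void)
{
        /* rates 4 (6M), 6 (12M) and 8 (24M) allowed; current index is 6 */
        uint16_t hl = adjacent_rates((1 << 4) | (1 << 6) | (1 << 8), 6);

        printf("low=%u high=%u\n", hl & 0xff, (hl >> 8) & 0xff);
        return 0;
}
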
- * assumption is that mandatory rates (1Mbps or 6Mbps) - * are always supported (spec demand) */ - if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { - switch_to_legacy = 1; - scale_index = rs_ht_to_legacy[scale_index]; - if (lq_sta->band == IEEE80211_BAND_5GHZ) - tbl->lq_type = LQ_A; - else - tbl->lq_type = LQ_G; - - if (num_of_ant(tbl->ant_type) > 1) - tbl->ant_type = - first_antenna(priv->nvm_data->valid_tx_ant); - - tbl->is_ht40 = 0; - tbl->is_SGI = 0; - tbl->max_search = IWL_MAX_SEARCH; - } - - rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); - - /* Mask with station rate restriction */ - if (is_legacy(tbl->lq_type)) { - /* supp_rates has no CCK bits in A mode */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) - rate_mask = (u16)(rate_mask & - (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); - else - rate_mask = (u16)(rate_mask & lq_sta->supp_rates); - } - - /* If we switched from HT to legacy, check current rate */ - if (switch_to_legacy && (rate_mask & (1 << scale_index))) { - low = scale_index; - goto out; - } - - high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask, - tbl->lq_type); - low = high_low & 0xff; - - if (low == IWL_RATE_INVALID) - low = scale_index; - -out: - return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); -} - -/* - * Simple function to compare two rate scale table types - */ -static bool table_type_matches(struct iwl_scale_tbl_info *a, - struct iwl_scale_tbl_info *b) -{ - return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && - (a->is_SGI == b->is_SGI); -} - -static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct iwl_lq_sta *lq_sta) -{ - struct iwl_scale_tbl_info *tbl; - bool full_concurrent = priv->bt_full_concurrent; - - if (priv->bt_ant_couple_ok) { - /* - * Is there a need to switch between - * full concurrency and 3-wire? - */ - if (priv->bt_ci_compliance && priv->bt_ant_couple_ok) - full_concurrent = true; - else - full_concurrent = false; - } - if ((priv->bt_traffic_load != priv->last_bt_traffic_load) || - (priv->bt_full_concurrent != full_concurrent)) { - priv->bt_full_concurrent = full_concurrent; - priv->last_bt_traffic_load = priv->bt_traffic_load; - - /* Update uCode's rate table. */ - tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); - iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); - - queue_work(priv->workqueue, &priv->bt_full_concurrency); - } -} - -/* - * mac80211 sends us Tx status - */ -static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct sk_buff *skb) -{ - int legacy_success; - int retries; - int rs_index, mac_index, i; - struct iwl_lq_sta *lq_sta = priv_sta; - struct iwl_link_quality_cmd *table; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_op_mode *op_mode = (struct iwl_op_mode *)priv_r; - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - enum mac80211_rate_control_flags mac_flags; - u32 tx_rate; - struct iwl_scale_tbl_info tbl_type; - struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); - - /* Treat uninitialized rate scaling data same as non-existing. 
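
One detail of rs_get_lower_rate() worth calling out (it recurs in rs_rate_scale_perform() further down): before a fallback is chosen, the mode's rate bitmap is ANDed with the station's supported rates, and on 5 GHz the station bitmap, which carries no CCK entries, is first shifted up so that it lines up with the driver's rate table. The sketch below shows just that masking; EX_FIRST_OFDM stands in for IWL_FIRST_OFDM_RATE (index 4 in the iwl_rates table above).

#include <stdio.h>
#include <stdint.h>

#define EX_FIRST_OFDM 4      /* index of the 6M rate in the driver table */

static uint16_t restrict_rates(uint16_t mode_mask, uint16_t supp_rates,
                               int is_5ghz)
{
        if (is_5ghz)                 /* no CCK bits in the station bitmap */
                return mode_mask & (supp_rates << EX_FIRST_OFDM);
        return mode_mask & supp_rates;
}

int main(void)
{
        uint16_t legacy_mask = 0x0fff;   /* 1M .. 54M, CCK included */
        uint16_t supp_5ghz   = 0x00ff;   /* eight OFDM rates, no CCK */

        printf("usable on 5 GHz: 0x%04x\n",
               restrict_rates(legacy_mask, supp_5ghz, 1));
        return 0;
}
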
*/ - if (!lq_sta) { - IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n"); - return; - } else if (!lq_sta->drv) { - IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); - return; - } - - if (!ieee80211_is_data(hdr->frame_control) || - info->flags & IEEE80211_TX_CTL_NO_ACK) - return; - - /* This packet was aggregated but doesn't carry status info */ - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && - !(info->flags & IEEE80211_TX_STAT_AMPDU)) - return; - - /* - * Ignore this Tx frame response if its initial rate doesn't match - * that of latest Link Quality command. There may be stragglers - * from a previous Link Quality command, but we're no longer interested - * in those; they're either from the "active" mode while we're trying - * to check "search" mode, or a prior "search" mode after we've moved - * to a new "search" mode (which might become the new "active" mode). - */ - table = &lq_sta->lq; - tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); - rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); - if (priv->band == IEEE80211_BAND_5GHZ) - rs_index -= IWL_FIRST_OFDM_RATE; - mac_flags = info->status.rates[0].flags; - mac_index = info->status.rates[0].idx; - /* For HT packets, map MCS to PLCP */ - if (mac_flags & IEEE80211_TX_RC_MCS) { - mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */ - if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) - mac_index++; - /* - * mac80211 HT index is always zero-indexed; we need to move - * HT OFDM rates after CCK rates in 2.4 GHz band - */ - if (priv->band == IEEE80211_BAND_2GHZ) - mac_index += IWL_FIRST_OFDM_RATE; - } - /* Here we actually compare this rate to the latest LQ command */ - if ((mac_index < 0) || - (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || - (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || - (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || - (tbl_type.ant_type != info->status.antenna) || - (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) || - (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || - (rs_index != mac_index)) { - IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate); - /* - * Since rates mis-match, the last LQ command may have failed. - * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with - * ... driver. 
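
The comparison above protects the statistics from stragglers: a status report whose initial rate does not match the first entry of the latest link-quality command is ignored, and after IWL_MISSED_RATE_MAX such mismatches the command is resent to bring firmware and driver back in sync. A compressed model of that guard follows; resend_lq_cmd() is a placeholder for iwl_send_lq_cmd(), and the rate word is treated as an opaque value.

#include <stdio.h>
#include <stdint.h>

#define MISSED_RATE_MAX 15

struct lq_state {
        uint32_t programmed_rate;   /* first entry of the last LQ command */
        int missed_rate_counter;
};

static void resend_lq_cmd(struct lq_state *lq)
{
        printf("resync: resending LQ command for rate 0x%x\n",
               lq->programmed_rate);
}

/* Returns 1 if the status report should feed the rate-scale history. */
static int check_tx_status_rate(struct lq_state *lq, uint32_t reported_rate)
{
        if (reported_rate != lq->programmed_rate) {
                if (++lq->missed_rate_counter > MISSED_RATE_MAX) {
                        lq->missed_rate_counter = 0;
                        resend_lq_cmd(lq);
                }
                return 0;           /* stale rate: ignore this status */
        }
        lq->missed_rate_counter = 0;
        return 1;
}

int main(void)
{
        struct lq_state lq = { 0x4103, 0 };
        int i;

        for (i = 0; i < 20; i++)    /* a burst of stragglers */
                check_tx_status_rate(&lq, 0x4101);
        return 0;
}
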
- */ - lq_sta->missed_rate_counter++; - if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { - lq_sta->missed_rate_counter = 0; - iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); - } - /* Regardless, ignore this status info for outdated rate */ - return; - } else - /* Rate did match, so reset the missed_rate_counter */ - lq_sta->missed_rate_counter = 0; - - /* Figure out if rate scale algorithm is in active or search table */ - if (table_type_matches(&tbl_type, - &(lq_sta->lq_info[lq_sta->active_tbl]))) { - curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - } else if (table_type_matches(&tbl_type, - &lq_sta->lq_info[1 - lq_sta->active_tbl])) { - curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - } else { - IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); - tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n", - tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); - tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n", - tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); - IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n", - tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI); - /* - * no matching table found, let's by-pass the data collection - * and continue to perform rate scale to find the rate table - */ - rs_stay_in_table(lq_sta, true); - goto done; - } - - /* - * Updating the frame history depends on whether packets were - * aggregated. - * - * For aggregation, all packets were transmitted at the same rate, the - * first index into rate scale table. - */ - if (info->flags & IEEE80211_TX_STAT_AMPDU) { - tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); - rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, - &rs_index); - rs_collect_tx_data(curr_tbl, rs_index, - info->status.ampdu_len, - info->status.ampdu_ack_len); - - /* Update success/fail counts if not searching for new mode */ - if (lq_sta->stay_in_tbl) { - lq_sta->total_success += info->status.ampdu_ack_len; - lq_sta->total_failed += (info->status.ampdu_len - - info->status.ampdu_ack_len); - } - } else { - /* - * For legacy, update frame history with for each Tx retry. - */ - retries = info->status.rates[0].count - 1; - /* HW doesn't send more than 15 retries */ - retries = min(retries, 15); - - /* The last transmission may have been successful */ - legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); - /* Collect data for each rate used during failed TX attempts */ - for (i = 0; i <= retries; ++i) { - tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); - rs_get_tbl_info_from_mcs(tx_rate, priv->band, - &tbl_type, &rs_index); - /* - * Only collect stats if retried rate is in the same RS - * table as active/search. - */ - if (table_type_matches(&tbl_type, curr_tbl)) - tmp_tbl = curr_tbl; - else if (table_type_matches(&tbl_type, other_tbl)) - tmp_tbl = other_tbl; - else - continue; - rs_collect_tx_data(tmp_tbl, rs_index, 1, - i < retries ? 
0 : legacy_success); - } - - /* Update success/fail counts if not searching for new mode */ - if (lq_sta->stay_in_tbl) { - lq_sta->total_success += legacy_success; - lq_sta->total_failed += retries + (1 - legacy_success); - } - } - /* The last TX rate is cached in lq_sta; it's set in if/else above */ - lq_sta->last_rate_n_flags = tx_rate; -done: - /* See if there's a better rate or modulation mode to try. */ - if (sta && sta->supp_rates[sband->band]) - rs_rate_scale_perform(priv, skb, sta, lq_sta); - - if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) - rs_bt_update_lq(priv, ctx, lq_sta); -} - -/* - * Begin a period of staying with a selected modulation mode. - * Set "stay_in_tbl" flag to prevent any mode switches. - * Set frame tx success limits according to legacy vs. high-throughput, - * and reset overall (spanning all rates) tx success history statistics. - * These control how long we stay using same modulation mode before - * searching for a new mode. - */ -static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, - struct iwl_lq_sta *lq_sta) -{ - IWL_DEBUG_RATE(priv, "we are staying in the same table\n"); - lq_sta->stay_in_tbl = 1; /* only place this gets set */ - if (is_legacy) { - lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; - lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; - lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; - } else { - lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; - lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; - lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; - } - lq_sta->table_count = 0; - lq_sta->total_failed = 0; - lq_sta->total_success = 0; - lq_sta->flush_timer = jiffies; - lq_sta->action_counter = 0; -} - -/* - * Find correct throughput table for given mode of modulation - */ -static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl) -{ - /* Used to choose among HT tables */ - const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT]; - - /* Check for invalid LQ type */ - if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { - tbl->expected_tpt = expected_tpt_legacy; - return; - } - - /* Legacy rates have only one table */ - if (is_legacy(tbl->lq_type)) { - tbl->expected_tpt = expected_tpt_legacy; - return; - } - - /* Choose among many HT tables depending on number of streams - * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation - * status */ - if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) - ht_tbl_pointer = expected_tpt_siso20MHz; - else if (is_siso(tbl->lq_type)) - ht_tbl_pointer = expected_tpt_siso40MHz; - else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) - ht_tbl_pointer = expected_tpt_mimo2_20MHz; - else if (is_mimo2(tbl->lq_type)) - ht_tbl_pointer = expected_tpt_mimo2_40MHz; - else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) - ht_tbl_pointer = expected_tpt_mimo3_20MHz; - else /* if (is_mimo3(tbl->lq_type)) <-- must be true */ - ht_tbl_pointer = expected_tpt_mimo3_40MHz; - - if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ - tbl->expected_tpt = ht_tbl_pointer[0]; - else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ - tbl->expected_tpt = ht_tbl_pointer[1]; - else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ - tbl->expected_tpt = ht_tbl_pointer[2]; - else /* AGG+SGI */ - tbl->expected_tpt = ht_tbl_pointer[3]; -} - -/* - * Find starting rate for new "search" high-throughput mode of modulation. 
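
rs_set_expected_tpt_table() above picks one of the expected-throughput tables by stream count and channel width, then one of its four rows by the (SGI, aggregation) combination. The sketch below reuses a truncated copy of expected_tpt_siso20MHz purely to show the row selection; it is illustrative and not part of the patch.

#include <stdio.h>
#include <stdint.h>

#define EX_RATE_COUNT 13

static const uint16_t ex_siso20[4][EX_RATE_COUNT] = {
        {0, 0, 0, 0,  42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
        {0, 0, 0, 0,  46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
        {0, 0, 0, 0,  47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
        {0, 0, 0, 0,  52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};

static const uint16_t *pick_tpt_row(int is_sgi, int is_agg)
{
        if (!is_sgi && !is_agg)
                return ex_siso20[0];
        if (is_sgi && !is_agg)
                return ex_siso20[1];
        if (!is_sgi && is_agg)
                return ex_siso20[2];
        return ex_siso20[3];
}

int main(void)
{
        const uint16_t *row = pick_tpt_row(1, 1);

        /* index 12 is the 60M entry; CCK slots (0..3) stay zero in HT rows */
        printf("expected tpt for the 60M SISO rate, SGI+AGG: %u\n", row[12]);
        return 0;
}
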
- * Goal is to find lowest expected rate (under perfect conditions) that is - * above the current measured throughput of "active" mode, to give new mode - * a fair chance to prove itself without too many challenges. - * - * This gets called when transitioning to more aggressive modulation - * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive - * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need - * to decrease to match "active" throughput. When moving from MIMO to SISO, - * bit rate will typically need to increase, but not if performance was bad. - */ -static s32 rs_get_best_rate(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, /* "search" */ - u16 rate_mask, s8 index) -{ - /* "active" values */ - struct iwl_scale_tbl_info *active_tbl = - &(lq_sta->lq_info[lq_sta->active_tbl]); - s32 active_sr = active_tbl->win[index].success_ratio; - s32 active_tpt = active_tbl->expected_tpt[index]; - /* expected "search" throughput */ - const u16 *tpt_tbl = tbl->expected_tpt; - - s32 new_rate, high, low, start_hi; - u16 high_low; - s8 rate = index; - - new_rate = high = low = start_hi = IWL_RATE_INVALID; - - for (; ;) { - high_low = rs_get_adjacent_rate(priv, rate, rate_mask, - tbl->lq_type); - - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* - * Lower the "search" bit rate, to give new "search" mode - * approximately the same throughput as "active" if: - * - * 1) "Active" mode has been working modestly well (but not - * great), and expected "search" throughput (under perfect - * conditions) at candidate rate is above the actual - * measured "active" throughput (but less than expected - * "active" throughput under perfect conditions). - * OR - * 2) "Active" mode has been working perfectly or very well - * and expected "search" throughput (under perfect - * conditions) at candidate rate is above expected - * "active" throughput (under perfect conditions). - */ - if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) && - ((active_sr > IWL_RATE_DECREASE_TH) && - (active_sr <= IWL_RATE_HIGH_TH) && - (tpt_tbl[rate] <= active_tpt))) || - ((active_sr >= IWL_RATE_SCALE_SWITCH) && - (tpt_tbl[rate] > active_tpt))) { - - /* (2nd or later pass) - * If we've already tried to raise the rate, and are - * now trying to lower it, use the higher rate. */ - if (start_hi != IWL_RATE_INVALID) { - new_rate = start_hi; - break; - } - - new_rate = rate; - - /* Loop again with lower rate */ - if (low != IWL_RATE_INVALID) - rate = low; - - /* Lower rate not available, use the original */ - else - break; - - /* Else try to raise the "search" rate to match "active" */ - } else { - /* (2nd or later pass) - * If we've already tried to lower the rate, and are - * now trying to raise it, use the lower rate. 
*/ - if (new_rate != IWL_RATE_INVALID) - break; - - /* Loop again with higher rate */ - else if (high != IWL_RATE_INVALID) { - start_hi = high; - rate = high; - - /* Higher rate not available, use the original */ - } else { - new_rate = rate; - break; - } - } - } - - return new_rate; -} - -/* - * Set up search table for MIMO2 - */ -static int rs_switch_to_mimo2(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, int index) -{ - u16 rate_mask; - s32 rate; - s8 is_green = lq_sta->is_green; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) - return -1; - - if (sta->smps_mode == IEEE80211_SMPS_STATIC) - return -1; - - /* Need both Tx chains/antennas to support MIMO */ - if (priv->hw_params.tx_chains_num < 2) - return -1; - - IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); - - tbl->lq_type = LQ_MIMO2; - tbl->is_dup = lq_sta->is_dup; - tbl->action = 0; - tbl->max_search = IWL_MAX_SEARCH; - rate_mask = lq_sta->active_mimo2_rate; - - if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) - tbl->is_ht40 = 1; - else - tbl->is_ht40 = 0; - - rs_set_expected_tpt_table(lq_sta, tbl); - - rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); - - IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask); - if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { - IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n", - rate, rate_mask); - return -1; - } - tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); - - IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", - tbl->current_rate, is_green); - return 0; -} - -/* - * Set up search table for MIMO3 - */ -static int rs_switch_to_mimo3(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, int index) -{ - u16 rate_mask; - s32 rate; - s8 is_green = lq_sta->is_green; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) - return -1; - - if (sta->smps_mode == IEEE80211_SMPS_STATIC) - return -1; - - /* Need both Tx chains/antennas to support MIMO */ - if (priv->hw_params.tx_chains_num < 3) - return -1; - - IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n"); - - tbl->lq_type = LQ_MIMO3; - tbl->is_dup = lq_sta->is_dup; - tbl->action = 0; - tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; - rate_mask = lq_sta->active_mimo3_rate; - - if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) - tbl->is_ht40 = 1; - else - tbl->is_ht40 = 0; - - rs_set_expected_tpt_table(lq_sta, tbl); - - rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); - - IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n", - rate, rate_mask); - if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { - IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n", - rate, rate_mask); - return -1; - } - tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); - - IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", - tbl->current_rate, is_green); - return 0; -} - -/* - * Set up search table for SISO - */ -static int rs_switch_to_siso(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, int index) -{ 
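
rs_get_best_rate() above chooses the starting rate for a new "search" mode: the lowest candidate whose perfect-condition throughput still beats what the "active" mode actually measured, so the new mode gets a fair trial. The simplified walk below keeps only that core idea; the real function also weighs the active success ratio and remembers whether it has already stepped upward. The table in main() is the "Norm" row of expected_tpt_mimo2_20MHz from above, and the x100 scaling mirrors how the driver compares against last_tpt.

#include <stdio.h>
#include <stdint.h>

#define EX_RATE_COUNT 13
#define EX_INVALID    (-1)

static int best_start_rate(const uint16_t *search_tpt, uint16_t rate_mask,
                           int start, int active_measured_tpt)
{
        int best = EX_INVALID, i;

        for (i = start; i >= 0; i--) {
                if (!(rate_mask & (1 << i)) || !search_tpt[i])
                        continue;
                if (100 * search_tpt[i] <= active_measured_tpt)
                        break;          /* would start below active tpt */
                best = i;               /* still above it: keep walking down */
        }
        return best;
}

int main(void)
{
        static const uint16_t mimo2_20[EX_RATE_COUNT] =
                {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251};
        /* active mode measured roughly 150 tpt units (x100 as in the driver) */
        int idx = best_start_rate(mimo2_20, 0x1ff0, 12, 150 * 100);

        printf("start MIMO2 search at rate index %d\n", idx);
        return 0;
}
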
- u16 rate_mask; - u8 is_green = lq_sta->is_green; - s32 rate; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) - return -1; - - IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n"); - - tbl->is_dup = lq_sta->is_dup; - tbl->lq_type = LQ_SISO; - tbl->action = 0; - tbl->max_search = IWL_MAX_SEARCH; - rate_mask = lq_sta->active_siso_rate; - - if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) - tbl->is_ht40 = 1; - else - tbl->is_ht40 = 0; - - if (is_green) - tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/ - - rs_set_expected_tpt_table(lq_sta, tbl); - rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); - - IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask); - if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { - IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n", - rate, rate_mask); - return -1; - } - tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); - IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", - tbl->current_rate, is_green); - return 0; -} - -/* - * Try to switch to new modulation mode from legacy - */ -static void rs_move_legacy_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - int index) -{ - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct iwl_rate_scale_data *window = &(tbl->win[index]); - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = priv->nvm_data->valid_tx_ant; - u8 tx_chains_num = priv->hw_params.tx_chains_num; - int ret = 0; - u8 update_search_tbl_counter = 0; - - switch (priv->bt_traffic_load) { - case IWL_BT_COEX_TRAFFIC_LOAD_NONE: - /* nothing */ - break; - case IWL_BT_COEX_TRAFFIC_LOAD_LOW: - /* avoid antenna B unless MIMO */ - if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2) - tbl->action = IWL_LEGACY_SWITCH_SISO; - break; - case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: - case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: - /* avoid antenna B and MIMO */ - valid_tx_ant = - first_antenna(priv->nvm_data->valid_tx_ant); - if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && - tbl->action != IWL_LEGACY_SWITCH_SISO) - tbl->action = IWL_LEGACY_SWITCH_SISO; - break; - default: - IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load); - break; - } - - if (!iwl_ht_enabled(priv)) - /* stay in Legacy */ - tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; - else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && - tbl->action > IWL_LEGACY_SWITCH_SISO) - tbl->action = IWL_LEGACY_SWITCH_SISO; - - /* configure as 1x1 if bt full concurrency */ - if (priv->bt_full_concurrent) { - if (!iwl_ht_enabled(priv)) - tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; - else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) - tbl->action = IWL_LEGACY_SWITCH_SISO; - valid_tx_ant = - first_antenna(priv->nvm_data->valid_tx_ant); - } - - start_action = tbl->action; - for (; ;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IWL_LEGACY_SWITCH_ANTENNA1: - case IWL_LEGACY_SWITCH_ANTENNA2: - IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n"); - - if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && - tx_chains_num <= 1) || - (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 && - tx_chains_num <= 2)) - break; - - /* Don't change antenna if 
success has been great */ - if (window->success_ratio >= IWL_RS_GOOD_RATIO && - !priv->bt_full_concurrent && - priv->bt_traffic_load == - IWL_BT_COEX_TRAFFIC_LOAD_NONE) - break; - - /* Set up search table to try other antenna */ - memcpy(search_tbl, tbl, sz); - - if (rs_toggle_antenna(valid_tx_ant, - &search_tbl->current_rate, search_tbl)) { - update_search_tbl_counter = 1; - rs_set_expected_tpt_table(lq_sta, search_tbl); - goto out; - } - break; - case IWL_LEGACY_SWITCH_SISO: - IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n"); - - /* Set up search table to try SISO */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - ret = rs_switch_to_siso(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) { - lq_sta->action_counter = 0; - goto out; - } - - break; - case IWL_LEGACY_SWITCH_MIMO2_AB: - case IWL_LEGACY_SWITCH_MIMO2_AC: - case IWL_LEGACY_SWITCH_MIMO2_BC: - IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n"); - - /* Set up search table to try MIMO */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - - if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; - - if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) { - lq_sta->action_counter = 0; - goto out; - } - break; - - case IWL_LEGACY_SWITCH_MIMO3_ABC: - IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n"); - - /* Set up search table to try MIMO3 */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - - search_tbl->ant_type = ANT_ABC; - - if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) { - lq_sta->action_counter = 0; - goto out; - } - break; - } - tbl->action++; - if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) - tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - - } - search_tbl->lq_type = LQ_NONE; - return; - -out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) - tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; -} - -/* - * Try to switch to new modulation mode from SISO - */ -static void rs_move_siso_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) -{ - u8 is_green = lq_sta->is_green; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = priv->nvm_data->valid_tx_ant; - u8 tx_chains_num = priv->hw_params.tx_chains_num; - u8 update_search_tbl_counter = 0; - int ret; - - switch (priv->bt_traffic_load) { - case IWL_BT_COEX_TRAFFIC_LOAD_NONE: - /* nothing */ - break; - case IWL_BT_COEX_TRAFFIC_LOAD_LOW: - /* avoid antenna B unless MIMO */ - if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) - tbl->action = IWL_SISO_SWITCH_MIMO2_AB; - break; - case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: - case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: - /* avoid antenna B and MIMO */ - valid_tx_ant 
= - first_antenna(priv->nvm_data->valid_tx_ant); - if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) - tbl->action = IWL_SISO_SWITCH_ANTENNA1; - break; - default: - IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load); - break; - } - - if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && - tbl->action > IWL_SISO_SWITCH_ANTENNA2) { - /* stay in SISO */ - tbl->action = IWL_SISO_SWITCH_ANTENNA1; - } - - /* configure as 1x1 if bt full concurrency */ - if (priv->bt_full_concurrent) { - valid_tx_ant = - first_antenna(priv->nvm_data->valid_tx_ant); - if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) - tbl->action = IWL_SISO_SWITCH_ANTENNA1; - } - - start_action = tbl->action; - for (;;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IWL_SISO_SWITCH_ANTENNA1: - case IWL_SISO_SWITCH_ANTENNA2: - IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); - if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && - tx_chains_num <= 1) || - (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && - tx_chains_num <= 2)) - break; - - if (window->success_ratio >= IWL_RS_GOOD_RATIO && - !priv->bt_full_concurrent && - priv->bt_traffic_load == - IWL_BT_COEX_TRAFFIC_LOAD_NONE) - break; - - memcpy(search_tbl, tbl, sz); - if (rs_toggle_antenna(valid_tx_ant, - &search_tbl->current_rate, search_tbl)) { - update_search_tbl_counter = 1; - goto out; - } - break; - case IWL_SISO_SWITCH_MIMO2_AB: - case IWL_SISO_SWITCH_MIMO2_AC: - case IWL_SISO_SWITCH_MIMO2_BC: - IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n"); - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - - if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; - - if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) - goto out; - break; - case IWL_SISO_SWITCH_GI: - if (!tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_20)) - break; - if (tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_40)) - break; - - IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n"); - - memcpy(search_tbl, tbl, sz); - if (is_green) { - if (!tbl->is_SGI) - break; - else - IWL_ERR(priv, - "SGI was set in GF+SISO\n"); - } - search_tbl->is_SGI = !tbl->is_SGI; - rs_set_expected_tpt_table(lq_sta, search_tbl); - if (tbl->is_SGI) { - s32 tpt = lq_sta->last_tpt / 100; - if (tpt >= search_tbl->expected_tpt[index]) - break; - } - search_tbl->current_rate = - rate_n_flags_from_tbl(priv, search_tbl, - index, is_green); - update_search_tbl_counter = 1; - goto out; - case IWL_SISO_SWITCH_MIMO3_ABC: - IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n"); - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - search_tbl->ant_type = ANT_ABC; - - if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) - goto out; - break; - } - tbl->action++; - if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) - tbl->action = IWL_SISO_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - } - search_tbl->lq_type = LQ_NONE; - return; - - out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC) - tbl->action = IWL_SISO_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; -} - -/* - * Try to switch to new modulation mode from MIMO2 - */ -static void rs_move_mimo2_to_other(struct iwl_priv 
*priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) -{ - s8 is_green = lq_sta->is_green; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = priv->nvm_data->valid_tx_ant; - u8 tx_chains_num = priv->hw_params.tx_chains_num; - u8 update_search_tbl_counter = 0; - int ret; - - switch (priv->bt_traffic_load) { - case IWL_BT_COEX_TRAFFIC_LOAD_NONE: - /* nothing */ - break; - case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: - case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: - /* avoid antenna B and MIMO */ - if (tbl->action != IWL_MIMO2_SWITCH_SISO_A) - tbl->action = IWL_MIMO2_SWITCH_SISO_A; - break; - case IWL_BT_COEX_TRAFFIC_LOAD_LOW: - /* avoid antenna B unless MIMO */ - if (tbl->action == IWL_MIMO2_SWITCH_SISO_B || - tbl->action == IWL_MIMO2_SWITCH_SISO_C) - tbl->action = IWL_MIMO2_SWITCH_SISO_A; - break; - default: - IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load); - break; - } - - if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && - (tbl->action < IWL_MIMO2_SWITCH_SISO_A || - tbl->action > IWL_MIMO2_SWITCH_SISO_C)) { - /* switch in SISO */ - tbl->action = IWL_MIMO2_SWITCH_SISO_A; - } - - /* configure as 1x1 if bt full concurrency */ - if (priv->bt_full_concurrent && - (tbl->action < IWL_MIMO2_SWITCH_SISO_A || - tbl->action > IWL_MIMO2_SWITCH_SISO_C)) - tbl->action = IWL_MIMO2_SWITCH_SISO_A; - - start_action = tbl->action; - for (;;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IWL_MIMO2_SWITCH_ANTENNA1: - case IWL_MIMO2_SWITCH_ANTENNA2: - IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n"); - - if (tx_chains_num <= 2) - break; - - if (window->success_ratio >= IWL_RS_GOOD_RATIO) - break; - - memcpy(search_tbl, tbl, sz); - if (rs_toggle_antenna(valid_tx_ant, - &search_tbl->current_rate, search_tbl)) { - update_search_tbl_counter = 1; - goto out; - } - break; - case IWL_MIMO2_SWITCH_SISO_A: - case IWL_MIMO2_SWITCH_SISO_B: - case IWL_MIMO2_SWITCH_SISO_C: - IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n"); - - /* Set up new search table for SISO */ - memcpy(search_tbl, tbl, sz); - - if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) - search_tbl->ant_type = ANT_A; - else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) - search_tbl->ant_type = ANT_B; - else - search_tbl->ant_type = ANT_C; - - if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) - break; - - ret = rs_switch_to_siso(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) - goto out; - - break; - - case IWL_MIMO2_SWITCH_GI: - if (!tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_20)) - break; - if (tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_40)) - break; - - IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n"); - - /* Set up new search table for MIMO2 */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = !tbl->is_SGI; - rs_set_expected_tpt_table(lq_sta, search_tbl); - /* - * If active table already uses the fastest possible - * modulation (dual stream with short guard interval), - * and it's working well, there's no need to look - * for a better type of modulation! 
- */ - if (tbl->is_SGI) { - s32 tpt = lq_sta->last_tpt / 100; - if (tpt >= search_tbl->expected_tpt[index]) - break; - } - search_tbl->current_rate = - rate_n_flags_from_tbl(priv, search_tbl, - index, is_green); - update_search_tbl_counter = 1; - goto out; - - case IWL_MIMO2_SWITCH_MIMO3_ABC: - IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n"); - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - search_tbl->ant_type = ANT_ABC; - - if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) - goto out; - - break; - } - tbl->action++; - if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) - tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - } - search_tbl->lq_type = LQ_NONE; - return; - out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) - tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; - -} - -/* - * Try to switch to new modulation mode from MIMO3 - */ -static void rs_move_mimo3_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) -{ - s8 is_green = lq_sta->is_green; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = priv->nvm_data->valid_tx_ant; - u8 tx_chains_num = priv->hw_params.tx_chains_num; - int ret; - u8 update_search_tbl_counter = 0; - - switch (priv->bt_traffic_load) { - case IWL_BT_COEX_TRAFFIC_LOAD_NONE: - /* nothing */ - break; - case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: - case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: - /* avoid antenna B and MIMO */ - if (tbl->action != IWL_MIMO3_SWITCH_SISO_A) - tbl->action = IWL_MIMO3_SWITCH_SISO_A; - break; - case IWL_BT_COEX_TRAFFIC_LOAD_LOW: - /* avoid antenna B unless MIMO */ - if (tbl->action == IWL_MIMO3_SWITCH_SISO_B || - tbl->action == IWL_MIMO3_SWITCH_SISO_C) - tbl->action = IWL_MIMO3_SWITCH_SISO_A; - break; - default: - IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load); - break; - } - - if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && - (tbl->action < IWL_MIMO3_SWITCH_SISO_A || - tbl->action > IWL_MIMO3_SWITCH_SISO_C)) { - /* switch in SISO */ - tbl->action = IWL_MIMO3_SWITCH_SISO_A; - } - - /* configure as 1x1 if bt full concurrency */ - if (priv->bt_full_concurrent && - (tbl->action < IWL_MIMO3_SWITCH_SISO_A || - tbl->action > IWL_MIMO3_SWITCH_SISO_C)) - tbl->action = IWL_MIMO3_SWITCH_SISO_A; - - start_action = tbl->action; - for (;;) { - lq_sta->action_counter++; - switch (tbl->action) { - case IWL_MIMO3_SWITCH_ANTENNA1: - case IWL_MIMO3_SWITCH_ANTENNA2: - IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n"); - - if (tx_chains_num <= 3) - break; - - if (window->success_ratio >= IWL_RS_GOOD_RATIO) - break; - - memcpy(search_tbl, tbl, sz); - if (rs_toggle_antenna(valid_tx_ant, - &search_tbl->current_rate, search_tbl)) - goto out; - break; - case IWL_MIMO3_SWITCH_SISO_A: - case IWL_MIMO3_SWITCH_SISO_B: - case IWL_MIMO3_SWITCH_SISO_C: - IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n"); - - /* Set up new search table for SISO */ - 
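
rs_move_legacy_other(), rs_move_siso_to_other() and the two MIMO variants all share the same control flow: cycle tbl->action through a fixed list of candidate switches, skip candidates that the antenna configuration, chain count or BT coexistence rules out, and stop either when one candidate sets up a usable search table or when the cycle is back at its starting point. A minimal model of that loop follows; try_switch() is a placeholder for the rs_switch_to_*()/rs_toggle_antenna() setup calls, and the action list is shortened for the example.

#include <stdio.h>

enum ex_action { EX_TOGGLE_ANT, EX_TO_SISO, EX_TO_MIMO2, EX_NUM_ACTIONS };

static int try_switch(enum ex_action a, int tx_chains)
{
        switch (a) {
        case EX_TOGGLE_ANT:
                return tx_chains > 1;   /* nothing to toggle on a 1x1 device */
        case EX_TO_MIMO2:
                return tx_chains >= 2;  /* MIMO needs two chains */
        default:
                return 1;               /* SISO is always a candidate */
        }
}

static int search_next_mode(int *action, int tx_chains)
{
        int start = *action;

        for (;;) {
                if (try_switch(*action, tx_chains)) {
                        /* leave *action pointing at the next candidate */
                        *action = (*action + 1) % EX_NUM_ACTIONS;
                        return 1;       /* search table is set up */
                }
                *action = (*action + 1) % EX_NUM_ACTIONS;
                if (*action == start)
                        return 0;       /* tried everything, stay put */
        }
}

int main(void)
{
        int action = EX_TOGGLE_ANT;

        if (search_next_mode(&action, 1 /* single chain */))
                printf("search table ready, next action %d\n", action);
        return 0;
}
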
memcpy(search_tbl, tbl, sz); - - if (tbl->action == IWL_MIMO3_SWITCH_SISO_A) - search_tbl->ant_type = ANT_A; - else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B) - search_tbl->ant_type = ANT_B; - else - search_tbl->ant_type = ANT_C; - - if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) - break; - - ret = rs_switch_to_siso(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) - goto out; - - break; - - case IWL_MIMO3_SWITCH_MIMO2_AB: - case IWL_MIMO3_SWITCH_MIMO2_AC: - case IWL_MIMO3_SWITCH_MIMO2_BC: - IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n"); - - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; - - if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, - search_tbl, index); - if (!ret) - goto out; - - break; - - case IWL_MIMO3_SWITCH_GI: - if (!tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_20)) - break; - if (tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_40)) - break; - - IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n"); - - /* Set up new search table for MIMO */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = !tbl->is_SGI; - rs_set_expected_tpt_table(lq_sta, search_tbl); - /* - * If active table already uses the fastest possible - * modulation (dual stream with short guard interval), - * and it's working well, there's no need to look - * for a better type of modulation! - */ - if (tbl->is_SGI) { - s32 tpt = lq_sta->last_tpt / 100; - if (tpt >= search_tbl->expected_tpt[index]) - break; - } - search_tbl->current_rate = - rate_n_flags_from_tbl(priv, search_tbl, - index, is_green); - update_search_tbl_counter = 1; - goto out; - } - tbl->action++; - if (tbl->action > IWL_MIMO3_SWITCH_GI) - tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - } - search_tbl->lq_type = LQ_NONE; - return; - out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IWL_MIMO3_SWITCH_GI) - tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; -} - -/* - * Check whether we should continue using same modulation mode, or - * begin search for a new mode, based on: - * 1) # tx successes or failures while using this mode - * 2) # times calling this function - * 3) elapsed time in this mode (not used, for now) - */ -static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) -{ - struct iwl_scale_tbl_info *tbl; - int i; - int active_tbl; - int flush_interval_passed = 0; - struct iwl_priv *priv; - - priv = lq_sta->drv; - active_tbl = lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - - /* If we've been disallowing search, see if we should now allow it */ - if (lq_sta->stay_in_tbl) { - - /* Elapsed time using current modulation mode */ - if (lq_sta->flush_timer) - flush_interval_passed = - time_after(jiffies, - (unsigned long)(lq_sta->flush_timer + - IWL_RATE_SCALE_FLUSH_INTVL)); - - /* - * Check if we should allow search for new modulation mode. - * If many frames have failed or succeeded, or we've used - * this same modulation for a long time, allow search, and - * reset history stats that keep track of whether we should - * allow a new search. Also (below) reset all bitmaps and - * stats in active history. 
- */ - if (force_search || - (lq_sta->total_failed > lq_sta->max_failure_limit) || - (lq_sta->total_success > lq_sta->max_success_limit) || - ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) - && (flush_interval_passed))) { - IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n", - lq_sta->total_failed, - lq_sta->total_success, - flush_interval_passed); - - /* Allow search for new mode */ - lq_sta->stay_in_tbl = 0; /* only place reset */ - lq_sta->total_failed = 0; - lq_sta->total_success = 0; - lq_sta->flush_timer = 0; - - /* - * Else if we've used this modulation mode enough repetitions - * (regardless of elapsed time or success/failure), reset - * history bitmaps and rate-specific stats for all rates in - * active table. - */ - } else { - lq_sta->table_count++; - if (lq_sta->table_count >= - lq_sta->table_count_limit) { - lq_sta->table_count = 0; - - IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n"); - for (i = 0; i < IWL_RATE_COUNT; i++) - rs_rate_scale_clear_window( - &(tbl->win[i])); - } - } - - /* If transitioning to allow "search", reset all history - * bitmaps and stats in active table (this will become the new - * "search" table). */ - if (!lq_sta->stay_in_tbl) { - for (i = 0; i < IWL_RATE_COUNT; i++) - rs_rate_scale_clear_window(&(tbl->win[i])); - } - } -} - -/* - * setup rate table in uCode - */ -static void rs_update_rate_tbl(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - int index, u8 is_green) -{ - u32 rate; - - /* Update uCode's rate table. */ - rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); - rs_fill_link_cmd(priv, lq_sta, rate); - iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); -} - -/* - * Do rate scaling and search for new modulation mode. - */ -static void rs_rate_scale_perform(struct iwl_priv *priv, - struct sk_buff *skb, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta) -{ - struct ieee80211_hw *hw = priv->hw; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int low = IWL_RATE_INVALID; - int high = IWL_RATE_INVALID; - int index; - int i; - struct iwl_rate_scale_data *window = NULL; - int current_tpt = IWL_INVALID_VALUE; - int low_tpt = IWL_INVALID_VALUE; - int high_tpt = IWL_INVALID_VALUE; - u32 fail_count; - s8 scale_action = 0; - u16 rate_mask; - u8 update_lq = 0; - struct iwl_scale_tbl_info *tbl, *tbl1; - u16 rate_scale_index_msk = 0; - u8 is_green = 0; - u8 active_tbl = 0; - u8 done_search = 0; - u16 high_low; - s32 sr; - u8 tid = IWL_MAX_TID_COUNT; - struct iwl_tid_data *tid_data; - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); - - /* Send management frames and NO_ACK data using lowest rate. */ - /* TODO: this could probably be improved.. 
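
rs_stay_in_table() gates how long the driver keeps measuring the current modulation mode before another search is allowed: enough failures, enough successes, or an expired flush timer all end the stay. The compressed model below keeps only that decision; the numeric limits are stand-ins for the IWL_LEGACY_* / IWL_NONE_LEGACY_* constants, the jiffies-based flush timer is reduced to a flag, and the periodic clearing of the history windows is left out.

#include <stdio.h>

struct stay_state {
        int total_failed, total_success, table_count;
        int max_failure_limit, max_success_limit, table_count_limit;
        int flush_elapsed;          /* "flush interval passed" flag */
        int stay_in_tbl;
};

static int allow_new_search(struct stay_state *s, int force)
{
        if (!s->stay_in_tbl)
                return 1;
        if (force ||
            s->total_failed > s->max_failure_limit ||
            s->total_success > s->max_success_limit ||
            s->flush_elapsed) {
                s->stay_in_tbl = 0;           /* only place this is cleared */
                s->total_failed = 0;
                s->total_success = 0;
                s->flush_elapsed = 0;
                return 1;
        }
        /* otherwise just age the table; the history windows get cleared
         * once table_count reaches table_count_limit (not shown here) */
        s->table_count++;
        return 0;
}

int main(void)
{
        struct stay_state s = {
                .max_failure_limit = 160, .max_success_limit = 480,
                .table_count_limit = 5, .stay_in_tbl = 1,
        };

        s.total_failed = 200;       /* lots of failures in this mode */
        printf("search allowed: %d\n", allow_new_search(&s, 0));
        return 0;
}
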
*/ - if (!ieee80211_is_data(hdr->frame_control) || - info->flags & IEEE80211_TX_CTL_NO_ACK) - return; - - lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; - - tid = rs_tl_add_packet(lq_sta, hdr); - if ((tid != IWL_MAX_TID_COUNT) && - (lq_sta->tx_agg_tid_en & (1 << tid))) { - tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid]; - if (tid_data->agg.state == IWL_AGG_OFF) - lq_sta->is_agg = 0; - else - lq_sta->is_agg = 1; - } else - lq_sta->is_agg = 0; - - /* - * Select rate-scale / modulation-mode table to work with in - * the rest of this function: "search" if searching for better - * modulation mode, or "active" if doing rate scaling within a mode. - */ - if (!lq_sta->search_better_tbl) - active_tbl = lq_sta->active_tbl; - else - active_tbl = 1 - lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - if (is_legacy(tbl->lq_type)) - lq_sta->is_green = 0; - else - lq_sta->is_green = rs_use_green(sta); - is_green = lq_sta->is_green; - - /* current tx rate */ - index = lq_sta->last_txrate_idx; - - IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index, - tbl->lq_type); - - /* rates available for this association, and for modulation mode */ - rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); - - IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask); - - /* mask with station rate restriction */ - if (is_legacy(tbl->lq_type)) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) - /* supp_rates has no CCK bits in A mode */ - rate_scale_index_msk = (u16) (rate_mask & - (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); - else - rate_scale_index_msk = (u16) (rate_mask & - lq_sta->supp_rates); - - } else - rate_scale_index_msk = rate_mask; - - if (!rate_scale_index_msk) - rate_scale_index_msk = rate_mask; - - if (!((1 << index) & rate_scale_index_msk)) { - IWL_ERR(priv, "Current Rate is not valid\n"); - if (lq_sta->search_better_tbl) { - /* revert to active table if search table is not valid*/ - tbl->lq_type = LQ_NONE; - lq_sta->search_better_tbl = 0; - tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - /* get "active" rate info */ - index = iwl_hwrate_to_plcp_idx(tbl->current_rate); - rs_update_rate_tbl(priv, ctx, lq_sta, tbl, - index, is_green); - } - return; - } - - /* Get expected throughput table and history window for current rate */ - if (!tbl->expected_tpt) { - IWL_ERR(priv, "tbl->expected_tpt is NULL\n"); - return; - } - - /* force user max rate if set by user */ - if ((lq_sta->max_rate_idx != -1) && - (lq_sta->max_rate_idx < index)) { - index = lq_sta->max_rate_idx; - update_lq = 1; - window = &(tbl->win[index]); - goto lq_update; - } - - window = &(tbl->win[index]); - - /* - * If there is not enough history to calculate actual average - * throughput, keep analyzing results of more tx frames, without - * changing rate or mode (bypass most of the rest of this function). - * Set up new rate table in uCode only if old rate is not supported - * in current association (use new rate found above). - */ - fail_count = window->counter - window->success_counter; - if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && - (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { - IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d " - "for index %d\n", - window->success_counter, window->counter, index); - - /* Can't calculate this yet; not enough history */ - window->average_tpt = IWL_INVALID_VALUE; - - /* Should we stay with this modulation mode, - * or search for a new one? 
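A small stand-alone sketch (not part of the patch) of the "enough history" test just above, i.e. the IWL_RATE_MIN_FAILURE_TH / IWL_RATE_MIN_SUCCESS_TH comparison; the parameter names are local, not the driver's.

#include <stdbool.h>

/*
 * Rate scaling only proceeds once the window holds either enough
 * failures or enough successes to trust the average throughput.
 */
static bool enough_history(int counter, int success_counter,
                           int min_failure_th, int min_success_th)
{
    int fail_count = counter - success_counter;

    return fail_count >= min_failure_th ||
           success_counter >= min_success_th;
}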
*/ - rs_stay_in_table(lq_sta, false); - - goto out; - } - /* Else we have enough samples; calculate estimate of - * actual average throughput */ - if (window->average_tpt != ((window->success_ratio * - tbl->expected_tpt[index] + 64) / 128)) { - IWL_ERR(priv, "expected_tpt should have been calculated by now\n"); - window->average_tpt = ((window->success_ratio * - tbl->expected_tpt[index] + 64) / 128); - } - - /* If we are searching for better modulation mode, check success. */ - if (lq_sta->search_better_tbl && - (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI)) { - /* If good success, continue using the "search" mode; - * no need to send new link quality command, since we're - * continuing to use the setup that we've been trying. */ - if (window->average_tpt > lq_sta->last_tpt) { - - IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE " - "suc=%d cur-tpt=%d old-tpt=%d\n", - window->success_ratio, - window->average_tpt, - lq_sta->last_tpt); - - if (!is_legacy(tbl->lq_type)) - lq_sta->enable_counter = 1; - - /* Swap tables; "search" becomes "active" */ - lq_sta->active_tbl = active_tbl; - current_tpt = window->average_tpt; - - /* Else poor success; go back to mode in "active" table */ - } else { - - IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE " - "suc=%d cur-tpt=%d old-tpt=%d\n", - window->success_ratio, - window->average_tpt, - lq_sta->last_tpt); - - /* Nullify "search" table */ - tbl->lq_type = LQ_NONE; - - /* Revert to "active" table */ - active_tbl = lq_sta->active_tbl; - tbl = &(lq_sta->lq_info[active_tbl]); - - /* Revert to "active" rate and throughput info */ - index = iwl_hwrate_to_plcp_idx(tbl->current_rate); - current_tpt = lq_sta->last_tpt; - - /* Need to set up a new rate table in uCode */ - update_lq = 1; - } - - /* Either way, we've made a decision; modulation mode - * search is done, allow rate adjustment next time. */ - lq_sta->search_better_tbl = 0; - done_search = 1; /* Don't switch modes below! */ - goto lq_update; - } - - /* (Else) not in search of better modulation mode, try for better - * starting rate, while staying in this mode. */ - high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk, - tbl->lq_type); - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* If user set max rate, dont allow higher than user constrain */ - if ((lq_sta->max_rate_idx != -1) && - (lq_sta->max_rate_idx < high)) - high = IWL_RATE_INVALID; - - sr = window->success_ratio; - - /* Collect measured throughputs for current and adjacent rates */ - current_tpt = window->average_tpt; - if (low != IWL_RATE_INVALID) - low_tpt = tbl->win[low].average_tpt; - if (high != IWL_RATE_INVALID) - high_tpt = tbl->win[high].average_tpt; - - scale_action = 0; - - /* Too many failures, decrease rate */ - if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { - IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); - scale_action = -1; - - /* No throughput measured yet for adjacent rates; try increase. */ - } else if ((low_tpt == IWL_INVALID_VALUE) && - (high_tpt == IWL_INVALID_VALUE)) { - - if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) - scale_action = 1; - else if (low != IWL_RATE_INVALID) - scale_action = 0; - } - - /* Both adjacent throughputs are measured, but neither one has better - * throughput; we're using the best rate, don't change it! 
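For reference, the windowed throughput estimate used above as a stand-alone sketch: success_ratio is percent scaled by 128 (12800 means 100%, per rs.h later in this patch), so the result stays in the same "expected_tpt * percent" units that the 100 * expected_tpt comparisons later in this function rely on.

#include <stdint.h>
#include <stdio.h>

/* (success_ratio * expected_tpt + 64) / 128, rounded to nearest. */
static int32_t window_average_tpt(int32_t success_ratio, int32_t expected_tpt)
{
    return (success_ratio * expected_tpt + 64) / 128;
}

int main(void)
{
    /* 85% success (10880 == 85 * 128) at an expected_tpt metric of 202 */
    printf("%d\n", window_average_tpt(10880, 202)); /* prints 17170 */
    return 0;
}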
*/ - else if ((low_tpt != IWL_INVALID_VALUE) && - (high_tpt != IWL_INVALID_VALUE) && - (low_tpt < current_tpt) && - (high_tpt < current_tpt)) - scale_action = 0; - - /* At least one adjacent rate's throughput is measured, - * and may have better performance. */ - else { - /* Higher adjacent rate's throughput is measured */ - if (high_tpt != IWL_INVALID_VALUE) { - /* Higher rate has better throughput */ - if (high_tpt > current_tpt && - sr >= IWL_RATE_INCREASE_TH) { - scale_action = 1; - } else { - scale_action = 0; - } - - /* Lower adjacent rate's throughput is measured */ - } else if (low_tpt != IWL_INVALID_VALUE) { - /* Lower rate has better throughput */ - if (low_tpt > current_tpt) { - IWL_DEBUG_RATE(priv, - "decrease rate because of low tpt\n"); - scale_action = -1; - } else if (sr >= IWL_RATE_INCREASE_TH) { - scale_action = 1; - } - } - } - - /* Sanity check; asked for decrease, but success rate or throughput - * has been good at old rate. Don't change it. */ - if ((scale_action == -1) && (low != IWL_RATE_INVALID) && - ((sr > IWL_RATE_HIGH_TH) || - (current_tpt > (100 * tbl->expected_tpt[low])))) - scale_action = 0; - if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type)) - scale_action = -1; - if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI && - (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) - scale_action = -1; - - if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && - (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { - if (lq_sta->last_bt_traffic > priv->bt_traffic_load) { - /* - * don't set scale_action, don't want to scale up if - * the rate scale doesn't otherwise think that is a - * good idea. - */ - } else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) { - scale_action = -1; - } - } - lq_sta->last_bt_traffic = priv->bt_traffic_load; - - if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && - (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { - /* search for a new modulation */ - rs_stay_in_table(lq_sta, true); - goto lq_update; - } - - switch (scale_action) { - case -1: - /* Decrease starting rate, update uCode's rate table */ - if (low != IWL_RATE_INVALID) { - update_lq = 1; - index = low; - } - - break; - case 1: - /* Increase starting rate, update uCode's rate table */ - if (high != IWL_RATE_INVALID) { - update_lq = 1; - index = high; - } - - break; - case 0: - /* No change */ - default: - break; - } - - IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d " - "high %d type %d\n", - index, scale_action, low, high, tbl->lq_type); - -lq_update: - /* Replace uCode's rate table for the destination station. */ - if (update_lq) - rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green); - - if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) { - /* Should we stay with this modulation mode, - * or search for a new one? */ - rs_stay_in_table(lq_sta, false); - } - /* - * Search for new modulation mode if we're: - * 1) Not changing rates right now - * 2) Not just finishing up a search - * 3) Allowing a new search - */ - if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) { - /* Save current throughput to compare with "search" throughput*/ - lq_sta->last_tpt = current_tpt; - - /* Select a new "search" modulation mode to try. - * If one is found, set up the new "search" table. 
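The rate-step decision above, condensed into a stand-alone sketch (not part of the patch); INVALID, DECREASE_TH and INCREASE_TH are local stand-ins for IWL_INVALID_VALUE and the IWL_RATE_*_TH thresholds from rs.h, and the later sanity checks (such as reverting a decrease while the old rate still performs well, or the BT-coex overrides) are left out.

/* -1: step the rate down, +1: step it up, 0: keep the current rate. */
#define INVALID     (-1)
#define DECREASE_TH 1920    /* 15% of a perfect 12800 */
#define INCREASE_TH 6400    /* 50% */

static int pick_scale_action(int sr, int current_tpt,
                             int low, int low_tpt,
                             int high, int high_tpt)
{
    if (sr <= DECREASE_TH || current_tpt == 0)
        return -1;                              /* too many failures */

    if (low_tpt == INVALID && high_tpt == INVALID)  /* nothing measured yet */
        return (high != INVALID && sr >= INCREASE_TH) ? 1 : 0;

    if (low_tpt != INVALID && high_tpt != INVALID &&
        low_tpt < current_tpt && high_tpt < current_tpt)
        return 0;                               /* already at the best rate */

    if (high_tpt != INVALID)                    /* higher rate was measured */
        return (high_tpt > current_tpt && sr >= INCREASE_TH) ? 1 : 0;

    if (low_tpt > current_tpt)                  /* lower rate does better */
        return -1;

    return (sr >= INCREASE_TH) ? 1 : 0;
}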
*/ - if (is_legacy(tbl->lq_type)) - rs_move_legacy_other(priv, lq_sta, conf, sta, index); - else if (is_siso(tbl->lq_type)) - rs_move_siso_to_other(priv, lq_sta, conf, sta, index); - else if (is_mimo2(tbl->lq_type)) - rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index); - else - rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index); - - /* If new "search" mode was selected, set up in uCode table */ - if (lq_sta->search_better_tbl) { - /* Access the "search" table, clear its history. */ - tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - for (i = 0; i < IWL_RATE_COUNT; i++) - rs_rate_scale_clear_window(&(tbl->win[i])); - - /* Use new "search" start rate */ - index = iwl_hwrate_to_plcp_idx(tbl->current_rate); - - IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n", - tbl->current_rate, index); - rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); - iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); - } else - done_search = 1; - } - - if (done_search && !lq_sta->stay_in_tbl) { - /* If the "active" (non-search) mode was legacy, - * and we've tried switching antennas, - * but we haven't been able to try HT modes (not available), - * stay with best antenna legacy modulation for a while - * before next round of mode comparisons. */ - tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); - if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && - lq_sta->action_counter > tbl1->max_search) { - IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n"); - rs_set_stay_in_table(priv, 1, lq_sta); - } - - /* If we're in an HT mode, and all 3 mode switch actions - * have been tried and compared, stay in this best modulation - * mode for a while before next round of mode comparisons. */ - if (lq_sta->enable_counter && - (lq_sta->action_counter >= tbl1->max_search) && - iwl_ht_enabled(priv)) { - if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && - (lq_sta->tx_agg_tid_en & (1 << tid)) && - (tid != IWL_MAX_TID_COUNT)) { - u8 sta_id = lq_sta->lq.sta_id; - tid_data = &priv->tid_data[sta_id][tid]; - if (tid_data->agg.state == IWL_AGG_OFF) { - IWL_DEBUG_RATE(priv, - "try to aggregate tid %d\n", - tid); - rs_tl_turn_on_agg(priv, tid, - lq_sta, sta); - } - } - rs_set_stay_in_table(priv, 0, lq_sta); - } - } - -out: - tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); - lq_sta->last_txrate_idx = index; -} - -/** - * rs_initialize_lq - Initialize a station's hardware rate table - * - * The uCode's station table contains a table of fallback rates - * for automatic fallback during transmission. - * - * NOTE: This sets up a default set of values. These will be replaced later - * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of - * rc80211_simple. - * - * NOTE: Run REPLY_ADD_STA command to set up station table entry, before - * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, - * which requires station table entry to exist). 
- */ -static void rs_initialize_lq(struct iwl_priv *priv, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta) -{ - struct iwl_scale_tbl_info *tbl; - int rate_idx; - int i; - u32 rate; - u8 use_green = rs_use_green(sta); - u8 active_tbl = 0; - u8 valid_tx_ant; - struct iwl_station_priv *sta_priv; - struct iwl_rxon_context *ctx; - - if (!sta || !lq_sta) - return; - - sta_priv = (void *)sta->drv_priv; - ctx = sta_priv->ctx; - - i = lq_sta->last_txrate_idx; - - valid_tx_ant = priv->nvm_data->valid_tx_ant; - - if (!lq_sta->search_better_tbl) - active_tbl = lq_sta->active_tbl; - else - active_tbl = 1 - lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - - if ((i < 0) || (i >= IWL_RATE_COUNT)) - i = 0; - - rate = iwl_rates[i].plcp; - tbl->ant_type = first_antenna(valid_tx_ant); - rate |= tbl->ant_type << RATE_MCS_ANT_POS; - - if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) - rate |= RATE_MCS_CCK_MSK; - - rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx); - if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) - rs_toggle_antenna(valid_tx_ant, &rate, tbl); - - rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green); - tbl->current_rate = rate; - rs_set_expected_tpt_table(lq_sta, tbl); - rs_fill_link_cmd(NULL, lq_sta, rate); - priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; - iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true); -} - -static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, - struct ieee80211_tx_rate_control *txrc) -{ - - struct sk_buff *skb = txrc->skb; - struct ieee80211_supported_band *sband = txrc->sband; - struct iwl_op_mode *op_mode __maybe_unused = - (struct iwl_op_mode *)priv_r; - struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_lq_sta *lq_sta = priv_sta; - int rate_idx; - - IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); - - /* Get max rate if user set max rate */ - if (lq_sta) { - lq_sta->max_rate_idx = txrc->max_rate_idx; - if ((sband->band == IEEE80211_BAND_5GHZ) && - (lq_sta->max_rate_idx != -1)) - lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; - if ((lq_sta->max_rate_idx < 0) || - (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) - lq_sta->max_rate_idx = -1; - } - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (lq_sta && !lq_sta->drv) { - IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); - priv_sta = NULL; - } - - /* Send management frames and NO_ACK data using lowest rate. */ - if (rate_control_send_low(sta, priv_sta, txrc)) - return; - - rate_idx = lq_sta->last_txrate_idx; - - if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { - rate_idx -= IWL_FIRST_OFDM_RATE; - /* 6M and 9M shared same MCS index */ - rate_idx = (rate_idx > 0) ? 
(rate_idx - 1) : 0; - if (rs_extract_rate(lq_sta->last_rate_n_flags) >= - IWL_RATE_MIMO3_6M_PLCP) - rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM); - else if (rs_extract_rate(lq_sta->last_rate_n_flags) >= - IWL_RATE_MIMO2_6M_PLCP) - rate_idx = rate_idx + MCS_INDEX_PER_STREAM; - info->control.rates[0].flags = IEEE80211_TX_RC_MCS; - if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) - info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI; - if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) - info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA; - if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) - info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) - info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD; - } else { - /* Check for invalid rates */ - if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || - ((sband->band == IEEE80211_BAND_5GHZ) && - (rate_idx < IWL_FIRST_OFDM_RATE))) - rate_idx = rate_lowest_index(sband, sta); - /* On valid 5 GHz rate, adjust index */ - else if (sband->band == IEEE80211_BAND_5GHZ) - rate_idx -= IWL_FIRST_OFDM_RATE; - info->control.rates[0].flags = 0; - } - info->control.rates[0].idx = rate_idx; - info->control.rates[0].count = 1; -} - -static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, - gfp_t gfp) -{ - struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv; - struct iwl_op_mode *op_mode __maybe_unused = - (struct iwl_op_mode *)priv_rate; - struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode); - - IWL_DEBUG_RATE(priv, "create station rate scale window\n"); - - return &sta_priv->lq_sta; -} - -/* - * Called after adding a new station to initialize rate scaling - */ -void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id) -{ - int i, j; - struct ieee80211_hw *hw = priv->hw; - struct ieee80211_conf *conf = &priv->hw->conf; - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct iwl_station_priv *sta_priv; - struct iwl_lq_sta *lq_sta; - struct ieee80211_supported_band *sband; - unsigned long supp; /* must be unsigned long for for_each_set_bit */ - - sta_priv = (struct iwl_station_priv *) sta->drv_priv; - lq_sta = &sta_priv->lq_sta; - sband = hw->wiphy->bands[conf->chandef.chan->band]; - - - lq_sta->lq.sta_id = sta_id; - - for (j = 0; j < LQ_SIZE; j++) - for (i = 0; i < IWL_RATE_COUNT; i++) - rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); - - lq_sta->flush_timer = 0; - lq_sta->supp_rates = sta->supp_rates[sband->band]; - - IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n", - sta_id); - /* TODO: what is a good starting rate for STA? About middle? Maybe not - * the lowest or the highest rate.. Could consider using RSSI from - * previous packets? Need to have IEEE 802.1X auth succeed immediately - * after assoc.. */ - - lq_sta->is_dup = 0; - lq_sta->max_rate_idx = -1; - lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; - lq_sta->is_green = rs_use_green(sta); - lq_sta->band = sband->band; - /* - * active legacy rates as per supported rates bitmap - */ - supp = sta->supp_rates[sband->band]; - lq_sta->active_legacy_rate = 0; - for_each_set_bit(i, &supp, BITS_PER_LONG) - lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); - - /* - * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), - * supp_rates[] does not; shift to convert format, force 9 MBits off. 
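A stand-alone sketch (not part of the patch) of the HT index mapping performed in rs_get_rate() above; FIRST_OFDM_RATE is 4 per the rate-index enum in rs.h below, and MCS_PER_STREAM is a local stand-in for the driver's MCS_INDEX_PER_STREAM, assuming the usual HT layout of 8 MCS indexes per additional spatial stream.

/*
 * Rebase the driver rate index to the OFDM rates, collapse by one
 * because 6M and 9M share an MCS index, then offset by one MCS block
 * per extra spatial stream.
 */
enum { FIRST_OFDM_RATE = 4, MCS_PER_STREAM = 8 };

static int plcp_rate_to_mcs_idx(int rate_idx, int n_streams)
{
    rate_idx -= FIRST_OFDM_RATE;
    if (rate_idx > 0)
        rate_idx -= 1;
    return rate_idx + (n_streams - 1) * MCS_PER_STREAM;
}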
- */ - lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; - lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; - lq_sta->active_siso_rate &= ~((u16)0x2); - lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; - - /* Same here */ - lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; - lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; - lq_sta->active_mimo2_rate &= ~((u16)0x2); - lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; - - lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1; - lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1; - lq_sta->active_mimo3_rate &= ~((u16)0x2); - lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE; - - IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n", - lq_sta->active_siso_rate, - lq_sta->active_mimo2_rate, - lq_sta->active_mimo3_rate); - - /* These values will be overridden later */ - lq_sta->lq.general_params.single_stream_ant_msk = - first_antenna(priv->nvm_data->valid_tx_ant); - lq_sta->lq.general_params.dual_stream_ant_msk = - priv->nvm_data->valid_tx_ant & - ~first_antenna(priv->nvm_data->valid_tx_ant); - if (!lq_sta->lq.general_params.dual_stream_ant_msk) { - lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; - } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) { - lq_sta->lq.general_params.dual_stream_ant_msk = - priv->nvm_data->valid_tx_ant; - } - - /* as default allow aggregation for all tids */ - lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; - lq_sta->drv = priv; - - /* Set last_txrate_idx to lowest rate */ - lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); - if (sband->band == IEEE80211_BAND_5GHZ) - lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; - lq_sta->is_agg = 0; -#ifdef CONFIG_MAC80211_DEBUGFS - lq_sta->dbg_fixed_rate = 0; -#endif - - rs_initialize_lq(priv, sta, lq_sta); -} - -static void rs_fill_link_cmd(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, u32 new_rate) -{ - struct iwl_scale_tbl_info tbl_type; - int index = 0; - int rate_idx; - int repeat_rate = 0; - u8 ant_toggle_cnt = 0; - u8 use_ht_possible = 1; - u8 valid_tx_ant = 0; - struct iwl_station_priv *sta_priv = - container_of(lq_sta, struct iwl_station_priv, lq_sta); - struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; - - /* Override starting rate (index 0) if needed for debug purposes */ - rs_dbgfs_set_mcs(lq_sta, &new_rate, index); - - /* Interpret new_rate (rate_n_flags) */ - rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, - &tbl_type, &rate_idx); - - if (priv && priv->bt_full_concurrent) { - /* 1x1 only */ - tbl_type.ant_type = - first_antenna(priv->nvm_data->valid_tx_ant); - } - - /* How many times should we repeat the initial rate? */ - if (is_legacy(tbl_type.lq_type)) { - ant_toggle_cnt = 1; - repeat_rate = IWL_NUMBER_TRY; - } else { - repeat_rate = min(IWL_HT_NUMBER_TRY, - LINK_QUAL_AGG_DISABLE_START_DEF - 1); - } - - lq_cmd->general_params.mimo_delimiter = - is_mimo(tbl_type.lq_type) ? 
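The rx_mask conversion done in iwl_rs_rate_init() above, as a stand-alone sketch (not part of the patch); FIRST_OFDM_RATE is a local stand-in for IWL_FIRST_OFDM_RATE.

#include <stdint.h>

#define FIRST_OFDM_RATE 4   /* IWL_RATE_6M_INDEX */

/*
 * Spread an 8-bit HT MCS mask onto the driver's rate indexes: shift up
 * one slot (the legacy table has 9 Mbps between 6 and 12 Mbps), keep
 * MCS 0 in place, force the 9 Mbps slot off, then rebase to the OFDM
 * rates.
 */
static uint16_t ht_rx_mask_to_rate_mask(uint8_t rx_mask)
{
    uint16_t m = (uint16_t)rx_mask << 1;

    m |= rx_mask & 0x1;
    m &= ~(uint16_t)0x2;
    return (uint16_t)(m << FIRST_OFDM_RATE);
}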
1 : 0; - - /* Fill 1st table entry (index 0) */ - lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); - - if (num_of_ant(tbl_type.ant_type) == 1) { - lq_cmd->general_params.single_stream_ant_msk = - tbl_type.ant_type; - } else if (num_of_ant(tbl_type.ant_type) == 2) { - lq_cmd->general_params.dual_stream_ant_msk = - tbl_type.ant_type; - } /* otherwise we don't modify the existing value */ - - index++; - repeat_rate--; - if (priv) { - if (priv->bt_full_concurrent) - valid_tx_ant = ANT_A; - else - valid_tx_ant = priv->nvm_data->valid_tx_ant; - } - - /* Fill rest of rate table */ - while (index < LINK_QUAL_MAX_RETRY_NUM) { - /* Repeat initial/next rate. - * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. - * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */ - while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { - if (is_legacy(tbl_type.lq_type)) { - if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) - ant_toggle_cnt++; - else if (priv && - rs_toggle_antenna(valid_tx_ant, - &new_rate, &tbl_type)) - ant_toggle_cnt = 1; - } - - /* Override next rate if needed for debug purposes */ - rs_dbgfs_set_mcs(lq_sta, &new_rate, index); - - /* Fill next table entry */ - lq_cmd->rs_table[index].rate_n_flags = - cpu_to_le32(new_rate); - repeat_rate--; - index++; - } - - rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, - &rate_idx); - - if (priv && priv->bt_full_concurrent) { - /* 1x1 only */ - tbl_type.ant_type = - first_antenna(priv->nvm_data->valid_tx_ant); - } - - /* Indicate to uCode which entries might be MIMO. - * If initial rate was MIMO, this will finally end up - * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ - if (is_mimo(tbl_type.lq_type)) - lq_cmd->general_params.mimo_delimiter = index; - - /* Get next rate */ - new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, - use_ht_possible); - - /* How many times should we repeat the next rate? */ - if (is_legacy(tbl_type.lq_type)) { - if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) - ant_toggle_cnt++; - else if (priv && - rs_toggle_antenna(valid_tx_ant, - &new_rate, &tbl_type)) - ant_toggle_cnt = 1; - - repeat_rate = IWL_NUMBER_TRY; - } else { - repeat_rate = IWL_HT_NUMBER_TRY; - } - - /* Don't allow HT rates after next pass. - * rs_get_lower_rate() will change type to LQ_A or LQ_G. 
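For reference, a simplified stand-alone sketch (not part of the patch) of the shape of the table the loop above produces: the current rate is repeated a few times (once for legacy, up to three times for HT, per the comments above), then the next lower fallback rate is used until every slot is filled; MAX_RETRY_NUM stands in for LINK_QUAL_MAX_RETRY_NUM, get_lower_rate for rs_get_lower_rate(), and the legacy antenna toggling is omitted.

#include <stdint.h>

#define MAX_RETRY_NUM 16    /* stand-in for LINK_QUAL_MAX_RETRY_NUM */

static void fill_retry_table(uint32_t table[MAX_RETRY_NUM],
                             uint32_t start_rate, int repeat,
                             uint32_t (*get_lower_rate)(uint32_t))
{
    uint32_t rate = start_rate;
    int i = 0, left = repeat;

    while (i < MAX_RETRY_NUM) {
        while (left-- > 0 && i < MAX_RETRY_NUM)
            table[i++] = rate;          /* repeat the current rate */
        rate = get_lower_rate(rate);    /* then fall back */
        left = repeat;
    }
}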
*/ - use_ht_possible = 0; - - /* Override next rate if needed for debug purposes */ - rs_dbgfs_set_mcs(lq_sta, &new_rate, index); - - /* Fill next table entry */ - lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); - - index++; - repeat_rate--; - } - - lq_cmd->agg_params.agg_frame_cnt_limit = - sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF; - lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; - - lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); - /* - * overwrite if needed, pass aggregation time limit - * to uCode in uSec - */ - if (priv && priv->lib->bt_params && - priv->lib->bt_params->agg_time_limit && - priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) - lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(priv->lib->bt_params->agg_time_limit); -} - -static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) -{ - return hw->priv; -} -/* rate scale requires free function to be implemented */ -static void rs_free(void *priv_rate) -{ - return; -} - -static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta, - void *priv_sta) -{ - struct iwl_op_mode *op_mode __maybe_unused = priv_r; - struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode); - - IWL_DEBUG_RATE(priv, "enter\n"); - IWL_DEBUG_RATE(priv, "leave\n"); -} - -#ifdef CONFIG_MAC80211_DEBUGFS -static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index) -{ - struct iwl_priv *priv; - u8 valid_tx_ant; - u8 ant_sel_tx; - - priv = lq_sta->drv; - valid_tx_ant = priv->nvm_data->valid_tx_ant; - if (lq_sta->dbg_fixed_rate) { - ant_sel_tx = - ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) - >> RATE_MCS_ANT_POS); - if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { - *rate_n_flags = lq_sta->dbg_fixed_rate; - IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); - } else { - lq_sta->dbg_fixed_rate = 0; - IWL_ERR(priv, - "Invalid antenna selection 0x%X, Valid is 0x%X\n", - ant_sel_tx, valid_tx_ant); - IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); - } - } else { - IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); - } -} - -static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_priv *priv; - char buf[64]; - size_t buf_size; - u32 parsed_rate; - - - priv = lq_sta->drv; - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - if (sscanf(buf, "%x", &parsed_rate) == 1) - lq_sta->dbg_fixed_rate = parsed_rate; - else - lq_sta->dbg_fixed_rate = 0; - - rs_program_fix_rate(priv, lq_sta); - - return count; -} - -static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int i = 0; - int index = 0; - ssize_t ret; - - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_priv *priv; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - - priv = lq_sta->drv; - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); - desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", - lq_sta->total_failed, lq_sta->total_success, - lq_sta->active_legacy_rate); - desc += sprintf(buff+desc, "fixed rate 0x%X\n", - lq_sta->dbg_fixed_rate); - desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", - (priv->nvm_data->valid_tx_ant & ANT_A) ? 
"ANT_A," : "", - (priv->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "", - (priv->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : ""); - desc += sprintf(buff+desc, "lq type %s\n", - (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); - if (is_Ht(tbl->lq_type)) { - desc += sprintf(buff + desc, " %s", - (is_siso(tbl->lq_type)) ? "SISO" : - ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3")); - desc += sprintf(buff + desc, " %s", - (tbl->is_ht40) ? "40MHz" : "20MHz"); - desc += sprintf(buff + desc, " %s %s %s\n", - (tbl->is_SGI) ? "SGI" : "", - (lq_sta->is_green) ? "GF enabled" : "", - (lq_sta->is_agg) ? "AGG on" : ""); - } - desc += sprintf(buff+desc, "last tx rate=0x%X\n", - lq_sta->last_rate_n_flags); - desc += sprintf(buff+desc, "general:" - "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", - lq_sta->lq.general_params.flags, - lq_sta->lq.general_params.mimo_delimiter, - lq_sta->lq.general_params.single_stream_ant_msk, - lq_sta->lq.general_params.dual_stream_ant_msk); - - desc += sprintf(buff+desc, "agg:" - "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", - le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), - lq_sta->lq.agg_params.agg_dis_start_th, - lq_sta->lq.agg_params.agg_frame_cnt_limit); - - desc += sprintf(buff+desc, - "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", - lq_sta->lq.general_params.start_rate_index[0], - lq_sta->lq.general_params.start_rate_index[1], - lq_sta->lq.general_params.start_rate_index[2], - lq_sta->lq.general_params.start_rate_index[3]); - - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { - index = iwl_hwrate_to_plcp_idx( - le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); - if (is_legacy(tbl->lq_type)) { - desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", - i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), - iwl_rate_mcs[index].mbps); - } else { - desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n", - i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), - iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs); - } - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_scale_table_ops = { - .write = rs_sta_dbgfs_scale_table_write, - .read = rs_sta_dbgfs_scale_table_read, - .open = simple_open, - .llseek = default_llseek, -}; -static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int i, j; - ssize_t ret; - - struct iwl_lq_sta *lq_sta = file->private_data; - - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - for (i = 0; i < LQ_SIZE; i++) { - desc += sprintf(buff+desc, - "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" - "rate=0x%X\n", - lq_sta->active_tbl == i ? 
"*" : "x", - lq_sta->lq_info[i].lq_type, - lq_sta->lq_info[i].is_SGI, - lq_sta->lq_info[i].is_ht40, - lq_sta->lq_info[i].is_dup, - lq_sta->is_green, - lq_sta->lq_info[i].current_rate); - for (j = 0; j < IWL_RATE_COUNT; j++) { - desc += sprintf(buff+desc, - "counter=%d success=%d %%=%d\n", - lq_sta->lq_info[i].win[j].counter, - lq_sta->lq_info[i].win[j].success_counter, - lq_sta->lq_info[i].win[j].success_ratio); - } - } - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_stats_table_ops = { - .read = rs_sta_dbgfs_stats_table_read, - .open = simple_open, - .llseek = default_llseek, -}; - -static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; - char buff[120]; - int desc = 0; - - if (is_Ht(tbl->lq_type)) - desc += sprintf(buff+desc, - "Bit Rate= %d Mb/s\n", - tbl->expected_tpt[lq_sta->last_txrate_idx]); - else - desc += sprintf(buff+desc, - "Bit Rate= %d Mb/s\n", - iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); - - return simple_read_from_buffer(user_buf, count, ppos, buff, desc); -} - -static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { - .read = rs_sta_dbgfs_rate_scale_data_read, - .open = simple_open, - .llseek = default_llseek, -}; - -static void rs_add_debugfs(void *priv, void *priv_sta, - struct dentry *dir) -{ - struct iwl_lq_sta *lq_sta = priv_sta; - lq_sta->rs_sta_dbgfs_scale_table_file = - debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, - lq_sta, &rs_sta_dbgfs_scale_table_ops); - lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", S_IRUSR, dir, - lq_sta, &rs_sta_dbgfs_stats_table_ops); - lq_sta->rs_sta_dbgfs_rate_scale_data_file = - debugfs_create_file("rate_scale_data", S_IRUSR, dir, - lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); - lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = - debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, - &lq_sta->tx_agg_tid_en); - -} - -static void rs_remove_debugfs(void *priv, void *priv_sta) -{ - struct iwl_lq_sta *lq_sta = priv_sta; - debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); -} -#endif - -/* - * Initialization of rate scaling information is done by driver after - * the station is added. Since mac80211 calls this function before a - * station is added we ignore it. 
- */ -static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, - struct cfg80211_chan_def *chandef, - struct ieee80211_sta *sta, void *priv_sta) -{ -} - -static const struct rate_control_ops rs_ops = { - .name = RS_NAME, - .tx_status = rs_tx_status, - .get_rate = rs_get_rate, - .rate_init = rs_rate_init_stub, - .alloc = rs_alloc, - .free = rs_free, - .alloc_sta = rs_alloc_sta, - .free_sta = rs_free_sta, -#ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = rs_add_debugfs, - .remove_sta_debugfs = rs_remove_debugfs, -#endif -}; - -int iwlagn_rate_control_register(void) -{ - return ieee80211_rate_control_register(&rs_ops); -} - -void iwlagn_rate_control_unregister(void) -{ - ieee80211_rate_control_unregister(&rs_ops); -} - diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h deleted file mode 100644 index f6bd25cad203..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/rs.h +++ /dev/null @@ -1,426 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_agn_rs_h__ -#define __iwl_agn_rs_h__ - -#include - -#include "iwl-config.h" - -#include "commands.h" - -struct iwl_rate_info { - u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ - u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ - u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ - u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ - u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ - u8 prev_ieee; /* previous rate in IEEE speeds */ - u8 next_ieee; /* next rate in IEEE speeds */ - u8 prev_rs; /* previous rate used in rs algo */ - u8 next_rs; /* next rate used in rs algo */ - u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ - u8 next_rs_tgg; /* next rate used in TGG rs algo */ -}; - -/* - * These serve as indexes into - * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; - */ -enum { - IWL_RATE_1M_INDEX = 0, - IWL_RATE_2M_INDEX, - IWL_RATE_5M_INDEX, - IWL_RATE_11M_INDEX, - IWL_RATE_6M_INDEX, - IWL_RATE_9M_INDEX, - IWL_RATE_12M_INDEX, - IWL_RATE_18M_INDEX, - IWL_RATE_24M_INDEX, - IWL_RATE_36M_INDEX, - IWL_RATE_48M_INDEX, - IWL_RATE_54M_INDEX, - IWL_RATE_60M_INDEX, - IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/ - IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */ - IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, - IWL_RATE_INVALID = IWL_RATE_COUNT, -}; - -enum { - IWL_RATE_6M_INDEX_TABLE = 0, - IWL_RATE_9M_INDEX_TABLE, - IWL_RATE_12M_INDEX_TABLE, - IWL_RATE_18M_INDEX_TABLE, - IWL_RATE_24M_INDEX_TABLE, - IWL_RATE_36M_INDEX_TABLE, - IWL_RATE_48M_INDEX_TABLE, - IWL_RATE_54M_INDEX_TABLE, - IWL_RATE_1M_INDEX_TABLE, - IWL_RATE_2M_INDEX_TABLE, - IWL_RATE_5M_INDEX_TABLE, - IWL_RATE_11M_INDEX_TABLE, - IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, -}; - -enum { - IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, - IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, - IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, - IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, -}; - -/* #define vs. enum to keep from defaulting to 'large integer' */ -#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) -#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) -#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) -#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) -#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) -#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) -#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) -#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) -#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) -#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) -#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) -#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) -#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) - -/* uCode API values for legacy bit rates, both OFDM and CCK */ -enum { - IWL_RATE_6M_PLCP = 13, - IWL_RATE_9M_PLCP = 15, - IWL_RATE_12M_PLCP = 5, - IWL_RATE_18M_PLCP = 7, - IWL_RATE_24M_PLCP = 9, - IWL_RATE_36M_PLCP = 11, - IWL_RATE_48M_PLCP = 1, - IWL_RATE_54M_PLCP = 3, - IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/ - IWL_RATE_1M_PLCP = 10, - IWL_RATE_2M_PLCP = 20, - IWL_RATE_5M_PLCP = 55, - IWL_RATE_11M_PLCP = 110, - /*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */ - /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ -}; - -/* uCode API values for OFDM high-throughput (HT) bit rates */ -enum { - IWL_RATE_SISO_6M_PLCP = 0, - IWL_RATE_SISO_12M_PLCP = 1, - IWL_RATE_SISO_18M_PLCP = 2, - IWL_RATE_SISO_24M_PLCP = 3, - IWL_RATE_SISO_36M_PLCP = 4, - IWL_RATE_SISO_48M_PLCP = 5, - IWL_RATE_SISO_54M_PLCP = 6, - IWL_RATE_SISO_60M_PLCP = 7, - IWL_RATE_MIMO2_6M_PLCP = 0x8, - IWL_RATE_MIMO2_12M_PLCP = 0x9, - IWL_RATE_MIMO2_18M_PLCP = 0xa, - IWL_RATE_MIMO2_24M_PLCP = 0xb, - IWL_RATE_MIMO2_36M_PLCP = 0xc, - IWL_RATE_MIMO2_48M_PLCP = 0xd, - IWL_RATE_MIMO2_54M_PLCP = 0xe, - IWL_RATE_MIMO2_60M_PLCP = 0xf, - IWL_RATE_MIMO3_6M_PLCP = 0x10, - IWL_RATE_MIMO3_12M_PLCP = 0x11, - IWL_RATE_MIMO3_18M_PLCP = 0x12, - IWL_RATE_MIMO3_24M_PLCP = 0x13, - IWL_RATE_MIMO3_36M_PLCP = 0x14, - IWL_RATE_MIMO3_48M_PLCP 
= 0x15, - IWL_RATE_MIMO3_54M_PLCP = 0x16, - IWL_RATE_MIMO3_60M_PLCP = 0x17, - IWL_RATE_SISO_INVM_PLCP, - IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, - IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, -}; - -/* MAC header values for bit rates */ -enum { - IWL_RATE_6M_IEEE = 12, - IWL_RATE_9M_IEEE = 18, - IWL_RATE_12M_IEEE = 24, - IWL_RATE_18M_IEEE = 36, - IWL_RATE_24M_IEEE = 48, - IWL_RATE_36M_IEEE = 72, - IWL_RATE_48M_IEEE = 96, - IWL_RATE_54M_IEEE = 108, - IWL_RATE_60M_IEEE = 120, - IWL_RATE_1M_IEEE = 2, - IWL_RATE_2M_IEEE = 4, - IWL_RATE_5M_IEEE = 11, - IWL_RATE_11M_IEEE = 22, -}; - -#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) - -#define IWL_INVALID_VALUE -1 - -#define IWL_MIN_RSSI_VAL -100 -#define IWL_MAX_RSSI_VAL 0 - -/* These values specify how many Tx frame attempts before - * searching for a new modulation mode */ -#define IWL_LEGACY_FAILURE_LIMIT 160 -#define IWL_LEGACY_SUCCESS_LIMIT 480 -#define IWL_LEGACY_TABLE_COUNT 160 - -#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 -#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 -#define IWL_NONE_LEGACY_TABLE_COUNT 1500 - -/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ -#define IWL_RS_GOOD_RATIO 12800 /* 100% */ -#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */ -#define IWL_RATE_HIGH_TH 10880 /* 85% */ -#define IWL_RATE_INCREASE_TH 6400 /* 50% */ -#define IWL_RATE_DECREASE_TH 1920 /* 15% */ - -/* possible actions when in legacy mode */ -#define IWL_LEGACY_SWITCH_ANTENNA1 0 -#define IWL_LEGACY_SWITCH_ANTENNA2 1 -#define IWL_LEGACY_SWITCH_SISO 2 -#define IWL_LEGACY_SWITCH_MIMO2_AB 3 -#define IWL_LEGACY_SWITCH_MIMO2_AC 4 -#define IWL_LEGACY_SWITCH_MIMO2_BC 5 -#define IWL_LEGACY_SWITCH_MIMO3_ABC 6 - -/* possible actions when in siso mode */ -#define IWL_SISO_SWITCH_ANTENNA1 0 -#define IWL_SISO_SWITCH_ANTENNA2 1 -#define IWL_SISO_SWITCH_MIMO2_AB 2 -#define IWL_SISO_SWITCH_MIMO2_AC 3 -#define IWL_SISO_SWITCH_MIMO2_BC 4 -#define IWL_SISO_SWITCH_GI 5 -#define IWL_SISO_SWITCH_MIMO3_ABC 6 - - -/* possible actions when in mimo mode */ -#define IWL_MIMO2_SWITCH_ANTENNA1 0 -#define IWL_MIMO2_SWITCH_ANTENNA2 1 -#define IWL_MIMO2_SWITCH_SISO_A 2 -#define IWL_MIMO2_SWITCH_SISO_B 3 -#define IWL_MIMO2_SWITCH_SISO_C 4 -#define IWL_MIMO2_SWITCH_GI 5 -#define IWL_MIMO2_SWITCH_MIMO3_ABC 6 - - -/* possible actions when in mimo3 mode */ -#define IWL_MIMO3_SWITCH_ANTENNA1 0 -#define IWL_MIMO3_SWITCH_ANTENNA2 1 -#define IWL_MIMO3_SWITCH_SISO_A 2 -#define IWL_MIMO3_SWITCH_SISO_B 3 -#define IWL_MIMO3_SWITCH_SISO_C 4 -#define IWL_MIMO3_SWITCH_MIMO2_AB 5 -#define IWL_MIMO3_SWITCH_MIMO2_AC 6 -#define IWL_MIMO3_SWITCH_MIMO2_BC 7 -#define IWL_MIMO3_SWITCH_GI 8 - - -#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI -#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC - -/*FIXME:RS:add possible actions for MIMO3*/ - -#define IWL_ACTION_LIMIT 3 /* # possible actions */ - -#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ - -/* load per tid defines for A-MPDU activation */ -#define IWL_AGG_TPT_THREHOLD 0 -#define IWL_AGG_LOAD_THRESHOLD 10 -#define IWL_AGG_ALL_TID 0xff -#define TID_QUEUE_CELL_SPACING 50 /*mS */ -#define TID_QUEUE_MAX_SIZE 20 -#define TID_ROUND_VALUE 5 /* mS */ - -#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) -#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) - -extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; - -enum iwl_table_type { - LQ_NONE, - LQ_G, /* legacy types */ - LQ_A, - LQ_SISO, /* high-throughput types */ - LQ_MIMO2, - LQ_MIMO3, - LQ_MAX, -}; - -#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) -#define is_siso(tbl) ((tbl) == LQ_SISO) -#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) -#define is_mimo3(tbl) ((tbl) == LQ_MIMO3) -#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl)) -#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) -#define is_a_band(tbl) ((tbl) == LQ_A) -#define is_g_and(tbl) ((tbl) == LQ_G) - -#define IWL_MAX_MCS_DISPLAY_SIZE 12 - -struct iwl_rate_mcs_info { - char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; - char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; -}; - -/** - * struct iwl_rate_scale_data -- tx success history for one rate - */ -struct iwl_rate_scale_data { - u64 data; /* bitmap of successful frames */ - s32 success_counter; /* number of frames successful */ - s32 success_ratio; /* per-cent * 128 */ - s32 counter; /* number of frames attempted */ - s32 average_tpt; /* success ratio * expected throughput */ - unsigned long stamp; -}; - -/** - * struct iwl_scale_tbl_info -- tx params and success history for all rates - * - * There are two of these in struct iwl_lq_sta, - * one for "active", and one for "search". - */ -struct iwl_scale_tbl_info { - enum iwl_table_type lq_type; - u8 ant_type; - u8 is_SGI; /* 1 = short guard interval */ - u8 is_ht40; /* 1 = 40 MHz channel width */ - u8 is_dup; /* 1 = duplicated data streams */ - u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ - u8 max_search; /* maximun number of tables we can search */ - const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ - u32 current_rate; /* rate_n_flags, uCode API format */ - struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ -}; - -struct iwl_traffic_load { - unsigned long time_stamp; /* age of the oldest statistics */ - u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time - * slice */ - u32 total; /* total num of packets during the - * last TID_MAX_TIME_DIFF */ - u8 queue_count; /* number of queues that has - * been used since the last cleanup */ - u8 head; /* start of the circular buffer */ -}; - -/** - * struct iwl_lq_sta -- driver's rate scaling private structure - * - * Pointer to this gets passed back and forth between driver and mac80211. - */ -struct iwl_lq_sta { - u8 active_tbl; /* index of active table, range 0-1 */ - u8 enable_counter; /* indicates HT mode */ - u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ - u8 search_better_tbl; /* 1: currently trying alternate mode */ - s32 last_tpt; - - /* The following determine when to search for a new mode */ - u32 table_count_limit; - u32 max_failure_limit; /* # failed frames before new search */ - u32 max_success_limit; /* # successful frames before new search */ - u32 table_count; - u32 total_failed; /* total failed frames, any/all rates */ - u32 total_success; /* total successful frames, any/all rates */ - u64 flush_timer; /* time staying in mode before new search */ - - u8 action_counter; /* # mode-switch actions tried */ - u8 is_green; - u8 is_dup; - enum ieee80211_band band; - - /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. 
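A stand-alone sketch (not part of the patch) of how one struct iwl_rate_scale_data window above can evolve as tx attempts are reported; it only illustrates the documented fields (64-bit success bitmap, counters, success_ratio as percent * 128), WINDOW_SIZE is an illustrative cap rather than the driver's own window limit, and -1 plays the role of IWL_INVALID_VALUE when nothing has been recorded.

#include <stdbool.h>
#include <stdint.h>

#define WINDOW_SIZE 64      /* illustrative cap on remembered attempts */

struct rate_window {
    uint64_t data;          /* bitmap of successful frames */
    int32_t counter;        /* attempts currently in the window */
    int32_t success_counter;
    int32_t success_ratio;  /* percent * 128, 12800 == 100% */
};

static void window_add_attempt(struct rate_window *w, bool acked)
{
    if (w->counter >= WINDOW_SIZE) {
        /* the oldest attempt falls out of the bitmap */
        if (w->data & (1ULL << (WINDOW_SIZE - 1)))
            w->success_counter--;
        w->counter--;
    }
    w->data = (w->data << 1) | (acked ? 1 : 0);
    w->counter++;
    if (acked)
        w->success_counter++;

    w->success_ratio = w->counter ?
        128 * (100 * w->success_counter) / w->counter : -1;
}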
*/ - u32 supp_rates; - u16 active_legacy_rate; - u16 active_siso_rate; - u16 active_mimo2_rate; - u16 active_mimo3_rate; - s8 max_rate_idx; /* Max rate set by user */ - u8 missed_rate_counter; - - struct iwl_link_quality_cmd lq; - struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ - struct iwl_traffic_load load[IWL_MAX_TID_COUNT]; - u8 tx_agg_tid_en; -#ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *rs_sta_dbgfs_scale_table_file; - struct dentry *rs_sta_dbgfs_stats_table_file; - struct dentry *rs_sta_dbgfs_rate_scale_data_file; - struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; - u32 dbg_fixed_rate; -#endif - struct iwl_priv *drv; - - /* used to be in sta_info */ - int last_txrate_idx; - /* last tx rate_n_flags */ - u32 last_rate_n_flags; - /* packets destined for this STA are aggregated */ - u8 is_agg; - /* BT traffic this sta was last updated in */ - u8 last_bt_traffic; -}; - -static inline u8 first_antenna(u8 mask) -{ - if (mask & ANT_A) - return ANT_A; - if (mask & ANT_B) - return ANT_B; - return ANT_C; -} - - -/* Initialize station's rate scaling information after adding station */ -void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, - u8 sta_id); - -/** - * iwl_rate_control_register - Register the rate control algorithm callbacks - * - * Since the rate control algorithm is hardware specific, there is no need - * or reason to place it as a stand alone module. The driver can call - * iwl_rate_control_register in order to register the rate control callbacks - * with the mac80211 subsystem. This should be performed prior to calling - * ieee80211_register_hw - * - */ -int iwlagn_rate_control_register(void); - -/** - * iwl_rate_control_unregister - Unregister the rate control callbacks - * - * This should be called after calling ieee80211_unregister_hw, but before - * the driver is unloaded. - */ -void iwlagn_rate_control_unregister(void); - -#endif /* __iwl_agn__rs__ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c deleted file mode 100644 index 4a45b0b594c7..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/rx.c +++ /dev/null @@ -1,1101 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portionhelp of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include -#include -#include -#include "iwl-io.h" -#include "dev.h" -#include "calib.h" -#include "agn.h" - -#define IWL_CMD_ENTRY(x) [x] = #x - -const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = { - IWL_CMD_ENTRY(REPLY_ALIVE), - IWL_CMD_ENTRY(REPLY_ERROR), - IWL_CMD_ENTRY(REPLY_ECHO), - IWL_CMD_ENTRY(REPLY_RXON), - IWL_CMD_ENTRY(REPLY_RXON_ASSOC), - IWL_CMD_ENTRY(REPLY_QOS_PARAM), - IWL_CMD_ENTRY(REPLY_RXON_TIMING), - IWL_CMD_ENTRY(REPLY_ADD_STA), - IWL_CMD_ENTRY(REPLY_REMOVE_STA), - IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA), - IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH), - IWL_CMD_ENTRY(REPLY_WEPKEY), - IWL_CMD_ENTRY(REPLY_TX), - IWL_CMD_ENTRY(REPLY_LEDS_CMD), - IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD), - IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD), - IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION), - IWL_CMD_ENTRY(COEX_EVENT_CMD), - IWL_CMD_ENTRY(REPLY_QUIET_CMD), - IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH), - IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD), - IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION), - IWL_CMD_ENTRY(POWER_TABLE_CMD), - IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION), - IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC), - IWL_CMD_ENTRY(REPLY_SCAN_CMD), - IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD), - IWL_CMD_ENTRY(SCAN_START_NOTIFICATION), - IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION), - IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION), - IWL_CMD_ENTRY(BEACON_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_TX_BEACON), - IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION), - IWL_CMD_ENTRY(QUIET_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD), - IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_BT_CONFIG), - IWL_CMD_ENTRY(REPLY_STATISTICS_CMD), - IWL_CMD_ENTRY(STATISTICS_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD), - IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION), - IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD), - IWL_CMD_ENTRY(SENSITIVITY_CMD), - IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD), - IWL_CMD_ENTRY(REPLY_RX_PHY_CMD), - IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD), - IWL_CMD_ENTRY(REPLY_COMPRESSED_BA), - IWL_CMD_ENTRY(CALIBRATION_CFG_CMD), - IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION), - IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD), - IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION), - IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD), - IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF), - IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE), - IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV), - IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS), - IWL_CMD_ENTRY(REPLY_WIPAN_RXON), - IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING), - IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC), - IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM), - IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY), - IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH), - IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION), - IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE), - IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS), - IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER), - IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS), - IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS), - IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL), - IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS), - IWL_CMD_ENTRY(REPLY_D3_CONFIG), -}; -#undef IWL_CMD_ENTRY - -/****************************************************************************** - * - * Generic RX handler implementations - * - ******************************************************************************/ - -static void 
iwlagn_rx_reply_error(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_error_resp *err_resp = (void *)pkt->data; - - IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) " - "seq 0x%04X ser 0x%08X\n", - le32_to_cpu(err_resp->error_type), - err_resp->cmd_id, - le16_to_cpu(err_resp->bad_cmd_seq_num), - le32_to_cpu(err_resp->error_info)); -} - -static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_csa_notification *csa = (void *)pkt->data; - /* - * MULTI-FIXME - * See iwlagn_mac_channel_switch. - */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_rxon_cmd *rxon = (void *)&ctx->active; - - if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - return; - - if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) { - rxon->channel = csa->channel; - ctx->staging.channel = csa->channel; - IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", - le16_to_cpu(csa->channel)); - iwl_chswitch_done(priv, true); - } else { - IWL_ERR(priv, "CSA notif (fail) : channel %d\n", - le16_to_cpu(csa->channel)); - iwl_chswitch_done(priv, false); - } -} - - -static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_spectrum_notification *report = (void *)pkt->data; - - if (!report->state) { - IWL_DEBUG_11H(priv, - "Spectrum Measure Notification: Start\n"); - return; - } - - memcpy(&priv->measure_report, report, sizeof(*report)); - priv->measurement_status |= MEASUREMENT_READY; -} - -static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ -#ifdef CONFIG_IWLWIFI_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_sleep_notification *sleep = (void *)pkt->data; - IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", - sleep->pm_sleep_mode, sleep->pm_wakeup_src); -#endif -} - -static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 __maybe_unused len = iwl_rx_packet_len(pkt); - IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " - "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len); - iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len); -} - -static void iwlagn_rx_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwlagn_beacon_notif *beacon = (void *)pkt->data; -#ifdef CONFIG_IWLWIFI_DEBUG - u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status); - u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); - - IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " - "tsf:0x%.8x%.8x rate:%d\n", - status & TX_STATUS_MSK, - beacon->beacon_notify_hdr.failure_frame, - le32_to_cpu(beacon->ibss_mgr_status), - le32_to_cpu(beacon->high_tsf), - le32_to_cpu(beacon->low_tsf), rate); -#endif - - priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); -} - -/** - * iwl_good_plcp_health - checks for plcp error. - * - * When the plcp error is exceeding the thresholds, reset the radio - * to improve the throughput. 
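A stand-alone sketch (not part of the patch) of the health test this helper performs in its body just below: the combined OFDM and OFDM-HT PLCP error delta is normalized to errors per 100 ms and compared against the configured threshold, a non-positive delta (firmware reset its counters) counts as healthy, and the IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE short-circuit is omitted here.

#include <stdbool.h>

static bool plcp_health_ok(int delta_errors, unsigned int msecs, int threshold)
{
    if (delta_errors <= 0)
        return true;    /* counters were reset; nothing to judge */
    return (delta_errors * 100 / (int)msecs) <= threshold;
}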
- */ -static bool iwlagn_good_plcp_health(struct iwl_priv *priv, - struct statistics_rx_phy *cur_ofdm, - struct statistics_rx_ht_phy *cur_ofdm_ht, - unsigned int msecs) -{ - int delta; - int threshold = priv->plcp_delta_threshold; - - if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { - IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); - return true; - } - - delta = le32_to_cpu(cur_ofdm->plcp_err) - - le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) + - le32_to_cpu(cur_ofdm_ht->plcp_err) - - le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err); - - /* Can be negative if firmware reset statistics */ - if (delta <= 0) - return true; - - if ((delta * 100 / msecs) > threshold) { - IWL_DEBUG_RADIO(priv, - "plcp health threshold %u delta %d msecs %u\n", - threshold, delta, msecs); - return false; - } - - return true; -} - -int iwl_force_rf_reset(struct iwl_priv *priv, bool external) -{ - struct iwl_rf_reset *rf_reset; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return -EAGAIN; - - if (!iwl_is_any_associated(priv)) { - IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n"); - return -ENOLINK; - } - - rf_reset = &priv->rf_reset; - rf_reset->reset_request_count++; - if (!external && rf_reset->last_reset_jiffies && - time_after(rf_reset->last_reset_jiffies + - IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) { - IWL_DEBUG_INFO(priv, "RF reset rejected\n"); - rf_reset->reset_reject_count++; - return -EAGAIN; - } - rf_reset->reset_success_count++; - rf_reset->last_reset_jiffies = jiffies; - - /* - * There is no easy and better way to force reset the radio, - * the only known method is switching channel which will force to - * reset and tune the radio. - * Use internal short scan (single channel) operation to should - * achieve this objective. - * Driver should reset the radio when number of consecutive missed - * beacon, or any other uCode error condition detected. - */ - IWL_DEBUG_INFO(priv, "perform radio reset.\n"); - iwl_internal_short_hw_scan(priv); - return 0; -} - - -static void iwlagn_recover_from_statistics(struct iwl_priv *priv, - struct statistics_rx_phy *cur_ofdm, - struct statistics_rx_ht_phy *cur_ofdm_ht, - struct statistics_tx *tx, - unsigned long stamp) -{ - unsigned int msecs; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies); - - /* Only gather statistics and update time stamp when not associated */ - if (!iwl_is_any_associated(priv)) - return; - - /* Do not check/recover when do not have enough statistics data */ - if (msecs < 99) - return; - - if (!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs)) - iwl_force_rf_reset(priv, false); -} - -/* Calculate noise level, based on measurements during network silence just - * before arriving beacon. This measurement can be done only if we know - * exactly when to expect beacons, therefore only when we're associated. 
*/ -static void iwlagn_rx_calc_noise(struct iwl_priv *priv) -{ - struct statistics_rx_non_phy *rx_info; - int num_active_rx = 0; - int total_silence = 0; - int bcn_silence_a, bcn_silence_b, bcn_silence_c; - int last_rx_noise; - - rx_info = &priv->statistics.rx_non_phy; - - bcn_silence_a = - le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; - bcn_silence_b = - le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; - bcn_silence_c = - le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; - - if (bcn_silence_a) { - total_silence += bcn_silence_a; - num_active_rx++; - } - if (bcn_silence_b) { - total_silence += bcn_silence_b; - num_active_rx++; - } - if (bcn_silence_c) { - total_silence += bcn_silence_c; - num_active_rx++; - } - - /* Average among active antennas */ - if (num_active_rx) - last_rx_noise = (total_silence / num_active_rx) - 107; - else - last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; - - IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", - bcn_silence_a, bcn_silence_b, bcn_silence_c, - last_rx_noise); -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -/* - * based on the assumption of all statistics counter are in DWORD - * FIXME: This function is for debugging, do not deal with - * the case of counters roll-over. - */ -static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta, - __le32 *max_delta, __le32 *accum, int size) -{ - int i; - - for (i = 0; - i < size / sizeof(__le32); - i++, prev++, cur++, delta++, max_delta++, accum++) { - if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) { - *delta = cpu_to_le32( - le32_to_cpu(*cur) - le32_to_cpu(*prev)); - le32_add_cpu(accum, le32_to_cpu(*delta)); - if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta)) - *max_delta = *delta; - } - } -} - -static void -iwlagn_accumulative_statistics(struct iwl_priv *priv, - struct statistics_general_common *common, - struct statistics_rx_non_phy *rx_non_phy, - struct statistics_rx_phy *rx_ofdm, - struct statistics_rx_ht_phy *rx_ofdm_ht, - struct statistics_rx_phy *rx_cck, - struct statistics_tx *tx, - struct statistics_bt_activity *bt_activity) -{ -#define ACCUM(_name) \ - accum_stats((__le32 *)&priv->statistics._name, \ - (__le32 *)_name, \ - (__le32 *)&priv->delta_stats._name, \ - (__le32 *)&priv->max_delta_stats._name, \ - (__le32 *)&priv->accum_stats._name, \ - sizeof(*_name)); - - ACCUM(common); - ACCUM(rx_non_phy); - ACCUM(rx_ofdm); - ACCUM(rx_ofdm_ht); - ACCUM(rx_cck); - ACCUM(tx); - if (bt_activity) - ACCUM(bt_activity); -#undef ACCUM -} -#else -static inline void -iwlagn_accumulative_statistics(struct iwl_priv *priv, - struct statistics_general_common *common, - struct statistics_rx_non_phy *rx_non_phy, - struct statistics_rx_phy *rx_ofdm, - struct statistics_rx_ht_phy *rx_ofdm_ht, - struct statistics_rx_phy *rx_cck, - struct statistics_tx *tx, - struct statistics_bt_activity *bt_activity) -{ -} -#endif - -static void iwlagn_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - unsigned long stamp = jiffies; - const int reg_recalib_period = 60; - int change; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 len = iwl_rx_packet_payload_len(pkt); - __le32 *flag; - struct statistics_general_common *common; - struct statistics_rx_non_phy *rx_non_phy; - struct statistics_rx_phy *rx_ofdm; - struct statistics_rx_ht_phy *rx_ofdm_ht; - struct statistics_rx_phy *rx_cck; - struct statistics_tx *tx; - struct statistics_bt_activity *bt_activity; - - IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n", - len); - - 
spin_lock(&priv->statistics.lock); - - if (len == sizeof(struct iwl_bt_notif_statistics)) { - struct iwl_bt_notif_statistics *stats; - stats = (void *)&pkt->data; - flag = &stats->flag; - common = &stats->general.common; - rx_non_phy = &stats->rx.general.common; - rx_ofdm = &stats->rx.ofdm; - rx_ofdm_ht = &stats->rx.ofdm_ht; - rx_cck = &stats->rx.cck; - tx = &stats->tx; - bt_activity = &stats->general.activity; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* handle this exception directly */ - priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills; - le32_add_cpu(&priv->statistics.accum_num_bt_kills, - le32_to_cpu(stats->rx.general.num_bt_kills)); -#endif - } else if (len == sizeof(struct iwl_notif_statistics)) { - struct iwl_notif_statistics *stats; - stats = (void *)&pkt->data; - flag = &stats->flag; - common = &stats->general.common; - rx_non_phy = &stats->rx.general; - rx_ofdm = &stats->rx.ofdm; - rx_ofdm_ht = &stats->rx.ofdm_ht; - rx_cck = &stats->rx.cck; - tx = &stats->tx; - bt_activity = NULL; - } else { - WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n", - len, sizeof(struct iwl_bt_notif_statistics), - sizeof(struct iwl_notif_statistics)); - spin_unlock(&priv->statistics.lock); - return; - } - - change = common->temperature != priv->statistics.common.temperature || - (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) != - (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK); - - iwlagn_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm, - rx_ofdm_ht, rx_cck, tx, bt_activity); - - iwlagn_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp); - - priv->statistics.flag = *flag; - memcpy(&priv->statistics.common, common, sizeof(*common)); - memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy)); - memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm)); - memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht)); - memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck)); - memcpy(&priv->statistics.tx, tx, sizeof(*tx)); -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (bt_activity) - memcpy(&priv->statistics.bt_activity, bt_activity, - sizeof(*bt_activity)); -#endif - - priv->rx_statistics_jiffies = stamp; - - set_bit(STATUS_STATISTICS, &priv->status); - - /* Reschedule the statistics timer to occur in - * reg_recalib_period seconds to ensure we get a - * thermal update even if the uCode doesn't give - * us one */ - mod_timer(&priv->statistics_periodic, jiffies + - msecs_to_jiffies(reg_recalib_period * 1000)); - - if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && - (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { - iwlagn_rx_calc_noise(priv); - queue_work(priv->workqueue, &priv->run_time_calib_work); - } - if (priv->lib->temperature && change) - priv->lib->temperature(priv); - - spin_unlock(&priv->statistics.lock); -} - -static void iwlagn_rx_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_notif_statistics *stats = (void *)pkt->data; - - if (le32_to_cpu(stats->flag) & UCODE_STATISTICS_CLEAR_MSK) { -#ifdef CONFIG_IWLWIFI_DEBUGFS - memset(&priv->accum_stats, 0, - sizeof(priv->accum_stats)); - memset(&priv->delta_stats, 0, - sizeof(priv->delta_stats)); - memset(&priv->max_delta_stats, 0, - sizeof(priv->max_delta_stats)); -#endif - IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); - } - - iwlagn_rx_statistics(priv, rxb); -} - -/* Handle notification from uCode that card's power state is changing - * due to software, hardware, or critical 
temperature RFKILL */ -static void iwlagn_rx_card_state_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; - u32 flags = le32_to_cpu(card_state_notif->flags); - unsigned long status = priv->status; - - IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? "Kill" : "On", - (flags & CT_CARD_DISABLED) ? - "Reached" : "Not reached"); - - if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | - CT_CARD_DISABLED)) { - - iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C, - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); - - if (!(flags & RXON_CARD_DISABLED)) { - iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C, - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); - } - if (flags & CT_CARD_DISABLED) - iwl_tt_enter_ct_kill(priv); - } - if (!(flags & CT_CARD_DISABLED)) - iwl_tt_exit_ct_kill(priv); - - if (flags & HW_CARD_DISABLED) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - - - if (!(flags & RXON_CARD_DISABLED)) - iwl_scan_cancel(priv); - - if ((test_bit(STATUS_RF_KILL_HW, &status) != - test_bit(STATUS_RF_KILL_HW, &priv->status))) - wiphy_rfkill_set_hw_state(priv->hw->wiphy, - test_bit(STATUS_RF_KILL_HW, &priv->status)); -} - -static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) - -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacon_notif *missed_beacon = (void *)pkt->data; - - if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > - priv->missed_beacon_threshold) { - IWL_DEBUG_CALIB(priv, - "missed bcn cnsq %d totl %d rcd %d expctd %d\n", - le32_to_cpu(missed_beacon->consecutive_missed_beacons), - le32_to_cpu(missed_beacon->total_missed_becons), - le32_to_cpu(missed_beacon->num_recvd_beacons), - le32_to_cpu(missed_beacon->num_expected_beacons)); - if (!test_bit(STATUS_SCANNING, &priv->status)) - iwl_init_sensitivity(priv); - } -} - -/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). - * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ -static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - priv->last_phy_res_valid = true; - priv->ampdu_ref++; - memcpy(&priv->last_phy_res, pkt->data, - sizeof(struct iwl_rx_phy_res)); -} - -/* - * returns non-zero if packet should be dropped - */ -static int iwlagn_set_decrypted_flag(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - u32 decrypt_res, - struct ieee80211_rx_status *stats) -{ - u16 fc = le16_to_cpu(hdr->frame_control); - - /* - * All contexts have the same setting here due to it being - * a module parameter, so OK to check any context. - */ - if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags & - RXON_FILTER_DIS_DECRYPT_MSK) - return 0; - - if (!(fc & IEEE80211_FCTL_PROTECTED)) - return 0; - - IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); - switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { - case RX_RES_STATUS_SEC_TYPE_TKIP: - /* The uCode has got a bad phase 1 Key, pushes the packet. - * Decryption will be done in SW. 
*/ - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_BAD_KEY_TTAK) - break; - - case RX_RES_STATUS_SEC_TYPE_WEP: - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_BAD_ICV_MIC) { - /* bad ICV, the packet is destroyed since the - * decryption is inplace, drop it */ - IWL_DEBUG_RX(priv, "Packet destroyed\n"); - return -1; - } - case RX_RES_STATUS_SEC_TYPE_CCMP: - if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == - RX_RES_STATUS_DECRYPT_OK) { - IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); - stats->flag |= RX_FLAG_DECRYPTED; - } - break; - - default: - break; - } - return 0; -} - -static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - u16 len, - u32 ampdu_status, - struct iwl_rx_cmd_buffer *rxb, - struct ieee80211_rx_status *stats) -{ - struct sk_buff *skb; - __le16 fc = hdr->frame_control; - struct iwl_rxon_context *ctx; - unsigned int hdrlen, fraglen; - - /* We only process data packets if the interface is open */ - if (unlikely(!priv->is_open)) { - IWL_DEBUG_DROP_LIMIT(priv, - "Dropping packet while interface is not open.\n"); - return; - } - - /* In case of HW accelerated crypto and bad decryption, drop */ - if (!iwlwifi_mod_params.sw_crypto && - iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats)) - return; - - /* Dont use dev_alloc_skb(), we'll have enough headroom once - * ieee80211_hdr pulled. - */ - skb = alloc_skb(128, GFP_ATOMIC); - if (!skb) { - IWL_ERR(priv, "alloc_skb failed\n"); - return; - } - /* If frame is small enough to fit in skb->head, pull it completely. - * If not, only pull ieee80211_hdr so that splice() or TCP coalesce - * are more efficient. - */ - hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr); - - memcpy(skb_put(skb, hdrlen), hdr, hdrlen); - fraglen = len - hdrlen; - - if (fraglen) { - int offset = (void *)hdr + hdrlen - - rxb_addr(rxb) + rxb_offset(rxb); - - skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, - fraglen, rxb->truesize); - } - - /* - * Wake any queues that were stopped due to a passive channel tx - * failure. This can happen because the regulatory enforcement in - * the device waits for a beacon before allowing transmission, - * sometimes even after already having transmitted frames for the - * association because the new RXON may reset the information. 
- */ - if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) { - for_each_context(priv, ctx) { - if (!ether_addr_equal(hdr->addr3, - ctx->active.bssid_addr)) - continue; - iwlagn_lift_passive_no_rx(priv); - } - } - - memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - - ieee80211_rx_napi(priv->hw, skb, priv->napi); -} - -static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) -{ - u32 decrypt_out = 0; - - if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == - RX_RES_STATUS_STATION_FOUND) - decrypt_out |= (RX_RES_STATUS_STATION_FOUND | - RX_RES_STATUS_NO_STATION_INFO_MISMATCH); - - decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); - - /* packet was not encrypted */ - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == - RX_RES_STATUS_SEC_TYPE_NONE) - return decrypt_out; - - /* packet was encrypted with unknown alg */ - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == - RX_RES_STATUS_SEC_TYPE_ERR) - return decrypt_out; - - /* decryption was not done in HW */ - if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != - RX_MPDU_RES_STATUS_DEC_DONE_MSK) - return decrypt_out; - - switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { - - case RX_RES_STATUS_SEC_TYPE_CCMP: - /* alg is CCM: check MIC only */ - if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) - /* Bad MIC */ - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; - else - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; - - break; - - case RX_RES_STATUS_SEC_TYPE_TKIP: - if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { - /* Bad TTAK */ - decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; - break; - } - /* fall through if TTAK OK */ - default: - if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; - else - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; - break; - } - - IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", - decrypt_in, decrypt_out); - - return decrypt_out; -} - -/* Calc max signal level (dBm) among 3 possible receivers */ -static int iwlagn_calc_rssi(struct iwl_priv *priv, - struct iwl_rx_phy_res *rx_resp) -{ - /* data from PHY/DSP regarding signal strength, etc., - * contents are always there, not configurable by host - */ - struct iwlagn_non_cfg_phy *ncphy = - (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf; - u32 val, rssi_a, rssi_b, rssi_c, max_rssi; - u8 agc; - - val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]); - agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS; - - /* Find max rssi among 3 possible receivers. - * These values are measured by the digital signal processor (DSP). - * They should stay fairly constant even as the signal strength varies, - * if the radio's automatic gain control (AGC) is working right. - * AGC value (see below) will provide the "interesting" info. - */ - val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]); - rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >> - IWLAGN_OFDM_RSSI_A_BIT_POS; - rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >> - IWLAGN_OFDM_RSSI_B_BIT_POS; - val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]); - rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >> - IWLAGN_OFDM_RSSI_C_BIT_POS; - - max_rssi = max_t(u32, rssi_a, rssi_b); - max_rssi = max_t(u32, max_rssi, rssi_c); - - IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", - rssi_a, rssi_b, rssi_c, max_rssi, agc); - - /* dBm = max_rssi dB - agc dB - constant. - * Higher AGC (higher radio gain) means lower signal. 
*/ - return max_rssi - agc - IWLAGN_RSSI_OFFSET; -} - -/* Called for REPLY_RX_MPDU_CMD */ -static void iwlagn_rx_reply_rx(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct ieee80211_hdr *header; - struct ieee80211_rx_status rx_status = {}; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_rx_phy_res *phy_res; - __le32 rx_pkt_status; - struct iwl_rx_mpdu_res_start *amsdu; - u32 len; - u32 ampdu_status; - u32 rate_n_flags; - - if (!priv->last_phy_res_valid) { - IWL_ERR(priv, "MPDU frame without cached PHY data\n"); - return; - } - phy_res = &priv->last_phy_res; - amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data; - header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu)); - len = le16_to_cpu(amsdu->byte_count); - rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len); - ampdu_status = iwlagn_translate_rx_status(priv, - le32_to_cpu(rx_pkt_status)); - - if ((unlikely(phy_res->cfg_phy_cnt > 20))) { - IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n", - phy_res->cfg_phy_cnt); - return; - } - - if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || - !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { - IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", - le32_to_cpu(rx_pkt_status)); - return; - } - - /* This will be used in several places later */ - rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); - - /* rx_status carries information about the packet to mac80211 */ - rx_status.mactime = le64_to_cpu(phy_res->timestamp); - rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - rx_status.freq = - ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), - rx_status.band); - rx_status.rate_idx = - iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); - rx_status.flag = 0; - - /* TSF isn't reliable. In order to allow smooth user experience, - * this W/A doesn't propagate it to the mac80211 */ - /*rx_status.flag |= RX_FLAG_MACTIME_START;*/ - - priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); - - /* Find max signal strength (dBm) among 3 antenna/receiver chains */ - rx_status.signal = iwlagn_calc_rssi(priv, phy_res); - - IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", - rx_status.signal, (unsigned long long)rx_status.mactime); - - /* - * "antenna number" - * - * It seems that the antenna field in the phy flags value - * is actually a bit field. This is undefined by radiotap, - * it wants an actual antenna number but I always get "7" - * for most legacy frames I receive indicating that the - * same frame was received on all three RX chains. - * - * I think this field should be removed in favor of a - * new 802.11n radiotap field "RX chains" that is defined - * as a bitmask. 
- */ - rx_status.antenna = - (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) - >> RX_RES_PHY_FLAGS_ANTENNA_POS; - - /* set the preamble flag if appropriate */ - if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; - - if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) { - /* - * We know which subframes of an A-MPDU belong - * together since we get a single PHY response - * from the firmware for all of them - */ - rx_status.flag |= RX_FLAG_AMPDU_DETAILS; - rx_status.ampdu_reference = priv->ampdu_ref; - } - - /* Set up the HT phy flags */ - if (rate_n_flags & RATE_MCS_HT_MSK) - rx_status.flag |= RX_FLAG_HT; - if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.flag |= RX_FLAG_40MHZ; - if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; - if (rate_n_flags & RATE_MCS_GF_MSK) - rx_status.flag |= RX_FLAG_HT_GF; - - iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, - rxb, &rx_status); -} - -static void iwlagn_rx_noa_notification(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_wipan_noa_data *new_data, *old_data; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->data; - - /* no condition -- we're in softirq */ - old_data = rcu_dereference_protected(priv->noa_data, true); - - if (noa_notif->noa_active) { - u32 len = le16_to_cpu(noa_notif->noa_attribute.length); - u32 copylen = len; - - /* EID, len, OUI, subtype */ - len += 1 + 1 + 3 + 1; - /* P2P id, P2P length */ - len += 1 + 2; - copylen += 1 + 2; - - new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC); - if (new_data) { - new_data->length = len; - new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC; - new_data->data[1] = len - 2; /* not counting EID, len */ - new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff; - new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff; - new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff; - new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P; - memcpy(&new_data->data[6], &noa_notif->noa_attribute, - copylen); - } - } else - new_data = NULL; - - rcu_assign_pointer(priv->noa_data, new_data); - - if (old_data) - kfree_rcu(old_data, rcu_head); -} - -/** - * iwl_setup_rx_handlers - Initialize Rx handler callbacks - * - * Setup the RX handlers for each of the reply types sent from the uCode - * to the host. - */ -void iwl_setup_rx_handlers(struct iwl_priv *priv) -{ - void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); - - handlers = priv->rx_handlers; - - handlers[REPLY_ERROR] = iwlagn_rx_reply_error; - handlers[CHANNEL_SWITCH_NOTIFICATION] = iwlagn_rx_csa; - handlers[SPECTRUM_MEASURE_NOTIFICATION] = - iwlagn_rx_spectrum_measure_notif; - handlers[PM_SLEEP_NOTIFICATION] = iwlagn_rx_pm_sleep_notif; - handlers[PM_DEBUG_STATISTIC_NOTIFIC] = - iwlagn_rx_pm_debug_statistics_notif; - handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif; - handlers[REPLY_ADD_STA] = iwl_add_sta_callback; - - handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification; - - /* - * The same handler is used for both the REPLY to a discrete - * statistics request from the host as well as for the periodic - * statistics notifications (after received beacons) from the uCode. 
- */ - handlers[REPLY_STATISTICS_CMD] = iwlagn_rx_reply_statistics; - handlers[STATISTICS_NOTIFICATION] = iwlagn_rx_statistics; - - iwl_setup_rx_scan_handlers(priv); - - handlers[CARD_STATE_NOTIFICATION] = iwlagn_rx_card_state_notif; - handlers[MISSED_BEACONS_NOTIFICATION] = - iwlagn_rx_missed_beacon_notif; - - /* Rx handlers */ - handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy; - handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx; - - /* block ack */ - handlers[REPLY_COMPRESSED_BA] = - iwlagn_rx_reply_compressed_ba; - - priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; - - /* set up notification wait support */ - iwl_notification_wait_init(&priv->notif_wait); - - /* Set up BT Rx handlers */ - if (priv->lib->bt_params) - iwlagn_bt_rx_handler_setup(priv); -} - -void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - /* - * Do the notification wait before RX handlers so - * even if the RX handler consumes the RXB we have - * access to it in the notification wait entry. - */ - iwl_notification_wait_notify(&priv->notif_wait, pkt); - - /* Based on type of command response or notification, - * handle those that need handling via function in - * rx_handlers table. See iwl_setup_rx_handlers() */ - if (priv->rx_handlers[pkt->hdr.cmd]) { - priv->rx_handlers_stats[pkt->hdr.cmd]++; - priv->rx_handlers[pkt->hdr.cmd](priv, rxb); - } else { - /* No handling needed */ - IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n", - iwl_dvm_get_cmd_string(pkt->hdr.cmd), - pkt->hdr.cmd); - } -} diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c deleted file mode 100644 index 85ceceb34fcc..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ /dev/null @@ -1,1572 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include "iwl-trans.h" -#include "iwl-modparams.h" -#include "dev.h" -#include "agn.h" -#include "calib.h" - -/* - * initialize rxon structure with default values from eeprom - */ -void iwl_connection_init_rx_config(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - memset(&ctx->staging, 0, sizeof(ctx->staging)); - - if (!ctx->vif) { - ctx->staging.dev_type = ctx->unused_devtype; - } else - switch (ctx->vif->type) { - case NL80211_IFTYPE_AP: - ctx->staging.dev_type = ctx->ap_devtype; - break; - - case NL80211_IFTYPE_STATION: - ctx->staging.dev_type = ctx->station_devtype; - ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; - break; - - case NL80211_IFTYPE_ADHOC: - ctx->staging.dev_type = ctx->ibss_devtype; - ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; - ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | - RXON_FILTER_ACCEPT_GRP_MSK; - break; - - case NL80211_IFTYPE_MONITOR: - ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER; - break; - - default: - IWL_ERR(priv, "Unsupported interface type %d\n", - ctx->vif->type); - break; - } - -#if 0 - /* TODO: Figure out when short_preamble would be set and cache from - * that */ - if (!hw_to_local(priv->hw)->short_preamble) - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; -#endif - - ctx->staging.channel = - cpu_to_le16(priv->hw->conf.chandef.chan->hw_value); - priv->band = priv->hw->conf.chandef.chan->band; - - iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); - - /* clear both MIX and PURE40 mode flag */ - ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | - RXON_FLG_CHANNEL_MODE_PURE_40); - if (ctx->vif) - memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); - - ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; - ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; - ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; -} - -static int iwlagn_disable_bss(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct iwl_rxon_cmd *send) -{ - __le32 old_filter = send->filter_flags; - int ret; - - send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, - 0, sizeof(*send), send); - - send->filter_flags = old_filter; - - if (ret) - IWL_DEBUG_QUIET_RFKILL(priv, - "Error clearing ASSOC_MSK on BSS (%d)\n", ret); - - return ret; -} - -static int iwlagn_disable_pan(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct iwl_rxon_cmd *send) -{ - struct iwl_notification_wait disable_wait; - __le32 old_filter = send->filter_flags; - u8 old_dev_type = send->dev_type; - int ret; - static const u16 deactivate_cmd[] = { - REPLY_WIPAN_DEACTIVATION_COMPLETE - }; - - iwl_init_notification_wait(&priv->notif_wait, &disable_wait, - deactivate_cmd, ARRAY_SIZE(deactivate_cmd), - NULL, NULL); - - send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - send->dev_type = RXON_DEV_TYPE_P2P; - ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, - 0, sizeof(*send), send); - - send->filter_flags = old_filter; - send->dev_type = old_dev_type; - - if (ret) { - IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); - iwl_remove_notification(&priv->notif_wait, &disable_wait); - } else { - ret = iwl_wait_notification(&priv->notif_wait, - &disable_wait, HZ); - if (ret) - IWL_ERR(priv, "Timed out waiting for PAN disable\n"); - } - - return ret; -} - -static int iwlagn_disconn_pan(struct iwl_priv *priv, - 
struct iwl_rxon_context *ctx, - struct iwl_rxon_cmd *send) -{ - __le32 old_filter = send->filter_flags; - int ret; - - send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, - sizeof(*send), send); - - send->filter_flags = old_filter; - - return ret; -} - -static void iwlagn_update_qos(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int ret; - - if (!ctx->is_active) - return; - - ctx->qos_data.def_qos_parm.qos_flags = 0; - - if (ctx->qos_data.qos_active) - ctx->qos_data.def_qos_parm.qos_flags |= - QOS_PARAM_FLG_UPDATE_EDCA_MSK; - - if (ctx->ht.enabled) - ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; - - IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", - ctx->qos_data.qos_active, - ctx->qos_data.def_qos_parm.qos_flags); - - ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0, - sizeof(struct iwl_qosparam_cmd), - &ctx->qos_data.def_qos_parm); - if (ret) - IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); -} - -static int iwlagn_update_beacon(struct iwl_priv *priv, - struct ieee80211_vif *vif) -{ - lockdep_assert_held(&priv->mutex); - - dev_kfree_skb(priv->beacon_skb); - priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif); - if (!priv->beacon_skb) - return -ENOMEM; - return iwlagn_send_beacon_cmd(priv); -} - -static int iwlagn_send_rxon_assoc(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int ret = 0; - struct iwl_rxon_assoc_cmd rxon_assoc; - const struct iwl_rxon_cmd *rxon1 = &ctx->staging; - const struct iwl_rxon_cmd *rxon2 = &ctx->active; - - if ((rxon1->flags == rxon2->flags) && - (rxon1->filter_flags == rxon2->filter_flags) && - (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && - (rxon1->ofdm_ht_single_stream_basic_rates == - rxon2->ofdm_ht_single_stream_basic_rates) && - (rxon1->ofdm_ht_dual_stream_basic_rates == - rxon2->ofdm_ht_dual_stream_basic_rates) && - (rxon1->ofdm_ht_triple_stream_basic_rates == - rxon2->ofdm_ht_triple_stream_basic_rates) && - (rxon1->acquisition_data == rxon2->acquisition_data) && - (rxon1->rx_chain == rxon2->rx_chain) && - (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { - IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); - return 0; - } - - rxon_assoc.flags = ctx->staging.flags; - rxon_assoc.filter_flags = ctx->staging.filter_flags; - rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; - rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; - rxon_assoc.reserved1 = 0; - rxon_assoc.reserved2 = 0; - rxon_assoc.reserved3 = 0; - rxon_assoc.ofdm_ht_single_stream_basic_rates = - ctx->staging.ofdm_ht_single_stream_basic_rates; - rxon_assoc.ofdm_ht_dual_stream_basic_rates = - ctx->staging.ofdm_ht_dual_stream_basic_rates; - rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; - rxon_assoc.ofdm_ht_triple_stream_basic_rates = - ctx->staging.ofdm_ht_triple_stream_basic_rates; - rxon_assoc.acquisition_data = ctx->staging.acquisition_data; - - ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd, - CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc); - return ret; -} - -static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) -{ - u16 new_val; - u16 beacon_factor; - - /* - * If mac80211 hasn't given us a beacon interval, program - * the default into the device (not checking this here - * would cause the adjustment below to return the maximum - * value, which may break PAN.) 
- */ - if (!beacon_val) - return DEFAULT_BEACON_INTERVAL; - - /* - * If the beacon interval we obtained from the peer - * is too large, we'll have to wake up more often - * (and in IBSS case, we'll beacon too much) - * - * For example, if max_beacon_val is 4096, and the - * requested beacon interval is 7000, we'll have to - * use 3500 to be able to wake up on the beacons. - * - * This could badly influence beacon detection stats. - */ - - beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; - new_val = beacon_val / beacon_factor; - - if (!new_val) - new_val = max_beacon_val; - - return new_val; -} - -static int iwl_send_rxon_timing(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - u64 tsf; - s32 interval_tm, rem; - struct ieee80211_conf *conf = NULL; - u16 beacon_int; - struct ieee80211_vif *vif = ctx->vif; - - conf = &priv->hw->conf; - - lockdep_assert_held(&priv->mutex); - - memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); - - ctx->timing.timestamp = cpu_to_le64(priv->timestamp); - ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); - - beacon_int = vif ? vif->bss_conf.beacon_int : 0; - - /* - * TODO: For IBSS we need to get atim_window from mac80211, - * for now just always use 0 - */ - ctx->timing.atim_window = 0; - - if (ctx->ctxid == IWL_RXON_CTX_PAN && - (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) && - iwl_is_associated(priv, IWL_RXON_CTX_BSS) && - priv->contexts[IWL_RXON_CTX_BSS].vif && - priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) { - ctx->timing.beacon_interval = - priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval; - beacon_int = le16_to_cpu(ctx->timing.beacon_interval); - } else if (ctx->ctxid == IWL_RXON_CTX_BSS && - iwl_is_associated(priv, IWL_RXON_CTX_PAN) && - priv->contexts[IWL_RXON_CTX_PAN].vif && - priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int && - (!iwl_is_associated_ctx(ctx) || !ctx->vif || - !ctx->vif->bss_conf.beacon_int)) { - ctx->timing.beacon_interval = - priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval; - beacon_int = le16_to_cpu(ctx->timing.beacon_interval); - } else { - beacon_int = iwl_adjust_beacon_interval(beacon_int, - IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT); - ctx->timing.beacon_interval = cpu_to_le16(beacon_int); - } - - ctx->beacon_int = beacon_int; - - tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ - interval_tm = beacon_int * TIME_UNIT; - rem = do_div(tsf, interval_tm); - ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); - - ctx->timing.dtim_period = vif ? 
(vif->bss_conf.dtim_period ?: 1) : 1; - - IWL_DEBUG_ASSOC(priv, - "beacon interval %d beacon timer %d beacon tim %d\n", - le16_to_cpu(ctx->timing.beacon_interval), - le32_to_cpu(ctx->timing.beacon_init_val), - le16_to_cpu(ctx->timing.atim_window)); - - return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd, - 0, sizeof(ctx->timing), &ctx->timing); -} - -static int iwlagn_rxon_disconn(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int ret; - struct iwl_rxon_cmd *active = (void *)&ctx->active; - - if (ctx->ctxid == IWL_RXON_CTX_BSS) { - ret = iwlagn_disable_bss(priv, ctx, &ctx->staging); - } else { - ret = iwlagn_disable_pan(priv, ctx, &ctx->staging); - if (ret) - return ret; - if (ctx->vif) { - ret = iwl_send_rxon_timing(priv, ctx); - if (ret) { - IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); - return ret; - } - ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging); - } - } - if (ret) - return ret; - - /* - * Un-assoc RXON clears the station table and WEP - * keys, so we have to restore those afterwards. - */ - iwl_clear_ucode_stations(priv, ctx); - /* update -- might need P2P now */ - iwl_update_bcast_station(priv, ctx); - iwl_restore_stations(priv, ctx); - ret = iwl_restore_default_wep_keys(priv, ctx); - if (ret) { - IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); - return ret; - } - - memcpy(active, &ctx->staging, sizeof(*active)); - return 0; -} - -static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) -{ - int ret; - s8 prev_tx_power; - bool defer; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED) - return 0; - - lockdep_assert_held(&priv->mutex); - - if (priv->tx_power_user_lmt == tx_power && !force) - return 0; - - if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { - IWL_WARN(priv, - "Requested user TXPOWER %d below lower limit %d.\n", - tx_power, - IWLAGN_TX_POWER_TARGET_POWER_MIN); - return -EINVAL; - } - - if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) { - IWL_WARN(priv, - "Requested user TXPOWER %d above upper limit %d.\n", - tx_power, priv->nvm_data->max_tx_pwr_half_dbm); - return -EINVAL; - } - - if (!iwl_is_ready_rf(priv)) - return -EIO; - - /* scan complete and commit_rxon use tx_power_next value, - * it always need to be updated for newest request */ - priv->tx_power_next = tx_power; - - /* do not set tx power when scanning or channel changing */ - defer = test_bit(STATUS_SCANNING, &priv->status) || - memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); - if (defer && !force) { - IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); - return 0; - } - - prev_tx_power = priv->tx_power_user_lmt; - priv->tx_power_user_lmt = tx_power; - - ret = iwlagn_send_tx_power(priv); - - /* if fail to set tx_power, restore the orig. 
tx power */ - if (ret) { - priv->tx_power_user_lmt = prev_tx_power; - priv->tx_power_next = prev_tx_power; - } - return ret; -} - -static int iwlagn_rxon_connect(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int ret; - struct iwl_rxon_cmd *active = (void *)&ctx->active; - - /* RXON timing must be before associated RXON */ - if (ctx->ctxid == IWL_RXON_CTX_BSS) { - ret = iwl_send_rxon_timing(priv, ctx); - if (ret) { - IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); - return ret; - } - } - /* QoS info may be cleared by previous un-assoc RXON */ - iwlagn_update_qos(priv, ctx); - - /* - * We'll run into this code path when beaconing is - * enabled, but then we also need to send the beacon - * to the device. - */ - if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) { - ret = iwlagn_update_beacon(priv, ctx->vif); - if (ret) { - IWL_ERR(priv, - "Error sending required beacon (%d)!\n", - ret); - return ret; - } - } - - priv->start_calib = 0; - /* - * Apply the new configuration. - * - * Associated RXON doesn't clear the station table in uCode, - * so we don't need to restore stations etc. after this. - */ - ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, - sizeof(struct iwl_rxon_cmd), &ctx->staging); - if (ret) { - IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); - return ret; - } - memcpy(active, &ctx->staging, sizeof(*active)); - - /* IBSS beacon needs to be sent after setting assoc */ - if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC)) - if (iwlagn_update_beacon(priv, ctx->vif)) - IWL_ERR(priv, "Error sending IBSS beacon\n"); - iwl_init_sensitivity(priv); - - /* - * If we issue a new RXON command which required a tune then - * we must send a new TXPOWER command or we won't be able to - * Tx any frames. - * - * It's expected we set power here if channel is changing. - */ - ret = iwl_set_tx_power(priv, priv->tx_power_next, true); - if (ret) { - IWL_ERR(priv, "Error sending TX power (%d)\n", ret); - return ret; - } - - if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && - priv->cfg->ht_params && priv->cfg->ht_params->smps_mode) - ieee80211_request_smps(ctx->vif, - priv->cfg->ht_params->smps_mode); - - return 0; -} - -int iwlagn_set_pan_params(struct iwl_priv *priv) -{ - struct iwl_wipan_params_cmd cmd; - struct iwl_rxon_context *ctx_bss, *ctx_pan; - int slot0 = 300, slot1 = 0; - int ret; - - if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS)) - return 0; - - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - - lockdep_assert_held(&priv->mutex); - - ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS]; - ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN]; - - /* - * If the PAN context is inactive, then we don't need - * to update the PAN parameters, the last thing we'll - * have done before it goes inactive is making the PAN - * parameters be WLAN-only. - */ - if (!ctx_pan->is_active) - return 0; - - memset(&cmd, 0, sizeof(cmd)); - - /* only 2 slots are currently allowed */ - cmd.num_slots = 2; - - cmd.slots[0].type = 0; /* BSS */ - cmd.slots[1].type = 1; /* PAN */ - - if (ctx_bss->vif && ctx_pan->vif) { - int bcnint = ctx_pan->beacon_int; - int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; - - /* should be set, but seems unused?? 
*/ - cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE); - - if (ctx_pan->vif->type == NL80211_IFTYPE_AP && - bcnint && - bcnint != ctx_bss->beacon_int) { - IWL_ERR(priv, - "beacon intervals don't match (%d, %d)\n", - ctx_bss->beacon_int, ctx_pan->beacon_int); - } else - bcnint = max_t(int, bcnint, - ctx_bss->beacon_int); - if (!bcnint) - bcnint = DEFAULT_BEACON_INTERVAL; - slot0 = bcnint / 2; - slot1 = bcnint - slot0; - - if (test_bit(STATUS_SCAN_HW, &priv->status) || - (!ctx_bss->vif->bss_conf.idle && - !ctx_bss->vif->bss_conf.assoc)) { - slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; - slot1 = IWL_MIN_SLOT_TIME; - } else if (!ctx_pan->vif->bss_conf.idle && - !ctx_pan->vif->bss_conf.assoc) { - slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; - slot0 = IWL_MIN_SLOT_TIME; - } - } else if (ctx_pan->vif) { - slot0 = 0; - slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) * - ctx_pan->beacon_int; - slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); - - if (test_bit(STATUS_SCAN_HW, &priv->status)) { - slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; - slot1 = IWL_MIN_SLOT_TIME; - } - } - - cmd.slots[0].width = cpu_to_le16(slot0); - cmd.slots[1].width = cpu_to_le16(slot1); - - ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0, - sizeof(cmd), &cmd); - if (ret) - IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); - - return ret; -} - -static void _iwl_set_rxon_ht(struct iwl_priv *priv, - struct iwl_ht_config *ht_conf, - struct iwl_rxon_context *ctx) -{ - struct iwl_rxon_cmd *rxon = &ctx->staging; - - if (!ctx->ht.enabled) { - rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | - RXON_FLG_HT40_PROT_MSK | - RXON_FLG_HT_PROT_MSK); - return; - } - - /* FIXME: if the definition of ht.protection changed, the "translation" - * will be needed for rxon->flags - */ - rxon->flags |= cpu_to_le32(ctx->ht.protection << - RXON_FLG_HT_OPERATING_MODE_POS); - - /* Set up channel bandwidth: - * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ - /* clear the HT channel mode before set the mode */ - rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); - if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) { - /* pure ht40 */ - if (ctx->ht.protection == - IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { - rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; - /* - * Note: control channel is opposite of extension - * channel - */ - switch (ctx->ht.extension_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rxon->flags &= - ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rxon->flags |= - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - break; - } - } else { - /* - * Note: control channel is opposite of extension - * channel - */ - switch (ctx->ht.extension_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rxon->flags &= - ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); - rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; - break; - case IEEE80211_HT_PARAM_CHA_SEC_NONE: - default: - /* - * channel location only valid if in Mixed - * mode - */ - IWL_ERR(priv, - "invalid extension channel offset\n"); - break; - } - } - } else { - rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; - } - - iwlagn_set_rxon_chain(priv, ctx); - - IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " - "extension channel offset 0x%x\n", - le32_to_cpu(rxon->flags), ctx->ht.protection, - ctx->ht.extension_chan_offset); -} - 
-void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) -{ - struct iwl_rxon_context *ctx; - - for_each_context(priv, ctx) - _iwl_set_rxon_ht(priv, ht_conf, ctx); -} - -/** - * iwl_set_rxon_channel - Set the band and channel values in staging RXON - * @ch: requested channel as a pointer to struct ieee80211_channel - - * NOTE: Does not commit to the hardware; it sets appropriate bit fields - * in the staging RXON flag structure based on the ch->band - */ -void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, - struct iwl_rxon_context *ctx) -{ - enum ieee80211_band band = ch->band; - u16 channel = ch->hw_value; - - if ((le16_to_cpu(ctx->staging.channel) == channel) && - (priv->band == band)) - return; - - ctx->staging.channel = cpu_to_le16(channel); - if (band == IEEE80211_BAND_5GHZ) - ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; - else - ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; - - priv->band = band; - - IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); - -} - -void iwl_set_flags_for_band(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - enum ieee80211_band band, - struct ieee80211_vif *vif) -{ - if (band == IEEE80211_BAND_5GHZ) { - ctx->staging.flags &= - ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK - | RXON_FLG_CCK_MSK); - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - } else { - /* Copied from iwl_post_associate() */ - if (vif && vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; - ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; - ctx->staging.flags &= ~RXON_FLG_CCK_MSK; - } -} - -static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, int hw_decrypt) -{ - struct iwl_rxon_cmd *rxon = &ctx->staging; - - if (hw_decrypt) - rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; - else - rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; - -} - -/* validate RXON structure is valid */ -static int iwl_check_rxon_cmd(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct iwl_rxon_cmd *rxon = &ctx->staging; - u32 errors = 0; - - if (rxon->flags & RXON_FLG_BAND_24G_MSK) { - if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { - IWL_WARN(priv, "check 2.4G: wrong narrow\n"); - errors |= BIT(0); - } - if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { - IWL_WARN(priv, "check 2.4G: wrong radar\n"); - errors |= BIT(1); - } - } else { - if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { - IWL_WARN(priv, "check 5.2G: not short slot!\n"); - errors |= BIT(2); - } - if (rxon->flags & RXON_FLG_CCK_MSK) { - IWL_WARN(priv, "check 5.2G: CCK!\n"); - errors |= BIT(3); - } - } - if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { - IWL_WARN(priv, "mac/bssid mcast!\n"); - errors |= BIT(4); - } - - /* make sure basic rates 6Mbps and 1Mbps are supported */ - if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && - (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { - IWL_WARN(priv, "neither 1 nor 6 are basic\n"); - errors |= BIT(5); - } - - if (le16_to_cpu(rxon->assoc_id) > 2007) { - IWL_WARN(priv, "aid > 2007\n"); - errors |= BIT(6); - } - - if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) - == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { - IWL_WARN(priv, "CCK and short slot\n"); - errors |= BIT(7); - } - - if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) - == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { - IWL_WARN(priv, 
"CCK and auto detect\n"); - errors |= BIT(8); - } - - if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | - RXON_FLG_TGG_PROTECT_MSK)) == - RXON_FLG_TGG_PROTECT_MSK) { - IWL_WARN(priv, "TGg but no auto-detect\n"); - errors |= BIT(9); - } - - if (rxon->channel == 0) { - IWL_WARN(priv, "zero channel is invalid\n"); - errors |= BIT(10); - } - - WARN(errors, "Invalid RXON (%#x), channel %d", - errors, le16_to_cpu(rxon->channel)); - - return errors ? -EINVAL : 0; -} - -/** - * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed - * @priv: staging_rxon is compared to active_rxon - * - * If the RXON structure is changing enough to require a new tune, - * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that - * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. - */ -static int iwl_full_rxon_required(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - const struct iwl_rxon_cmd *staging = &ctx->staging; - const struct iwl_rxon_cmd *active = &ctx->active; - -#define CHK(cond) \ - if ((cond)) { \ - IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ - return 1; \ - } - -#define CHK_NEQ(c1, c2) \ - if ((c1) != (c2)) { \ - IWL_DEBUG_INFO(priv, "need full RXON - " \ - #c1 " != " #c2 " - %d != %d\n", \ - (c1), (c2)); \ - return 1; \ - } - - /* These items are only settable from the full RXON command */ - CHK(!iwl_is_associated_ctx(ctx)); - CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr)); - CHK(!ether_addr_equal(staging->node_addr, active->node_addr)); - CHK(!ether_addr_equal(staging->wlap_bssid_addr, - active->wlap_bssid_addr)); - CHK_NEQ(staging->dev_type, active->dev_type); - CHK_NEQ(staging->channel, active->channel); - CHK_NEQ(staging->air_propagation, active->air_propagation); - CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, - active->ofdm_ht_single_stream_basic_rates); - CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, - active->ofdm_ht_dual_stream_basic_rates); - CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates, - active->ofdm_ht_triple_stream_basic_rates); - CHK_NEQ(staging->assoc_id, active->assoc_id); - - /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can - * be updated with the RXON_ASSOC command -- however only some - * flag transitions are allowed using RXON_ASSOC */ - - /* Check if we are not switching bands */ - CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, - active->flags & RXON_FLG_BAND_24G_MSK); - - /* Check if we are switching association toggle */ - CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, - active->filter_flags & RXON_FILTER_ASSOC_MSK); - -#undef CHK -#undef CHK_NEQ - - return 0; -} - -#ifdef CONFIG_IWLWIFI_DEBUG -void iwl_print_rx_config_cmd(struct iwl_priv *priv, - enum iwl_rxon_context_id ctxid) -{ - struct iwl_rxon_context *ctx = &priv->contexts[ctxid]; - struct iwl_rxon_cmd *rxon = &ctx->staging; - - IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); - iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); - IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", - le16_to_cpu(rxon->channel)); - IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", - le32_to_cpu(rxon->flags)); - IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", - le32_to_cpu(rxon->filter_flags)); - IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); - IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", - rxon->ofdm_basic_rates); - IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", - rxon->cck_basic_rates); - IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); - 
IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); - IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", - le16_to_cpu(rxon->assoc_id)); -} -#endif - -static void iwl_calc_basic_rates(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int lowest_present_ofdm = 100; - int lowest_present_cck = 100; - u8 cck = 0; - u8 ofdm = 0; - - if (ctx->vif) { - struct ieee80211_supported_band *sband; - unsigned long basic = ctx->vif->bss_conf.basic_rates; - int i; - - sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band]; - - for_each_set_bit(i, &basic, BITS_PER_LONG) { - int hw = sband->bitrates[i].hw_value; - if (hw >= IWL_FIRST_OFDM_RATE) { - ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); - if (lowest_present_ofdm > hw) - lowest_present_ofdm = hw; - } else { - BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); - - cck |= BIT(hw); - if (lowest_present_cck > hw) - lowest_present_cck = hw; - } - } - } - - /* - * Now we've got the basic rates as bitmaps in the ofdm and cck - * variables. This isn't sufficient though, as there might not - * be all the right rates in the bitmap. E.g. if the only basic - * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps - * and 6 Mbps because the 802.11-2007 standard says in 9.6: - * - * [...] a STA responding to a received frame shall transmit - * its Control Response frame [...] at the highest rate in the - * BSSBasicRateSet parameter that is less than or equal to the - * rate of the immediately previous frame in the frame exchange - * sequence ([...]) and that is of the same modulation class - * ([...]) as the received frame. If no rate contained in the - * BSSBasicRateSet parameter meets these conditions, then the - * control frame sent in response to a received frame shall be - * transmitted at the highest mandatory rate of the PHY that is - * less than or equal to the rate of the received frame, and - * that is of the same modulation class as the received frame. - * - * As a consequence, we need to add all mandatory rates that are - * lower than all of the basic rates to these bitmaps. - */ - - if (IWL_RATE_24M_INDEX < lowest_present_ofdm) - ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE; - if (IWL_RATE_12M_INDEX < lowest_present_ofdm) - ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE; - /* 6M already there or needed so always add */ - ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE; - - /* - * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. - * Note, however: - * - if no CCK rates are basic, it must be ERP since there must - * be some basic rates at all, so they're OFDM => ERP PHY - * (or we're in 5 GHz, and the cck bitmap will never be used) - * - if 11M is a basic rate, it must be ERP as well, so add 5.5M - * - if 5.5M is basic, 1M and 2M are mandatory - * - if 2M is basic, 1M is mandatory - * - if 1M is basic, that's the only valid ACK rate. - * As a consequence, it's not as complicated as it sounds, just add - * any lower rates to the ACK rate bitmap. 
- */ - if (IWL_RATE_11M_INDEX < lowest_present_cck) - cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; - if (IWL_RATE_5M_INDEX < lowest_present_cck) - cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; - if (IWL_RATE_2M_INDEX < lowest_present_cck) - cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; - /* 1M already there or needed so always add */ - cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE; - - IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n", - cck, ofdm); - - /* "basic_rates" is a misnomer here -- should be called ACK rates */ - ctx->staging.cck_basic_rates = cck; - ctx->staging.ofdm_basic_rates = ofdm; -} - -/** - * iwlagn_commit_rxon - commit staging_rxon to hardware - * - * The RXON command in staging_rxon is committed to the hardware and - * the active_rxon structure is updated with the new data. This - * function correctly transitions out of the RXON_ASSOC_MSK state if - * a HW tune is required based on the RXON structure changes. - * - * The connect/disconnect flow should be as the following: - * - * 1. make sure send RXON command with association bit unset if not connect - * this should include the channel and the band for the candidate - * to be connected to - * 2. Add Station before RXON association with the AP - * 3. RXON_timing has to send before RXON for connection - * 4. full RXON command - associated bit set - * 5. use RXON_ASSOC command to update any flags changes - */ -int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - /* cast away the const for active_rxon in this function */ - struct iwl_rxon_cmd *active = (void *)&ctx->active; - bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); - int ret; - - lockdep_assert_held(&priv->mutex); - - if (!iwl_is_alive(priv)) - return -EBUSY; - - /* This function hardcodes a bunch of dual-mode assumptions */ - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - - if (!ctx->is_active) - return 0; - - /* always get timestamp with Rx frame */ - ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; - - /* recalculate basic rates */ - iwl_calc_basic_rates(priv, ctx); - - /* - * force CTS-to-self frames protection if RTS-CTS is not preferred - * one aggregation protection method - */ - if (!priv->hw_params.use_rts_for_aggregation) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - - if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || - !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - iwl_print_rx_config_cmd(priv, ctx->ctxid); - ret = iwl_check_rxon_cmd(priv, ctx); - if (ret) { - IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); - return -EINVAL; - } - - /* - * receive commit_rxon request - * abort any previous channel switch if still in process - */ - if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) && - (priv->switch_channel != ctx->staging.channel)) { - IWL_DEBUG_11H(priv, "abort channel switch on %d\n", - le16_to_cpu(priv->switch_channel)); - iwl_chswitch_done(priv, false); - } - - /* - * If we don't need to send a full RXON, we can use - * iwl_rxon_assoc_cmd which is used to reconfigure filter - * and other flags for the current radio configuration. 
- */ - if (!iwl_full_rxon_required(priv, ctx)) { - ret = iwlagn_send_rxon_assoc(priv, ctx); - if (ret) { - IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); - return ret; - } - - memcpy(active, &ctx->staging, sizeof(*active)); - /* - * We do not commit tx power settings while channel changing, - * do it now if after settings changed. - */ - iwl_set_tx_power(priv, priv->tx_power_next, false); - - /* make sure we are in the right PS state */ - iwl_power_update_mode(priv, true); - - return 0; - } - - iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto); - - IWL_DEBUG_INFO(priv, - "Going to commit RXON\n" - " * with%s RXON_FILTER_ASSOC_MSK\n" - " * channel = %d\n" - " * bssid = %pM\n", - (new_assoc ? "" : "out"), - le16_to_cpu(ctx->staging.channel), - ctx->staging.bssid_addr); - - /* - * Always clear associated first, but with the correct config. - * This is required as for example station addition for the - * AP station must be done after the BSSID is set to correctly - * set up filters in the device. - */ - ret = iwlagn_rxon_disconn(priv, ctx); - if (ret) - return ret; - - ret = iwlagn_set_pan_params(priv); - if (ret) - return ret; - - if (new_assoc) - return iwlagn_rxon_connect(priv, ctx); - - return 0; -} - -void iwlagn_config_ht40(struct ieee80211_conf *conf, - struct iwl_rxon_context *ctx) -{ - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } -} - -int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_rxon_context *ctx; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = conf->chandef.chan; - int ret = 0; - - IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed); - - mutex_lock(&priv->mutex); - - if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { - IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); - goto out; - } - - if (!iwl_is_ready(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); - goto out; - } - - if (changed & (IEEE80211_CONF_CHANGE_SMPS | - IEEE80211_CONF_CHANGE_CHANNEL)) { - /* mac80211 uses static for non-HT which is what we want */ - priv->current_ht_config.smps = conf->smps_mode; - - /* - * Recalculate chain counts. - * - * If monitor mode is enabled then mac80211 will - * set up the SM PS mode to OFF if an HT channel is - * configured. - */ - for_each_context(priv, ctx) - iwlagn_set_rxon_chain(priv, ctx); - } - - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - for_each_context(priv, ctx) { - /* Configure HT40 channels */ - if (ctx->ht.enabled != conf_is_ht(conf)) - ctx->ht.enabled = conf_is_ht(conf); - - if (ctx->ht.enabled) { - /* if HT40 is used, it should not change - * after associated except channel switch */ - if (!ctx->ht.is_40mhz || - !iwl_is_associated_ctx(ctx)) - iwlagn_config_ht40(conf, ctx); - } else - ctx->ht.is_40mhz = false; - - /* - * Default to no protection. 
Protection mode will - * later be set from BSS config in iwl_ht_conf - */ - ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; - - /* if we are switching from ht to 2.4 clear flags - * from any ht related info since 2.4 does not - * support ht */ - if (le16_to_cpu(ctx->staging.channel) != - channel->hw_value) - ctx->staging.flags = 0; - - iwl_set_rxon_channel(priv, channel, ctx); - iwl_set_rxon_ht(priv, &priv->current_ht_config); - - iwl_set_flags_for_band(priv, ctx, channel->band, - ctx->vif); - } - - iwl_update_bcast_stations(priv); - } - - if (changed & (IEEE80211_CONF_CHANGE_PS | - IEEE80211_CONF_CHANGE_IDLE)) { - ret = iwl_power_update_mode(priv, false); - if (ret) - IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); - } - - if (changed & IEEE80211_CONF_CHANGE_POWER) { - IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", - priv->tx_power_user_lmt, conf->power_level); - - iwl_set_tx_power(priv, conf->power_level, false); - } - - for_each_context(priv, ctx) { - if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) - continue; - iwlagn_commit_rxon(priv, ctx); - } - out: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return ret; -} - -static void iwlagn_check_needed_chains(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_bss_conf *bss_conf) -{ - struct ieee80211_vif *vif = ctx->vif; - struct iwl_rxon_context *tmp; - struct ieee80211_sta *sta; - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - struct ieee80211_sta_ht_cap *ht_cap; - bool need_multiple; - - lockdep_assert_held(&priv->mutex); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - rcu_read_lock(); - sta = ieee80211_find_sta(vif, bss_conf->bssid); - if (!sta) { - /* - * If at all, this can only happen through a race - * when the AP disconnects us while we're still - * setting up the connection, in that case mac80211 - * will soon tell us about that. - */ - need_multiple = false; - rcu_read_unlock(); - break; - } - - ht_cap = &sta->ht_cap; - - need_multiple = true; - - /* - * If the peer advertises no support for receiving 2 and 3 - * stream MCS rates, it can't be transmitting them either. - */ - if (ht_cap->mcs.rx_mask[1] == 0 && - ht_cap->mcs.rx_mask[2] == 0) { - need_multiple = false; - } else if (!(ht_cap->mcs.tx_params & - IEEE80211_HT_MCS_TX_DEFINED)) { - /* If it can't TX MCS at all ... */ - need_multiple = false; - } else if (ht_cap->mcs.tx_params & - IEEE80211_HT_MCS_TX_RX_DIFF) { - int maxstreams; - - /* - * But if it can receive them, it might still not - * be able to transmit them, which is what we need - * to check here -- so check the number of streams - * it advertises for TX (if different from RX). 
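Plugging numbers into the tx_params decoding that follows may help. The sketch below writes the mask and shift out explicitly (0x0c, bit 2); they are meant to mirror the IEEE80211_HT_MCS_TX_MAX_STREAMS_* definitions used by the code, and the sample tx_params value is invented for illustration. The field stores the maximum stream count minus one, hence the final increment.

#include <stdio.h>

/* Written-out mask/shift for the "TX max streams" field of tx_params;
 * intended to mirror IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK/_SHIFT. */
#define HT_MCS_TX_MAX_STREAMS_MASK  0x0c
#define HT_MCS_TX_MAX_STREAMS_SHIFT 2

int main(void)
{
    /* invented sample: TX defined, TX != RX, max-streams field = 1 */
    unsigned char tx_params = 0x07;
    int maxstreams;

    maxstreams = (tx_params & HT_MCS_TX_MAX_STREAMS_MASK)
                    >> HT_MCS_TX_MAX_STREAMS_SHIFT;
    maxstreams += 1;    /* the field stores "streams minus one" */

    printf("peer can transmit up to %d spatial streams\n", maxstreams);
    return 0;
}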
- */ - - maxstreams = (ht_cap->mcs.tx_params & - IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK); - maxstreams >>= - IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; - maxstreams += 1; - - if (maxstreams <= 1) - need_multiple = false; - } - - rcu_read_unlock(); - break; - case NL80211_IFTYPE_ADHOC: - /* currently */ - need_multiple = false; - break; - default: - /* only AP really */ - need_multiple = true; - break; - } - - ctx->ht_need_multiple_chains = need_multiple; - - if (!need_multiple) { - /* check all contexts */ - for_each_context(priv, tmp) { - if (!tmp->vif) - continue; - if (tmp->ht_need_multiple_chains) { - need_multiple = true; - break; - } - } - } - - ht_conf->single_chain_sufficient = !need_multiple; -} - -static void iwlagn_chain_noise_reset(struct iwl_priv *priv) -{ - struct iwl_chain_noise_data *data = &priv->chain_noise_data; - int ret; - - if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) - return; - - if ((data->state == IWL_CHAIN_NOISE_ALIVE) && - iwl_is_any_associated(priv)) { - struct iwl_calib_chain_noise_reset_cmd cmd; - - /* clear data for chain noise calibration algorithm */ - data->chain_noise_a = 0; - data->chain_noise_b = 0; - data->chain_noise_c = 0; - data->chain_signal_a = 0; - data->chain_signal_b = 0; - data->chain_signal_c = 0; - data->beacon_count = 0; - - memset(&cmd, 0, sizeof(cmd)); - iwl_set_calib_hdr(&cmd.hdr, - priv->phy_calib_chain_noise_reset_cmd); - ret = iwl_dvm_send_cmd_pdu(priv, - REPLY_PHY_CALIBRATION_CMD, - 0, sizeof(cmd), &cmd); - if (ret) - IWL_ERR(priv, - "Could not send REPLY_PHY_CALIBRATION_CMD\n"); - data->state = IWL_CHAIN_NOISE_ACCUMULATE; - IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); - } -} - -void iwlagn_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - int ret; - bool force = false; - - mutex_lock(&priv->mutex); - - if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { - /* - * If we go idle, then clearly no "passive-no-rx" - * workaround is needed any more, this is a reset. 
- */ - iwlagn_lift_passive_no_rx(priv); - } - - if (unlikely(!iwl_is_ready(priv))) { - IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); - mutex_unlock(&priv->mutex); - return; - } - - if (unlikely(!ctx->vif)) { - IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n"); - mutex_unlock(&priv->mutex); - return; - } - - if (changes & BSS_CHANGED_BEACON_INT) - force = true; - - if (changes & BSS_CHANGED_QOS) { - ctx->qos_data.qos_active = bss_conf->qos; - iwlagn_update_qos(priv, ctx); - } - - ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); - if (vif->bss_conf.use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - - if (changes & BSS_CHANGED_ASSOC) { - if (bss_conf->assoc) { - priv->timestamp = bss_conf->sync_tsf; - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - } else { - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - - if (ctx->ctxid == IWL_RXON_CTX_BSS) - priv->have_rekey_data = false; - } - - iwlagn_bt_coex_rssi_monitor(priv); - } - - if (ctx->ht.enabled) { - ctx->ht.protection = bss_conf->ht_operation_mode & - IEEE80211_HT_OP_MODE_PROTECTION; - ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode & - IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); - iwlagn_check_needed_chains(priv, ctx, bss_conf); - iwl_set_rxon_ht(priv, &priv->current_ht_config); - } - - iwlagn_set_rxon_chain(priv, ctx); - - if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) - ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; - - if (bss_conf->use_cts_prot) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - else - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; - - memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); - - if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) { - if (vif->bss_conf.enable_beacon) { - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - priv->beacon_ctx = ctx; - } else { - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - priv->beacon_ctx = NULL; - } - } - - /* - * If the ucode decides to do beacon filtering before - * association, it will lose beacons that are needed - * before sending frames out on passive channels. This - * causes association failures on those channels. Enable - * receiving beacons in such cases. - */ - - if (vif->type == NL80211_IFTYPE_STATION) { - if (!bss_conf->assoc) - ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK; - else - ctx->staging.filter_flags &= - ~RXON_FILTER_BCON_AWARE_MSK; - } - - if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) - iwlagn_commit_rxon(priv, ctx); - - if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { - /* - * The chain noise calibration will enable PM upon - * completion. If calibration has already been run - * then we need to enable power management here. - */ - if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) - iwl_power_update_mode(priv, false); - - /* Enable RX differential gain and sensitivity calibrations */ - iwlagn_chain_noise_reset(priv); - priv->start_calib = 1; - } - - if (changes & BSS_CHANGED_IBSS) { - ret = iwlagn_manage_ibss_station(priv, vif, - bss_conf->ibss_joined); - if (ret) - IWL_ERR(priv, "failed to %s IBSS station %pM\n", - bss_conf->ibss_joined ? 
"add" : "remove", - bss_conf->bssid); - } - - if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) { - if (iwlagn_update_beacon(priv, vif)) - IWL_ERR(priv, "Error updating beacon\n"); - } - - mutex_unlock(&priv->mutex); -} - -void iwlagn_post_scan(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - - /* - * We do not commit power settings while scan is pending, - * do it now if the settings changed. - */ - iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false); - iwl_set_tx_power(priv, priv->tx_power_next, false); - - /* - * Since setting the RXON may have been deferred while - * performing the scan, fire one off if needed - */ - for_each_context(priv, ctx) - if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) - iwlagn_commit_rxon(priv, ctx); - - iwlagn_set_pan_params(priv); -} diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c deleted file mode 100644 index 648159495bbc..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/scan.c +++ /dev/null @@ -1,1075 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ -#include -#include -#include -#include - -#include "dev.h" -#include "agn.h" - -/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after - * sending probe req. This should be set long enough to hear probe responses - * from more than one AP. */ -#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ -#define IWL_ACTIVE_DWELL_TIME_52 (20) - -#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3) -#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2) - -/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. - * Must be set longer than active dwell time. - * For the most reliable scan, set > AP beacon interval (typically 100msec). 
*/ -#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ -#define IWL_PASSIVE_DWELL_TIME_52 (10) -#define IWL_PASSIVE_DWELL_BASE (100) -#define IWL_CHANNEL_TUNE_TIME 5 -#define MAX_SCAN_CHANNEL 50 - -/* For reset radio, need minimal dwell time only */ -#define IWL_RADIO_RESET_DWELL_TIME 5 - -static int iwl_send_scan_abort(struct iwl_priv *priv) -{ - int ret; - struct iwl_host_cmd cmd = { - .id = REPLY_SCAN_ABORT_CMD, - .flags = CMD_WANT_SKB, - }; - __le32 *status; - - /* Exit instantly with error when device is not ready - * to receive scan abort command or it does not perform - * hardware scan currently */ - if (!test_bit(STATUS_READY, &priv->status) || - !test_bit(STATUS_SCAN_HW, &priv->status) || - test_bit(STATUS_FW_ERROR, &priv->status)) - return -EIO; - - ret = iwl_dvm_send_cmd(priv, &cmd); - if (ret) - return ret; - - status = (void *)cmd.resp_pkt->data; - if (*status != CAN_ABORT_STATUS) { - /* The scan abort will return 1 for success or - * 2 for "failure". A failure condition can be - * due to simply not being in an active scan which - * can occur if we send the scan abort before we - * the microcode has notified us that a scan is - * completed. */ - IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", - le32_to_cpu(*status)); - ret = -EIO; - } - - iwl_free_resp(&cmd); - return ret; -} - -static void iwl_complete_scan(struct iwl_priv *priv, bool aborted) -{ - /* check if scan was requested from mac80211 */ - if (priv->scan_request) { - IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n"); - ieee80211_scan_completed(priv->hw, aborted); - } - - priv->scan_type = IWL_SCAN_NORMAL; - priv->scan_vif = NULL; - priv->scan_request = NULL; -} - -static void iwl_process_scan_complete(struct iwl_priv *priv) -{ - bool aborted; - - lockdep_assert_held(&priv->mutex); - - if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->status)) - return; - - IWL_DEBUG_SCAN(priv, "Completed scan.\n"); - - cancel_delayed_work(&priv->scan_check); - - aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status); - if (aborted) - IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n"); - - if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan already completed.\n"); - goto out_settings; - } - - if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) { - int err; - - /* Check if mac80211 requested scan during our internal scan */ - if (priv->scan_request == NULL) - goto out_complete; - - /* If so request a new scan */ - err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL, - priv->scan_request->channels[0]->band); - if (err) { - IWL_DEBUG_SCAN(priv, - "failed to initiate pending scan: %d\n", err); - aborted = true; - goto out_complete; - } - - return; - } - -out_complete: - iwl_complete_scan(priv, aborted); - -out_settings: - /* Can we still talk to firmware ? 
*/ - if (!iwl_is_ready_rf(priv)) - return; - - iwlagn_post_scan(priv); -} - -void iwl_force_scan_end(struct iwl_priv *priv) -{ - lockdep_assert_held(&priv->mutex); - - if (!test_bit(STATUS_SCANNING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n"); - return; - } - - IWL_DEBUG_SCAN(priv, "Forcing scan end\n"); - clear_bit(STATUS_SCANNING, &priv->status); - clear_bit(STATUS_SCAN_HW, &priv->status); - clear_bit(STATUS_SCAN_ABORTING, &priv->status); - clear_bit(STATUS_SCAN_COMPLETE, &priv->status); - iwl_complete_scan(priv, true); -} - -static void iwl_do_scan_abort(struct iwl_priv *priv) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - if (!test_bit(STATUS_SCANNING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n"); - return; - } - - if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan abort in progress\n"); - return; - } - - ret = iwl_send_scan_abort(priv); - if (ret) { - IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret); - iwl_force_scan_end(priv); - } else - IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n"); -} - -/** - * iwl_scan_cancel - Cancel any currently executing HW scan - */ -int iwl_scan_cancel(struct iwl_priv *priv) -{ - IWL_DEBUG_SCAN(priv, "Queuing abort scan\n"); - queue_work(priv->workqueue, &priv->abort_scan); - return 0; -} - -/** - * iwl_scan_cancel_timeout - Cancel any currently executing HW scan - * @ms: amount of time to wait (in milliseconds) for scan to abort - * - */ -void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(ms); - - lockdep_assert_held(&priv->mutex); - - IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n"); - - iwl_do_scan_abort(priv); - - while (time_before_eq(jiffies, timeout)) { - if (!test_bit(STATUS_SCAN_HW, &priv->status)) - goto finished; - msleep(20); - } - - return; - - finished: - /* - * Now STATUS_SCAN_HW is clear. This means that the - * device finished, but the background work is going - * to execute at best as soon as we release the mutex. - * Since we need to be able to issue a new scan right - * after this function returns, run the complete here. - * The STATUS_SCAN_COMPLETE bit will then be cleared - * and prevent the background work from "completing" - * a possible new scan. - */ - iwl_process_scan_complete(priv); -} - -/* Service response to REPLY_SCAN_CMD (0x80) */ -static void iwl_rx_reply_scan(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ -#ifdef CONFIG_IWLWIFI_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scanreq_notification *notif = (void *)pkt->data; - - IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status); -#endif -} - -/* Service SCAN_START_NOTIFICATION (0x82) */ -static void iwl_rx_scan_start_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scanstart_notification *notif = (void *)pkt->data; - - priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); - IWL_DEBUG_SCAN(priv, "Scan start: " - "%d [802.11%s] " - "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", - notif->channel, - notif->band ? 
"bg" : "a", - le32_to_cpu(notif->tsf_high), - le32_to_cpu(notif->tsf_low), - notif->status, notif->beacon_timer); -} - -/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ -static void iwl_rx_scan_results_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ -#ifdef CONFIG_IWLWIFI_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scanresults_notification *notif = (void *)pkt->data; - - IWL_DEBUG_SCAN(priv, "Scan ch.res: " - "%d [802.11%s] " - "probe status: %u:%u " - "(TSF: 0x%08X:%08X) - %d " - "elapsed=%lu usec\n", - notif->channel, - notif->band ? "bg" : "a", - notif->probe_status, notif->num_probe_not_sent, - le32_to_cpu(notif->tsf_high), - le32_to_cpu(notif->tsf_low), - le32_to_cpu(notif->statistics[0]), - le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); -#endif -} - -/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ -static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data; - - IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", - scan_notif->scanned_channels, - scan_notif->tsf_low, - scan_notif->tsf_high, scan_notif->status); - - IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", - (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", - jiffies_to_msecs(jiffies - priv->scan_start)); - - /* - * When aborting, we run the scan completed background work inline - * and the background work must then do nothing. The SCAN_COMPLETE - * bit helps implement that logic and thus needs to be set before - * queueing the work. Also, since the scan abort waits for SCAN_HW - * to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW - * to avoid a race there. 
- */ - set_bit(STATUS_SCAN_COMPLETE, &priv->status); - clear_bit(STATUS_SCAN_HW, &priv->status); - queue_work(priv->workqueue, &priv->scan_completed); - - if (priv->iw_mode != NL80211_IFTYPE_ADHOC && - iwl_advanced_bt_coexist(priv) && - priv->bt_status != scan_notif->bt_status) { - if (scan_notif->bt_status) { - /* BT on */ - if (!priv->bt_ch_announce) - priv->bt_traffic_load = - IWL_BT_COEX_TRAFFIC_LOAD_HIGH; - /* - * otherwise, no traffic load information provided - * no changes made - */ - } else { - /* BT off */ - priv->bt_traffic_load = - IWL_BT_COEX_TRAFFIC_LOAD_NONE; - } - priv->bt_status = scan_notif->bt_status; - queue_work(priv->workqueue, - &priv->bt_traffic_change_work); - } -} - -void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) -{ - /* scan handlers */ - priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; - priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; - priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = - iwl_rx_scan_results_notif; - priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = - iwl_rx_scan_complete_notif; -} - -static u16 iwl_get_active_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band, u8 n_probes) -{ - if (band == IEEE80211_BAND_5GHZ) - return IWL_ACTIVE_DWELL_TIME_52 + - IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); - else - return IWL_ACTIVE_DWELL_TIME_24 + - IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); -} - -static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) -{ - struct iwl_rxon_context *ctx; - int limits[NUM_IWL_RXON_CTX] = {}; - int n_active = 0; - u16 limit; - - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - - /* - * If we're associated, we clamp the dwell time 98% - * of the beacon interval (minus 2 * channel tune time) - * If both contexts are active, we have to restrict to - * 1/2 of the minimum of them, because they might be in - * lock-step with the time inbetween only half of what - * time we'd have in each of them. - */ - for_each_context(priv, ctx) { - switch (ctx->staging.dev_type) { - case RXON_DEV_TYPE_P2P: - /* no timing constraints */ - continue; - case RXON_DEV_TYPE_ESS: - default: - /* timing constraints if associated */ - if (!iwl_is_associated_ctx(ctx)) - continue; - break; - case RXON_DEV_TYPE_CP: - case RXON_DEV_TYPE_2STA: - /* - * These seem to always have timers for TBTT - * active in uCode even when not associated yet. - */ - break; - } - - limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE; - } - - switch (n_active) { - case 0: - return dwell_time; - case 2: - limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; - limit /= 2; - dwell_time = min(limit, dwell_time); - /* fall through to limit further */ - case 1: - limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; - limit /= n_active; - return min(limit, dwell_time); - default: - WARN_ON_ONCE(1); - return dwell_time; - } -} - -static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band) -{ - u16 passive = (band == IEEE80211_BAND_2GHZ) ? 
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : - IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; - - return iwl_limit_dwell(priv, passive); -} - -/* Return valid, unused, channel for a passive scan to reset the RF */ -static u8 iwl_get_single_channel_number(struct iwl_priv *priv, - enum ieee80211_band band) -{ - struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band]; - struct iwl_rxon_context *ctx; - int i; - - for (i = 0; i < sband->n_channels; i++) { - bool busy = false; - - for_each_context(priv, ctx) { - busy = sband->channels[i].hw_value == - le16_to_cpu(ctx->staging.channel); - if (busy) - break; - } - - if (busy) - continue; - - if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED)) - return sband->channels[i].hw_value; - } - - return 0; -} - -static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv, - struct ieee80211_vif *vif, - enum ieee80211_band band, - struct iwl_scan_channel *scan_ch) -{ - const struct ieee80211_supported_band *sband; - u16 channel; - - sband = iwl_get_hw_mode(priv, band); - if (!sband) { - IWL_ERR(priv, "invalid band\n"); - return 0; - } - - channel = iwl_get_single_channel_number(priv, band); - if (channel) { - scan_ch->channel = cpu_to_le16(channel); - scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; - scan_ch->active_dwell = - cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME); - scan_ch->passive_dwell = - cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME); - /* Set txpower levels to defaults */ - scan_ch->dsp_atten = 110; - if (band == IEEE80211_BAND_5GHZ) - scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; - else - scan_ch->tx_gain = ((1 << 5) | (5 << 3)); - return 1; - } - - IWL_ERR(priv, "no valid channel found\n"); - return 0; -} - -static int iwl_get_channels_for_scan(struct iwl_priv *priv, - struct ieee80211_vif *vif, - enum ieee80211_band band, - u8 is_active, u8 n_probes, - struct iwl_scan_channel *scan_ch) -{ - struct ieee80211_channel *chan; - const struct ieee80211_supported_band *sband; - u16 passive_dwell = 0; - u16 active_dwell = 0; - int added, i; - u16 channel; - - sband = iwl_get_hw_mode(priv, band); - if (!sband) - return 0; - - active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); - passive_dwell = iwl_get_passive_dwell_time(priv, band); - - if (passive_dwell <= active_dwell) - passive_dwell = active_dwell + 1; - - for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { - chan = priv->scan_request->channels[i]; - - if (chan->band != band) - continue; - - channel = chan->hw_value; - scan_ch->channel = cpu_to_le16(channel); - - if (!is_active || (chan->flags & IEEE80211_CHAN_NO_IR)) - scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; - else - scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; - - if (n_probes) - scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); - - scan_ch->active_dwell = cpu_to_le16(active_dwell); - scan_ch->passive_dwell = cpu_to_le16(passive_dwell); - - /* Set txpower levels to defaults */ - scan_ch->dsp_atten = 110; - - /* NOTE: if we were doing 6Mb OFDM for scans we'd use - * power level: - * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; - */ - if (band == IEEE80211_BAND_5GHZ) - scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; - else - scan_ch->tx_gain = ((1 << 5) | (5 << 3)); - - IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", - channel, le32_to_cpu(scan_ch->type), - (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? - "ACTIVE" : "PASSIVE", - (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? 
- active_dwell : passive_dwell); - - scan_ch++; - added++; - } - - IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); - return added; -} - -/** - * iwl_fill_probe_req - fill in all required fields and IE for probe request - */ - -static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta, - const u8 *ies, int ie_len, const u8 *ssid, - u8 ssid_len, int left) -{ - int len = 0; - u8 *pos = NULL; - - /* Make sure there is enough space for the probe request, - * two mandatory IEs and the data */ - left -= 24; - if (left < 0) - return 0; - - frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); - eth_broadcast_addr(frame->da); - memcpy(frame->sa, ta, ETH_ALEN); - eth_broadcast_addr(frame->bssid); - frame->seq_ctrl = 0; - - len += 24; - - /* ...next IE... */ - pos = &frame->u.probe_req.variable[0]; - - /* fill in our SSID IE */ - left -= ssid_len + 2; - if (left < 0) - return 0; - *pos++ = WLAN_EID_SSID; - *pos++ = ssid_len; - if (ssid && ssid_len) { - memcpy(pos, ssid, ssid_len); - pos += ssid_len; - } - - len += ssid_len + 2; - - if (WARN_ON(left < ie_len)) - return len; - - if (ies && ie_len) { - memcpy(pos, ies, ie_len); - len += ie_len; - } - - return (u16)len; -} - -static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) -{ - struct iwl_host_cmd cmd = { - .id = REPLY_SCAN_CMD, - .len = { sizeof(struct iwl_scan_cmd), }, - }; - struct iwl_scan_cmd *scan; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - u32 rate_flags = 0; - u16 cmd_len = 0; - u16 rx_chain = 0; - enum ieee80211_band band; - u8 n_probes = 0; - u8 rx_ant = priv->nvm_data->valid_rx_ant; - u8 rate; - bool is_active = false; - int chan_mod; - u8 active_chains; - u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant; - int ret; - int scan_cmd_size = sizeof(struct iwl_scan_cmd) + - MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) + - priv->fw->ucode_capa.max_probe_length; - const u8 *ssid = NULL; - u8 ssid_len = 0; - - if (WARN_ON(priv->scan_type == IWL_SCAN_NORMAL && - (!priv->scan_request || - priv->scan_request->n_channels > MAX_SCAN_CHANNEL))) - return -EINVAL; - - lockdep_assert_held(&priv->mutex); - - if (vif) - ctx = iwl_rxon_ctx_from_vif(vif); - - if (!priv->scan_cmd) { - priv->scan_cmd = kmalloc(scan_cmd_size, GFP_KERNEL); - if (!priv->scan_cmd) { - IWL_DEBUG_SCAN(priv, - "fail to allocate memory for scan\n"); - return -ENOMEM; - } - } - scan = priv->scan_cmd; - memset(scan, 0, scan_cmd_size); - - scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; - scan->quiet_time = IWL_ACTIVE_QUIET_TIME; - - if (iwl_is_any_associated(priv)) { - u16 interval = 0; - u32 extra; - u32 suspend_time = 100; - u32 scan_suspend_time = 100; - - IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); - switch (priv->scan_type) { - case IWL_SCAN_RADIO_RESET: - interval = 0; - break; - case IWL_SCAN_NORMAL: - interval = vif->bss_conf.beacon_int; - break; - } - - scan->suspend_time = 0; - scan->max_out_time = cpu_to_le32(200 * 1024); - if (!interval) - interval = suspend_time; - - extra = (suspend_time / interval) << 22; - scan_suspend_time = (extra | - ((suspend_time % interval) * 1024)); - scan->suspend_time = cpu_to_le32(scan_suspend_time); - IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", - scan_suspend_time, interval); - } - - switch (priv->scan_type) { - case IWL_SCAN_RADIO_RESET: - IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); - /* - * Override quiet time as firmware checks that active - * dwell is >= quiet; since we use passive scan it'll - * not 
actually be used. - */ - scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME); - break; - case IWL_SCAN_NORMAL: - if (priv->scan_request->n_ssids) { - int i, p = 0; - IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); - /* - * The highest priority SSID is inserted to the - * probe request template. - */ - ssid_len = priv->scan_request->ssids[0].ssid_len; - ssid = priv->scan_request->ssids[0].ssid; - - /* - * Invert the order of ssids, the firmware will invert - * it back. - */ - for (i = priv->scan_request->n_ssids - 1; i >= 1; i--) { - scan->direct_scan[p].id = WLAN_EID_SSID; - scan->direct_scan[p].len = - priv->scan_request->ssids[i].ssid_len; - memcpy(scan->direct_scan[p].ssid, - priv->scan_request->ssids[i].ssid, - priv->scan_request->ssids[i].ssid_len); - n_probes++; - p++; - } - is_active = true; - } else - IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); - break; - } - - scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; - scan->tx_cmd.sta_id = ctx->bcast_sta_id; - scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - - switch (priv->scan_band) { - case IEEE80211_BAND_2GHZ: - scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; - chan_mod = le32_to_cpu( - priv->contexts[IWL_RXON_CTX_BSS].active.flags & - RXON_FLG_CHANNEL_MODE_MSK) - >> RXON_FLG_CHANNEL_MODE_POS; - if ((priv->scan_request && priv->scan_request->no_cck) || - chan_mod == CHANNEL_MODE_PURE_40) { - rate = IWL_RATE_6M_PLCP; - } else { - rate = IWL_RATE_1M_PLCP; - rate_flags = RATE_MCS_CCK_MSK; - } - /* - * Internal scans are passive, so we can indiscriminately set - * the BT ignore flag on 2.4 GHz since it applies to TX only. - */ - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist) - scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; - break; - case IEEE80211_BAND_5GHZ: - rate = IWL_RATE_6M_PLCP; - break; - default: - IWL_WARN(priv, "Invalid scan band\n"); - return -EIO; - } - - /* - * If active scanning is requested but a certain channel is - * marked passive, we can do active scanning if we detect - * transmissions. - * - * There is an issue with some firmware versions that triggers - * a sysassert on a "good CRC threshold" of zero (== disabled), - * on a radar channel even though this means that we should NOT - * send probes. - * - * The "good CRC threshold" is the number of frames that we - * need to receive during our dwell time on a channel before - * sending out probes -- setting this to a huge value will - * mean we never reach it, but at the same time work around - * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER - * here instead of IWL_GOOD_CRC_TH_DISABLED. - * - * This was fixed in later versions along with some other - * scan changes, and the threshold behaves as a flag in those - * versions. - */ - if (priv->new_scan_threshold_behaviour) - scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : - IWL_GOOD_CRC_TH_DISABLED; - else - scan->good_CRC_th = is_active ? 
IWL_GOOD_CRC_TH_DEFAULT : - IWL_GOOD_CRC_TH_NEVER; - - band = priv->scan_band; - - if (band == IEEE80211_BAND_2GHZ && - priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist) { - /* transmit 2.4 GHz probes only on first antenna */ - scan_tx_antennas = first_antenna(scan_tx_antennas); - } - - priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, - priv->scan_tx_ant[band], - scan_tx_antennas); - rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); - scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); - - /* - * In power save mode while associated use one chain, - * otherwise use all chains - */ - if (test_bit(STATUS_POWER_PMI, &priv->status) && - !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) { - /* rx_ant has been set to all valid chains previously */ - active_chains = rx_ant & - ((u8)(priv->chain_noise_data.active_chains)); - if (!active_chains) - active_chains = rx_ant; - - IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", - priv->chain_noise_data.active_chains); - - rx_ant = first_antenna(active_chains); - } - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist && - priv->bt_full_concurrent) { - /* operated as 1x1 in full concurrency mode */ - rx_ant = first_antenna(rx_ant); - } - - /* MIMO is not used here, but value is required */ - rx_chain |= - priv->nvm_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS; - rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; - rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; - rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; - scan->rx_chain = cpu_to_le16(rx_chain); - switch (priv->scan_type) { - case IWL_SCAN_NORMAL: - cmd_len = iwl_fill_probe_req( - (struct ieee80211_mgmt *)scan->data, - vif->addr, - priv->scan_request->ie, - priv->scan_request->ie_len, - ssid, ssid_len, - scan_cmd_size - sizeof(*scan)); - break; - case IWL_SCAN_RADIO_RESET: - /* use bcast addr, will not be transmitted but must be valid */ - cmd_len = iwl_fill_probe_req( - (struct ieee80211_mgmt *)scan->data, - iwl_bcast_addr, NULL, 0, - NULL, 0, - scan_cmd_size - sizeof(*scan)); - break; - default: - BUG(); - } - scan->tx_cmd.len = cpu_to_le16(cmd_len); - - scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | - RXON_FILTER_BCON_AWARE_MSK); - - switch (priv->scan_type) { - case IWL_SCAN_RADIO_RESET: - scan->channel_count = - iwl_get_channel_for_reset_scan(priv, vif, band, - (void *)&scan->data[cmd_len]); - break; - case IWL_SCAN_NORMAL: - scan->channel_count = - iwl_get_channels_for_scan(priv, vif, band, - is_active, n_probes, - (void *)&scan->data[cmd_len]); - break; - } - - if (scan->channel_count == 0) { - IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); - return -EIO; - } - - cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) + - scan->channel_count * sizeof(struct iwl_scan_channel); - cmd.data[0] = scan; - cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - scan->len = cpu_to_le16(cmd.len[0]); - - /* set scan bit here for PAN params */ - set_bit(STATUS_SCAN_HW, &priv->status); - - ret = iwlagn_set_pan_params(priv); - if (ret) { - clear_bit(STATUS_SCAN_HW, &priv->status); - return ret; - } - - ret = iwl_dvm_send_cmd(priv, &cmd); - if (ret) { - clear_bit(STATUS_SCAN_HW, &priv->status); - iwlagn_set_pan_params(priv); - } - - return ret; -} - -void iwl_init_scan_params(struct iwl_priv *priv) -{ - u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1; - if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) - priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; - if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) - 
priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; -} - -int __must_check iwl_scan_initiate(struct iwl_priv *priv, - struct ieee80211_vif *vif, - enum iwl_scan_type scan_type, - enum ieee80211_band band) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - cancel_delayed_work(&priv->scan_check); - - if (!iwl_is_ready_rf(priv)) { - IWL_WARN(priv, "Request scan called when driver not ready.\n"); - return -EIO; - } - - if (test_bit(STATUS_SCAN_HW, &priv->status)) { - IWL_DEBUG_SCAN(priv, - "Multiple concurrent scan requests in parallel.\n"); - return -EBUSY; - } - - if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n"); - return -EBUSY; - } - - IWL_DEBUG_SCAN(priv, "Starting %sscan...\n", - scan_type == IWL_SCAN_NORMAL ? "" : - "internal short "); - - set_bit(STATUS_SCANNING, &priv->status); - priv->scan_type = scan_type; - priv->scan_start = jiffies; - priv->scan_band = band; - - ret = iwlagn_request_scan(priv, vif); - if (ret) { - clear_bit(STATUS_SCANNING, &priv->status); - priv->scan_type = IWL_SCAN_NORMAL; - return ret; - } - - queue_delayed_work(priv->workqueue, &priv->scan_check, - IWL_SCAN_CHECK_WATCHDOG); - - return 0; -} - - -/* - * internal short scan, this function should only been called while associated. - * It will reset and tune the radio to prevent possible RF related problem - */ -void iwl_internal_short_hw_scan(struct iwl_priv *priv) -{ - queue_work(priv->workqueue, &priv->start_internal_scan); -} - -static void iwl_bg_start_internal_scan(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, start_internal_scan); - - IWL_DEBUG_SCAN(priv, "Start internal scan\n"); - - mutex_lock(&priv->mutex); - - if (priv->scan_type == IWL_SCAN_RADIO_RESET) { - IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); - goto unlock; - } - - if (test_bit(STATUS_SCANNING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); - goto unlock; - } - - if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band)) - IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n"); - unlock: - mutex_unlock(&priv->mutex); -} - -static void iwl_bg_scan_check(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, scan_check.work); - - IWL_DEBUG_SCAN(priv, "Scan check work\n"); - - /* Since we are here firmware does not finish scan and - * most likely is in bad shape, so we don't bother to - * send abort command, just force scan complete to mac80211 */ - mutex_lock(&priv->mutex); - iwl_force_scan_end(priv); - mutex_unlock(&priv->mutex); -} - -static void iwl_bg_abort_scan(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); - - IWL_DEBUG_SCAN(priv, "Abort scan work\n"); - - /* We keep scan_check work queued in case when firmware will not - * report back scan completed notification */ - mutex_lock(&priv->mutex); - iwl_scan_cancel_timeout(priv, 200); - mutex_unlock(&priv->mutex); -} - -static void iwl_bg_scan_completed(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, scan_completed); - - mutex_lock(&priv->mutex); - iwl_process_scan_complete(priv); - mutex_unlock(&priv->mutex); -} - -void iwl_setup_scan_deferred_work(struct iwl_priv *priv) -{ - INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); - INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); - INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); - 
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); -} - -void iwl_cancel_scan_deferred_work(struct iwl_priv *priv) -{ - cancel_work_sync(&priv->start_internal_scan); - cancel_work_sync(&priv->abort_scan); - cancel_work_sync(&priv->scan_completed); - - if (cancel_delayed_work_sync(&priv->scan_check)) { - mutex_lock(&priv->mutex); - iwl_force_scan_end(priv); - mutex_unlock(&priv->mutex); - } -} diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c deleted file mode 100644 index 0fa67d3b7235..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ /dev/null @@ -1,1442 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include "iwl-trans.h" -#include "dev.h" -#include "agn.h" - -const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; - -static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) -{ - lockdep_assert_held(&priv->sta_lock); - - if (sta_id >= IWLAGN_STATION_COUNT) { - IWL_ERR(priv, "invalid sta_id %u\n", sta_id); - return -EINVAL; - } - if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) - IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u " - "addr %pM\n", - sta_id, priv->stations[sta_id].sta.sta.addr); - - if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { - IWL_DEBUG_ASSOC(priv, - "STA id %u addr %pM already present in uCode " - "(according to driver)\n", - sta_id, priv->stations[sta_id].sta.sta.addr); - } else { - priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; - IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", - sta_id, priv->stations[sta_id].sta.sta.addr); - } - return 0; -} - -static void iwl_process_add_sta_resp(struct iwl_priv *priv, - struct iwl_rx_packet *pkt) -{ - struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data; - - IWL_DEBUG_INFO(priv, "Processing response for adding station\n"); - - spin_lock_bh(&priv->sta_lock); - - switch (add_sta_resp->status) { - case ADD_STA_SUCCESS_MSK: - IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); - break; - case ADD_STA_NO_ROOM_IN_TABLE: - IWL_ERR(priv, "Adding station failed, no room in table.\n"); - break; - case ADD_STA_NO_BLOCK_ACK_RESOURCE: - IWL_ERR(priv, - "Adding station failed, no block ack resource.\n"); - break; - case ADD_STA_MODIFY_NON_EXIST_STA: - IWL_ERR(priv, "Attempting to modify non-existing station\n"); - break; - default: - IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", - add_sta_resp->status); - break; - } - - spin_unlock_bh(&priv->sta_lock); -} - -void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - iwl_process_add_sta_resp(priv, pkt); -} - -int iwl_send_add_sta(struct iwl_priv *priv, - struct iwl_addsta_cmd *sta, u8 flags) -{ - int ret = 0; - struct iwl_host_cmd cmd = { - .id = REPLY_ADD_STA, - .flags = flags, - .data = { sta, }, - .len = { sizeof(*sta), }, - }; - u8 sta_id __maybe_unused = sta->sta.sta_id; - struct iwl_rx_packet *pkt; - struct iwl_add_sta_resp *add_sta_resp; - - IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", - sta_id, sta->sta.addr, flags & CMD_ASYNC ? 
"a" : ""); - - if (!(flags & CMD_ASYNC)) { - cmd.flags |= CMD_WANT_SKB; - might_sleep(); - } - - ret = iwl_dvm_send_cmd(priv, &cmd); - - if (ret || (flags & CMD_ASYNC)) - return ret; - - pkt = cmd.resp_pkt; - add_sta_resp = (void *)pkt->data; - - /* debug messages are printed in the handler */ - if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) { - spin_lock_bh(&priv->sta_lock); - ret = iwl_sta_ucode_activate(priv, sta_id); - spin_unlock_bh(&priv->sta_lock); - } else { - ret = -EIO; - } - - iwl_free_resp(&cmd); - - return ret; -} - -bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_sta *sta) -{ - if (!ctx->ht.enabled || !ctx->ht.is_40mhz) - return false; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (priv->disable_ht40) - return false; -#endif - - /* special case for RXON */ - if (!sta) - return true; - - return sta->bandwidth >= IEEE80211_STA_RX_BW_40; -} - -static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, - struct ieee80211_sta *sta, - struct iwl_rxon_context *ctx, - __le32 *flags, __le32 *mask) -{ - struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; - - *mask = STA_FLG_RTS_MIMO_PROT_MSK | - STA_FLG_MIMO_DIS_MSK | - STA_FLG_HT40_EN_MSK | - STA_FLG_MAX_AGG_SIZE_MSK | - STA_FLG_AGG_MPDU_DENSITY_MSK; - *flags = 0; - - if (!sta || !sta_ht_inf->ht_supported) - return; - - IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n", - sta->addr, - (sta->smps_mode == IEEE80211_SMPS_STATIC) ? - "static" : - (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? - "dynamic" : "disabled"); - - switch (sta->smps_mode) { - case IEEE80211_SMPS_STATIC: - *flags |= STA_FLG_MIMO_DIS_MSK; - break; - case IEEE80211_SMPS_DYNAMIC: - *flags |= STA_FLG_RTS_MIMO_PROT_MSK; - break; - case IEEE80211_SMPS_OFF: - break; - default: - IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode); - break; - } - - *flags |= cpu_to_le32( - (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); - - *flags |= cpu_to_le32( - (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); - - if (iwl_is_ht40_tx_allowed(priv, ctx, sta)) - *flags |= STA_FLG_HT40_EN_MSK; -} - -int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_sta *sta) -{ - u8 sta_id = iwl_sta_id(sta); - __le32 flags, mask; - struct iwl_addsta_cmd cmd; - - if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION)) - return -EINVAL; - - iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask); - - spin_lock_bh(&priv->sta_lock); - priv->stations[sta_id].sta.station_flags &= ~mask; - priv->stations[sta_id].sta.station_flags |= flags; - spin_unlock_bh(&priv->sta_lock); - - memset(&cmd, 0, sizeof(cmd)); - cmd.mode = STA_CONTROL_MODIFY_MSK; - cmd.station_flags_msk = mask; - cmd.station_flags = flags; - cmd.sta.sta_id = sta_id; - - return iwl_send_add_sta(priv, &cmd, 0); -} - -static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, - struct ieee80211_sta *sta, - struct iwl_rxon_context *ctx) -{ - __le32 flags, mask; - - iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask); - - lockdep_assert_held(&priv->sta_lock); - priv->stations[index].sta.station_flags &= ~mask; - priv->stations[index].sta.station_flags |= flags; -} - -/** - * iwl_prep_station - Prepare station information for addition - * - * should be called with sta_lock held - */ -u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - const u8 *addr, bool is_ap, struct ieee80211_sta *sta) -{ - struct iwl_station_entry *station; - int i; - u8 sta_id = IWL_INVALID_STATION; - - if (is_ap) - sta_id = 
ctx->ap_sta_id; - else if (is_broadcast_ether_addr(addr)) - sta_id = ctx->bcast_sta_id; - else - for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) { - if (ether_addr_equal(priv->stations[i].sta.sta.addr, - addr)) { - sta_id = i; - break; - } - - if (!priv->stations[i].used && - sta_id == IWL_INVALID_STATION) - sta_id = i; - } - - /* - * These two conditions have the same outcome, but keep them - * separate - */ - if (unlikely(sta_id == IWL_INVALID_STATION)) - return sta_id; - - /* - * uCode is not able to deal with multiple requests to add a - * station. Keep track if one is in progress so that we do not send - * another. - */ - if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { - IWL_DEBUG_INFO(priv, "STA %d already in process of being " - "added.\n", sta_id); - return sta_id; - } - - if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && - (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && - ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) { - IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not " - "adding again.\n", sta_id, addr); - return sta_id; - } - - station = &priv->stations[sta_id]; - station->used = IWL_STA_DRIVER_ACTIVE; - IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", - sta_id, addr); - priv->num_stations++; - - /* Set up the REPLY_ADD_STA command to send to device */ - memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); - memcpy(station->sta.sta.addr, addr, ETH_ALEN); - station->sta.mode = 0; - station->sta.sta.sta_id = sta_id; - station->sta.station_flags = ctx->station_flags; - station->ctxid = ctx->ctxid; - - if (sta) { - struct iwl_station_priv *sta_priv; - - sta_priv = (void *)sta->drv_priv; - sta_priv->ctx = ctx; - } - - /* - * OK to call unconditionally, since local stations (IBSS BSSID - * STA and broadcast STA) pass in a NULL sta, and mac80211 - * doesn't allow HT IBSS. - */ - iwl_set_ht_add_station(priv, sta_id, sta, ctx); - - return sta_id; - -} - -#define STA_WAIT_TIMEOUT (HZ/2) - -/** - * iwl_add_station_common - - */ -int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - const u8 *addr, bool is_ap, - struct ieee80211_sta *sta, u8 *sta_id_r) -{ - int ret = 0; - u8 sta_id; - struct iwl_addsta_cmd sta_cmd; - - *sta_id_r = 0; - spin_lock_bh(&priv->sta_lock); - sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Unable to prepare station %pM for addition\n", - addr); - spin_unlock_bh(&priv->sta_lock); - return -EINVAL; - } - - /* - * uCode is not able to deal with multiple requests to add a - * station. Keep track if one is in progress so that we do not send - * another. 
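The slot selection in iwl_prep_station() above is a plain linear scan: reuse the entry whose address already matches, otherwise take the first unused slot (the AP and broadcast stations are special-cased before the loop). A standalone restatement, with a made-up table size and a simplified entry layout:

/* Standalone restatement of the station-slot scan above; the table
 * size and entry struct are simplified stand-ins. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define STATION_COUNT   16
#define INVALID_STATION 0xff

struct sta_entry {
    bool used;
    unsigned char addr[6];
};

static unsigned char prep_station(struct sta_entry *tbl, const unsigned char *addr)
{
    unsigned char sta_id = INVALID_STATION;
    int i;

    for (i = 0; i < STATION_COUNT; i++) {
        if (tbl[i].used && !memcmp(tbl[i].addr, addr, 6))
            return i;                       /* already known */
        if (!tbl[i].used && sta_id == INVALID_STATION)
            sta_id = i;                     /* remember first free slot */
    }
    return sta_id;
}

int main(void)
{
    struct sta_entry tbl[STATION_COUNT] = { { .used = true,
        .addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } } };
    const unsigned char known[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    const unsigned char fresh[6] = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb };

    printf("known -> slot %d\n", prep_station(tbl, known));  /* 0 */
    printf("fresh -> slot %d\n", prep_station(tbl, fresh));  /* 1 */
    return 0;
}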
- */ - if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { - IWL_DEBUG_INFO(priv, "STA %d already in process of being " - "added.\n", sta_id); - spin_unlock_bh(&priv->sta_lock); - return -EEXIST; - } - - if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && - (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { - IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not " - "adding again.\n", sta_id, addr); - spin_unlock_bh(&priv->sta_lock); - return -EEXIST; - } - - priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, - sizeof(struct iwl_addsta_cmd)); - spin_unlock_bh(&priv->sta_lock); - - /* Add station to device's station table */ - ret = iwl_send_add_sta(priv, &sta_cmd, 0); - if (ret) { - spin_lock_bh(&priv->sta_lock); - IWL_ERR(priv, "Adding station %pM failed.\n", - priv->stations[sta_id].sta.sta.addr); - priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; - priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; - spin_unlock_bh(&priv->sta_lock); - } - *sta_id_r = sta_id; - return ret; -} - -/** - * iwl_sta_ucode_deactivate - deactivate ucode status for a station - */ -static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) -{ - lockdep_assert_held(&priv->sta_lock); - - /* Ucode must be active and driver must be non active */ - if ((priv->stations[sta_id].used & - (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != - IWL_STA_UCODE_ACTIVE) - IWL_ERR(priv, "removed non active STA %u\n", sta_id); - - priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; - - memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); - IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); -} - -static int iwl_send_remove_station(struct iwl_priv *priv, - const u8 *addr, int sta_id, - bool temporary) -{ - struct iwl_rx_packet *pkt; - int ret; - struct iwl_rem_sta_cmd rm_sta_cmd; - struct iwl_rem_sta_resp *rem_sta_resp; - - struct iwl_host_cmd cmd = { - .id = REPLY_REMOVE_STA, - .len = { sizeof(struct iwl_rem_sta_cmd), }, - .data = { &rm_sta_cmd, }, - }; - - memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); - rm_sta_cmd.num_sta = 1; - memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); - - cmd.flags |= CMD_WANT_SKB; - - ret = iwl_dvm_send_cmd(priv, &cmd); - - if (ret) - return ret; - - pkt = cmd.resp_pkt; - rem_sta_resp = (void *)pkt->data; - - switch (rem_sta_resp->status) { - case REM_STA_SUCCESS_MSK: - if (!temporary) { - spin_lock_bh(&priv->sta_lock); - iwl_sta_ucode_deactivate(priv, sta_id); - spin_unlock_bh(&priv->sta_lock); - } - IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); - break; - default: - ret = -EIO; - IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); - break; - } - - iwl_free_resp(&cmd); - - return ret; -} - -/** - * iwl_remove_station - Remove driver's knowledge of station. - */ -int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, - const u8 *addr) -{ - u8 tid; - - if (!iwl_is_ready(priv)) { - IWL_DEBUG_INFO(priv, - "Unable to remove station %pM, device not ready.\n", - addr); - /* - * It is typical for stations to be removed when we are - * going down. 
Return success since device will be down - * soon anyway - */ - return 0; - } - - IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", - sta_id, addr); - - if (WARN_ON(sta_id == IWL_INVALID_STATION)) - return -EINVAL; - - spin_lock_bh(&priv->sta_lock); - - if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { - IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", - addr); - goto out_err; - } - - if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { - IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", - addr); - goto out_err; - } - - if (priv->stations[sta_id].used & IWL_STA_LOCAL) { - kfree(priv->stations[sta_id].lq); - priv->stations[sta_id].lq = NULL; - } - - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) - memset(&priv->tid_data[sta_id][tid], 0, - sizeof(priv->tid_data[sta_id][tid])); - - priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; - - priv->num_stations--; - - if (WARN_ON(priv->num_stations < 0)) - priv->num_stations = 0; - - spin_unlock_bh(&priv->sta_lock); - - return iwl_send_remove_station(priv, addr, sta_id, false); -out_err: - spin_unlock_bh(&priv->sta_lock); - return -EINVAL; -} - -void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, - const u8 *addr) -{ - u8 tid; - - if (!iwl_is_ready(priv)) { - IWL_DEBUG_INFO(priv, - "Unable to remove station %pM, device not ready.\n", - addr); - return; - } - - IWL_DEBUG_ASSOC(priv, "Deactivating STA: %pM (%d)\n", addr, sta_id); - - if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION)) - return; - - spin_lock_bh(&priv->sta_lock); - - WARN_ON_ONCE(!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)); - - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) - memset(&priv->tid_data[sta_id][tid], 0, - sizeof(priv->tid_data[sta_id][tid])); - - priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; - priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; - - priv->num_stations--; - - if (WARN_ON_ONCE(priv->num_stations < 0)) - priv->num_stations = 0; - - spin_unlock_bh(&priv->sta_lock); -} - -static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - u8 sta_id, struct iwl_link_quality_cmd *link_cmd) -{ - int i, r; - u32 rate_flags = 0; - __le32 rate_n_flags; - - lockdep_assert_held(&priv->mutex); - - memset(link_cmd, 0, sizeof(*link_cmd)); - - /* Set up the rate scaling to start at selected rate, fall back - * all the way down to 1M in IEEE order, and then spin on 1M */ - if (priv->band == IEEE80211_BAND_5GHZ) - r = IWL_RATE_6M_INDEX; - else if (ctx && ctx->vif && ctx->vif->p2p) - r = IWL_RATE_6M_INDEX; - else - r = IWL_RATE_1M_INDEX; - - if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) - rate_flags |= RATE_MCS_CCK_MSK; - - rate_flags |= first_antenna(priv->nvm_data->valid_tx_ant) << - RATE_MCS_ANT_POS; - rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) - link_cmd->rs_table[i].rate_n_flags = rate_n_flags; - - link_cmd->general_params.single_stream_ant_msk = - first_antenna(priv->nvm_data->valid_tx_ant); - - link_cmd->general_params.dual_stream_ant_msk = - priv->nvm_data->valid_tx_ant & - ~first_antenna(priv->nvm_data->valid_tx_ant); - if (!link_cmd->general_params.dual_stream_ant_msk) { - link_cmd->general_params.dual_stream_ant_msk = ANT_AB; - } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) { - link_cmd->general_params.dual_stream_ant_msk = - priv->nvm_data->valid_tx_ant; - } - - link_cmd->agg_params.agg_dis_start_th = - LINK_QUAL_AGG_DISABLE_START_DEF; - 
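The antenna-mask setup in iwl_sta_fill_lq() above reduces to: single-stream rates use the first valid antenna and dual-stream rates use the remaining ones, except that with exactly two valid antennas both are used, and with a single valid antenna the mask falls back to A|B. A standalone restatement with hypothetical ANT_* bit values (A = bit 0, B = bit 1) standing in for the driver's definitions:

#include <stdio.h>

/* Hypothetical ANT_* bit values standing in for the driver's. */
#define ANT_A   0x1
#define ANT_B   0x2
#define ANT_AB  (ANT_A | ANT_B)

static unsigned char first_antenna(unsigned char mask)
{
    return mask & -mask;    /* lowest valid antenna */
}

static int num_of_ant(unsigned char mask)
{
    int n = 0;

    while (mask) {
        n += mask & 1;
        mask >>= 1;
    }
    return n;
}

int main(void)
{
    unsigned char valid = ANT_AB;   /* two valid TX antennas */
    unsigned char single = first_antenna(valid);
    unsigned char dual = valid & ~single;

    if (!dual)
        dual = ANT_AB;              /* one antenna: fall back to A|B */
    else if (num_of_ant(valid) == 2)
        dual = valid;               /* exactly two: use both for MIMO */

    printf("single 0x%x dual 0x%x\n", single, dual);    /* 0x1 0x3 */
    return 0;
}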
link_cmd->agg_params.agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); - - link_cmd->sta_id = sta_id; -} - -/** - * iwl_clear_ucode_stations - clear ucode station table bits - * - * This function clears all the bits in the driver indicating - * which stations are active in the ucode. Call when something - * other than explicit station management would cause this in - * the ucode, e.g. unassociated RXON. - */ -void iwl_clear_ucode_stations(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - int i; - bool cleared = false; - - IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); - - spin_lock_bh(&priv->sta_lock); - for (i = 0; i < IWLAGN_STATION_COUNT; i++) { - if (ctx && ctx->ctxid != priv->stations[i].ctxid) - continue; - - if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { - IWL_DEBUG_INFO(priv, - "Clearing ucode active for station %d\n", i); - priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; - cleared = true; - } - } - spin_unlock_bh(&priv->sta_lock); - - if (!cleared) - IWL_DEBUG_INFO(priv, - "No active stations found to be cleared\n"); -} - -/** - * iwl_restore_stations() - Restore driver known stations to device - * - * All stations considered active by driver, but not present in ucode, is - * restored. - * - * Function sleeps. - */ -void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - struct iwl_addsta_cmd sta_cmd; - static const struct iwl_link_quality_cmd zero_lq = {}; - struct iwl_link_quality_cmd lq; - int i; - bool found = false; - int ret; - bool send_lq; - - if (!iwl_is_ready(priv)) { - IWL_DEBUG_INFO(priv, - "Not ready yet, not restoring any stations.\n"); - return; - } - - IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); - spin_lock_bh(&priv->sta_lock); - for (i = 0; i < IWLAGN_STATION_COUNT; i++) { - if (ctx->ctxid != priv->stations[i].ctxid) - continue; - if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && - !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { - IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", - priv->stations[i].sta.sta.addr); - priv->stations[i].sta.mode = 0; - priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; - found = true; - } - } - - for (i = 0; i < IWLAGN_STATION_COUNT; i++) { - if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { - memcpy(&sta_cmd, &priv->stations[i].sta, - sizeof(struct iwl_addsta_cmd)); - send_lq = false; - if (priv->stations[i].lq) { - if (priv->wowlan) - iwl_sta_fill_lq(priv, ctx, i, &lq); - else - memcpy(&lq, priv->stations[i].lq, - sizeof(struct iwl_link_quality_cmd)); - - if (memcmp(&lq, &zero_lq, sizeof(lq))) - send_lq = true; - } - spin_unlock_bh(&priv->sta_lock); - ret = iwl_send_add_sta(priv, &sta_cmd, 0); - if (ret) { - spin_lock_bh(&priv->sta_lock); - IWL_ERR(priv, "Adding station %pM failed.\n", - priv->stations[i].sta.sta.addr); - priv->stations[i].used &= - ~IWL_STA_DRIVER_ACTIVE; - priv->stations[i].used &= - ~IWL_STA_UCODE_INPROGRESS; - continue; - } - /* - * Rate scaling has already been initialized, send - * current LQ command - */ - if (send_lq) - iwl_send_lq_cmd(priv, ctx, &lq, 0, true); - spin_lock_bh(&priv->sta_lock); - priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; - } - } - - spin_unlock_bh(&priv->sta_lock); - if (!found) - IWL_DEBUG_INFO(priv, "Restoring all known stations .... " - "no stations to be restored.\n"); - else - IWL_DEBUG_INFO(priv, "Restoring all known stations .... 
" - "complete.\n"); -} - -int iwl_get_free_ucode_key_offset(struct iwl_priv *priv) -{ - int i; - - for (i = 0; i < priv->sta_key_max_num; i++) - if (!test_and_set_bit(i, &priv->ucode_key_table)) - return i; - - return WEP_INVALID_OFFSET; -} - -void iwl_dealloc_bcast_stations(struct iwl_priv *priv) -{ - int i; - - spin_lock_bh(&priv->sta_lock); - for (i = 0; i < IWLAGN_STATION_COUNT; i++) { - if (!(priv->stations[i].used & IWL_STA_BCAST)) - continue; - - priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; - priv->num_stations--; - if (WARN_ON(priv->num_stations < 0)) - priv->num_stations = 0; - kfree(priv->stations[i].lq); - priv->stations[i].lq = NULL; - } - spin_unlock_bh(&priv->sta_lock); -} - -#ifdef CONFIG_IWLWIFI_DEBUG -static void iwl_dump_lq_cmd(struct iwl_priv *priv, - struct iwl_link_quality_cmd *lq) -{ - int i; - IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); - IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", - lq->general_params.single_stream_ant_msk, - lq->general_params.dual_stream_ant_msk); - - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) - IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", - i, lq->rs_table[i].rate_n_flags); -} -#else -static inline void iwl_dump_lq_cmd(struct iwl_priv *priv, - struct iwl_link_quality_cmd *lq) -{ -} -#endif - -/** - * is_lq_table_valid() - Test one aspect of LQ cmd for validity - * - * It sometimes happens when a HT rate has been in use and we - * loose connectivity with AP then mac80211 will first tell us that the - * current channel is not HT anymore before removing the station. In such a - * scenario the RXON flags will be updated to indicate we are not - * communicating HT anymore, but the LQ command may still contain HT rates. - * Test for this to prevent driver from sending LQ command between the time - * RXON flags are updated and when LQ command is updated. - */ -static bool is_lq_table_valid(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct iwl_link_quality_cmd *lq) -{ - int i; - - if (ctx->ht.enabled) - return true; - - IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", - ctx->active.channel); - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { - if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & - RATE_MCS_HT_MSK) { - IWL_DEBUG_INFO(priv, - "index %d of LQ expects HT channel\n", - i); - return false; - } - } - return true; -} - -/** - * iwl_send_lq_cmd() - Send link quality command - * @init: This command is sent as part of station initialization right - * after station has been added. - * - * The link quality command is sent as the last step of station creation. - * This is the special case in which init is set and we call a callback in - * this case to clear the state indicating that station creation is in - * progress. 
- */ -int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct iwl_link_quality_cmd *lq, u8 flags, bool init) -{ - int ret = 0; - struct iwl_host_cmd cmd = { - .id = REPLY_TX_LINK_QUALITY_CMD, - .len = { sizeof(struct iwl_link_quality_cmd), }, - .flags = flags, - .data = { lq, }, - }; - - if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) - return -EINVAL; - - - spin_lock_bh(&priv->sta_lock); - if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { - spin_unlock_bh(&priv->sta_lock); - return -EINVAL; - } - spin_unlock_bh(&priv->sta_lock); - - iwl_dump_lq_cmd(priv, lq); - if (WARN_ON(init && (cmd.flags & CMD_ASYNC))) - return -EINVAL; - - if (is_lq_table_valid(priv, ctx, lq)) - ret = iwl_dvm_send_cmd(priv, &cmd); - else - ret = -EINVAL; - - if (cmd.flags & CMD_ASYNC) - return ret; - - if (init) { - IWL_DEBUG_INFO(priv, "init LQ command complete, " - "clearing sta addition status for sta %d\n", - lq->sta_id); - spin_lock_bh(&priv->sta_lock); - priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; - spin_unlock_bh(&priv->sta_lock); - } - return ret; -} - - -static struct iwl_link_quality_cmd * -iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - u8 sta_id) -{ - struct iwl_link_quality_cmd *link_cmd; - - link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL); - if (!link_cmd) { - IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n"); - return NULL; - } - - iwl_sta_fill_lq(priv, ctx, sta_id, link_cmd); - - return link_cmd; -} - -/* - * iwlagn_add_bssid_station - Add the special IBSS BSSID station - * - * Function sleeps. - */ -int iwlagn_add_bssid_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - const u8 *addr, u8 *sta_id_r) -{ - int ret; - u8 sta_id; - struct iwl_link_quality_cmd *link_cmd; - - if (sta_id_r) - *sta_id_r = IWL_INVALID_STATION; - - ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); - if (ret) { - IWL_ERR(priv, "Unable to add station %pM\n", addr); - return ret; - } - - if (sta_id_r) - *sta_id_r = sta_id; - - spin_lock_bh(&priv->sta_lock); - priv->stations[sta_id].used |= IWL_STA_LOCAL; - spin_unlock_bh(&priv->sta_lock); - - /* Set up default rate scaling table in device's station table */ - link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); - if (!link_cmd) { - IWL_ERR(priv, - "Unable to initialize rate scaling for station %pM.\n", - addr); - return -ENOMEM; - } - - ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true); - if (ret) - IWL_ERR(priv, "Link quality command failed (%d)\n", ret); - - spin_lock_bh(&priv->sta_lock); - priv->stations[sta_id].lq = link_cmd; - spin_unlock_bh(&priv->sta_lock); - - return 0; -} - -/* - * static WEP keys - * - * For each context, the device has a table of 4 static WEP keys - * (one for each key index) that is updated with the following - * commands. 
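The helper that follows builds one command covering all four static WEP slots, marking unused slots with an invalid offset and skipping the send entirely when every slot is empty (unless explicitly forced). The sketch below models just that table-building step; NUM_WEP_KEYS, INVALID_OFFSET and the struct layout are illustrative stand-ins, not the firmware command format.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NUM_WEP_KEYS   4    /* one slot per WEP key index */
#define INVALID_OFFSET 255  /* placeholder for "slot not programmed" */

struct wep_slot {
    uint8_t key_index;   /* 0..3 */
    uint8_t key_offset;  /* equals key_index when the slot is in use */
    uint8_t key_size;    /* 0 means empty, else 5 or 13 bytes */
    uint8_t key[13];
};

/*
 * Fill all four slots from the driver-side copy of the keys and report
 * whether anything needs to be sent at all: an all-empty table is skipped
 * unless the caller forces an update, as in the code above.
 */
static bool build_wep_table(struct wep_slot out[NUM_WEP_KEYS],
                            const uint8_t sizes[NUM_WEP_KEYS],
                            const uint8_t keys[NUM_WEP_KEYS][13],
                            bool send_if_empty)
{
    bool not_empty = false;

    for (int i = 0; i < NUM_WEP_KEYS; i++) {
        out[i].key_index = i;
        out[i].key_size = sizes[i];
        if (sizes[i]) {
            out[i].key_offset = i;
            memcpy(out[i].key, keys[i], sizes[i]);
            not_empty = true;
        } else {
            out[i].key_offset = INVALID_OFFSET;
        }
    }
    return not_empty || send_if_empty;
}

int main(void)
{
    struct wep_slot table[NUM_WEP_KEYS];
    const uint8_t sizes[NUM_WEP_KEYS] = { 5, 0, 0, 0 };
    const uint8_t keys[NUM_WEP_KEYS][13] = { { 1, 2, 3, 4, 5 } };

    return build_wep_table(table, sizes, keys, false) ? 0 : 1;
}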
- */ - -static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - bool send_if_empty) -{ - int i, not_empty = 0; - u8 buff[sizeof(struct iwl_wep_cmd) + - sizeof(struct iwl_wep_key) * WEP_KEYS_MAX]; - struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; - size_t cmd_size = sizeof(struct iwl_wep_cmd); - struct iwl_host_cmd cmd = { - .id = ctx->wep_key_cmd, - .data = { wep_cmd, }, - }; - - might_sleep(); - - memset(wep_cmd, 0, cmd_size + - (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); - - for (i = 0; i < WEP_KEYS_MAX ; i++) { - wep_cmd->key[i].key_index = i; - if (ctx->wep_keys[i].key_size) { - wep_cmd->key[i].key_offset = i; - not_empty = 1; - } else { - wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; - } - - wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; - memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, - ctx->wep_keys[i].key_size); - } - - wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; - wep_cmd->num_keys = WEP_KEYS_MAX; - - cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; - - cmd.len[0] = cmd_size; - - if (not_empty || send_if_empty) - return iwl_dvm_send_cmd(priv, &cmd); - else - return 0; -} - -int iwl_restore_default_wep_keys(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - lockdep_assert_held(&priv->mutex); - - return iwl_send_static_wepkey_cmd(priv, ctx, false); -} - -int iwl_remove_default_wep_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", - keyconf->keyidx); - - memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); - if (iwl_is_rfkill(priv)) { - IWL_DEBUG_WEP(priv, - "Not sending REPLY_WEPKEY command due to RFKILL.\n"); - /* but keys in device are clear anyway so return success */ - return 0; - } - ret = iwl_send_static_wepkey_cmd(priv, ctx, 1); - IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", - keyconf->keyidx, ret); - - return ret; -} - -int iwl_set_default_wep_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf) -{ - int ret; - - lockdep_assert_held(&priv->mutex); - - if (keyconf->keylen != WEP_KEY_LEN_128 && - keyconf->keylen != WEP_KEY_LEN_64) { - IWL_DEBUG_WEP(priv, - "Bad WEP key length %d\n", keyconf->keylen); - return -EINVAL; - } - - keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT; - - ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; - memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, - keyconf->keylen); - - ret = iwl_send_static_wepkey_cmd(priv, ctx, false); - IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", - keyconf->keylen, keyconf->keyidx, ret); - - return ret; -} - -/* - * dynamic (per-station) keys - * - * The dynamic keys are a little more complicated. The device has - * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys. - * These are linked to stations by a table that contains an index - * into the key table for each station/key index/{mcast,unicast}, - * i.e. it's basically an array of pointers like this: - * key_offset_t key_mapping[NUM_STATIONS][4][2]; - * (it really works differently, but you can think of it as such) - * - * The key uploading and linking happens in the same command, the - * add station command with STA_MODIFY_KEY_MASK. 
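The sketch below is a stand-alone model of the bookkeeping described in this comment: a bitmap allocator for device key-cache offsets (the driver does this with test_and_set_bit() over ucode_key_table) plus the conceptual station/key-index/{unicast,multicast} mapping. The array sizes and the KEY_INVALID marker are illustrative assumptions, not the device's real limits.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative sizes; the real driver derives them from the device config. */
#define NUM_STATIONS   16
#define KEY_CACHE_SIZE 16
#define KEY_INVALID    0xff

static uint32_t key_bitmap;  /* one bit per key-cache slot */

/* Allocate the first free slot in the device key cache. */
static int alloc_key_offset(void)
{
    for (int i = 0; i < KEY_CACHE_SIZE; i++) {
        if (!(key_bitmap & (1u << i))) {
            key_bitmap |= 1u << i;
            return i;
        }
    }
    return -1;  /* cache full */
}

/* Conceptual station / key-index / {unicast,multicast} -> cache-offset map,
 * the "array of pointers" the comment above describes. */
static uint8_t key_mapping[NUM_STATIONS][4][2];

int main(void)
{
    memset(key_mapping, KEY_INVALID, sizeof(key_mapping));

    int off = alloc_key_offset();
    if (off < 0)
        return 1;
    /* Link key index 0, unicast, of station 3 to the allocated cache slot. */
    key_mapping[3][0][0] = (uint8_t)off;
    printf("station 3 key 0 -> offset %d\n", off);
    return 0;
}

In the driver both the upload of the key material and this linking happen in the single ADD_STA command mentioned above; the split here is only for illustration.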
- */ - -static u8 iwlagn_key_sta_id(struct iwl_priv *priv, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - - if (sta) - return iwl_sta_id(sta); - - /* - * The device expects GTKs for station interfaces to be - * installed as GTKs for the AP station. If we have no - * station ID, then use the ap_sta_id in that case. - */ - if (vif->type == NL80211_IFTYPE_STATION && vif_priv->ctx) - return vif_priv->ctx->ap_sta_id; - - return IWL_INVALID_STATION; -} - -static int iwlagn_send_sta_key(struct iwl_priv *priv, - struct ieee80211_key_conf *keyconf, - u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k, - u32 cmd_flags) -{ - __le16 key_flags; - struct iwl_addsta_cmd sta_cmd; - int i; - - spin_lock_bh(&priv->sta_lock); - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); - spin_unlock_bh(&priv->sta_lock); - - key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags |= STA_KEY_FLG_MAP_KEY_MSK; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - key_flags |= STA_KEY_FLG_CCMP; - memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen); - break; - case WLAN_CIPHER_SUITE_TKIP: - key_flags |= STA_KEY_FLG_TKIP; - sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32; - for (i = 0; i < 5; i++) - sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); - memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen); - break; - case WLAN_CIPHER_SUITE_WEP104: - key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; - /* fall through */ - case WLAN_CIPHER_SUITE_WEP40: - key_flags |= STA_KEY_FLG_WEP; - memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen); - break; - default: - WARN_ON(1); - return -EINVAL; - } - - if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - key_flags |= STA_KEY_MULTICAST_MSK; - - /* key pointer (offset) */ - sta_cmd.key.key_offset = keyconf->hw_key_idx; - - sta_cmd.key.key_flags = key_flags; - sta_cmd.mode = STA_CONTROL_MODIFY_MSK; - sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; - - return iwl_send_add_sta(priv, &sta_cmd, cmd_flags); -} - -void iwl_update_tkip_key(struct iwl_priv *priv, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) -{ - u8 sta_id = iwlagn_key_sta_id(priv, vif, sta); - - if (sta_id == IWL_INVALID_STATION) - return; - - if (iwl_scan_cancel(priv)) { - /* cancel scan failed, just live w/ bad key and rely - briefly on SW decryption */ - return; - } - - iwlagn_send_sta_key(priv, keyconf, sta_id, - iv32, phase1key, CMD_ASYNC); -} - -int iwl_remove_dynamic_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta) -{ - struct iwl_addsta_cmd sta_cmd; - u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta); - __le16 key_flags; - - /* if station isn't there, neither is the key */ - if (sta_id == IWL_INVALID_STATION) - return -ENOENT; - - spin_lock_bh(&priv->sta_lock); - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); - if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) - sta_id = IWL_INVALID_STATION; - spin_unlock_bh(&priv->sta_lock); - - if (sta_id == IWL_INVALID_STATION) - return 0; - - lockdep_assert_held(&priv->mutex); - - ctx->key_mapping_keys--; - - IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", - keyconf->keyidx, sta_id); - - if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table)) - IWL_ERR(priv, "offset %d not used in uCode key table.\n", - keyconf->hw_key_idx); - - key_flags = cpu_to_le16(keyconf->keyidx << 
STA_KEY_FLG_KEYID_POS); - key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC | - STA_KEY_FLG_INVALID; - - if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - key_flags |= STA_KEY_MULTICAST_MSK; - - sta_cmd.key.key_flags = key_flags; - sta_cmd.key.key_offset = keyconf->hw_key_idx; - sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; - sta_cmd.mode = STA_CONTROL_MODIFY_MSK; - - return iwl_send_add_sta(priv, &sta_cmd, 0); -} - -int iwl_set_dynamic_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta) -{ - struct ieee80211_key_seq seq; - u16 p1k[5]; - int ret; - u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta); - const u8 *addr; - - if (sta_id == IWL_INVALID_STATION) - return -EINVAL; - - lockdep_assert_held(&priv->mutex); - - keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv); - if (keyconf->hw_key_idx == WEP_INVALID_OFFSET) - return -ENOSPC; - - ctx->key_mapping_keys++; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - if (sta) - addr = sta->addr; - else /* station mode case only */ - addr = ctx->active.bssid_addr; - - /* pre-fill phase 1 key into device cache */ - ieee80211_get_key_rx_seq(keyconf, 0, &seq); - ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); - ret = iwlagn_send_sta_key(priv, keyconf, sta_id, - seq.tkip.iv32, p1k, 0); - break; - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - ret = iwlagn_send_sta_key(priv, keyconf, sta_id, - 0, NULL, 0); - break; - default: - IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher); - ret = -EINVAL; - } - - if (ret) { - ctx->key_mapping_keys--; - clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table); - } - - IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", - keyconf->cipher, keyconf->keylen, keyconf->keyidx, - sta ? sta->addr : NULL, ret); - - return ret; -} - -/** - * iwlagn_alloc_bcast_station - add broadcast station into driver's station table. - * - * This adds the broadcast station into the driver's station table - * and marks it driver active, so that it will be restored to the - * device at the next best time. - */ -int iwlagn_alloc_bcast_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct iwl_link_quality_cmd *link_cmd; - u8 sta_id; - - spin_lock_bh(&priv->sta_lock); - sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Unable to prepare broadcast station\n"); - spin_unlock_bh(&priv->sta_lock); - - return -EINVAL; - } - - priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; - priv->stations[sta_id].used |= IWL_STA_BCAST; - spin_unlock_bh(&priv->sta_lock); - - link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); - if (!link_cmd) { - IWL_ERR(priv, - "Unable to initialize rate scaling for bcast station.\n"); - return -ENOMEM; - } - - spin_lock_bh(&priv->sta_lock); - priv->stations[sta_id].lq = link_cmd; - spin_unlock_bh(&priv->sta_lock); - - return 0; -} - -/** - * iwl_update_bcast_station - update broadcast station's LQ command - * - * Only used by iwlagn. Placed here to have all bcast station management - * code together. 
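The broadcast-station update helper introduced by this comment allocates a fresh link-quality command outside the lock, then swaps it in and frees the previous one under sta_lock. A plain-C model of that pattern is sketched below, with a pthread mutex standing in for the spinlock and a dummy struct in place of iwl_link_quality_cmd.

#include <pthread.h>
#include <stdlib.h>

/* Stand-in for struct iwl_link_quality_cmd; the contents don't matter here. */
struct lq_cmd { int rate_table[16]; };

static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;
static struct lq_cmd *bcast_lq;  /* per-station LQ pointer in the driver */

/*
 * Replace the broadcast station's LQ command: build the new one outside
 * the lock, then swap the pointer and free the old block under the lock.
 */
static int update_bcast_lq(void)
{
    struct lq_cmd *fresh = calloc(1, sizeof(*fresh));

    if (!fresh)
        return -1;

    pthread_mutex_lock(&sta_lock);
    free(bcast_lq);   /* may be NULL on first use */
    bcast_lq = fresh;
    pthread_mutex_unlock(&sta_lock);
    return 0;
}

int main(void)
{
    update_bcast_lq();  /* first installation */
    update_bcast_lq();  /* replacement frees the previous block */
    free(bcast_lq);
    return 0;
}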
- */ -int iwl_update_bcast_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - struct iwl_link_quality_cmd *link_cmd; - u8 sta_id = ctx->bcast_sta_id; - - link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); - if (!link_cmd) { - IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n"); - return -ENOMEM; - } - - spin_lock_bh(&priv->sta_lock); - if (priv->stations[sta_id].lq) - kfree(priv->stations[sta_id].lq); - else - IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n"); - priv->stations[sta_id].lq = link_cmd; - spin_unlock_bh(&priv->sta_lock); - - return 0; -} - -int iwl_update_bcast_stations(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx; - int ret = 0; - - for_each_context(priv, ctx) { - ret = iwl_update_bcast_station(priv, ctx); - if (ret) - break; - } - - return ret; -} - -/** - * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table - */ -int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) -{ - struct iwl_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - /* Remove "disable" flag, to enable Tx for this TID */ - spin_lock_bh(&priv->sta_lock); - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; - priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); - spin_unlock_bh(&priv->sta_lock); - - return iwl_send_add_sta(priv, &sta_cmd, 0); -} - -int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, - int tid, u16 ssn) -{ - int sta_id; - struct iwl_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - sta_id = iwl_sta_id(sta); - if (sta_id == IWL_INVALID_STATION) - return -ENXIO; - - spin_lock_bh(&priv->sta_lock); - priv->stations[sta_id].sta.station_flags_msk = 0; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; - priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; - priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); - spin_unlock_bh(&priv->sta_lock); - - return iwl_send_add_sta(priv, &sta_cmd, 0); -} - -int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, - int tid) -{ - int sta_id; - struct iwl_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - sta_id = iwl_sta_id(sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); - return -ENXIO; - } - - spin_lock_bh(&priv->sta_lock); - priv->stations[sta_id].sta.station_flags_msk = 0; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; - priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); - spin_unlock_bh(&priv->sta_lock); - - return iwl_send_add_sta(priv, &sta_cmd, 0); -} - - - -void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) -{ - struct iwl_addsta_cmd cmd = { - .mode = STA_CONTROL_MODIFY_MSK, - .station_flags = STA_FLG_PWR_SAVE_MSK, - .station_flags_msk = STA_FLG_PWR_SAVE_MSK, - .sta.sta_id = sta_id, - .sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK, - .sleep_tx_count = cpu_to_le16(cnt), - }; - - iwl_send_add_sta(priv, &cmd, CMD_ASYNC); -} diff --git 
a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c deleted file mode 100644 index c4736c8834c5..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/tt.c +++ /dev/null @@ -1,685 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - - -#include -#include -#include -#include -#include "iwl-io.h" -#include "iwl-modparams.h" -#include "iwl-debug.h" -#include "agn.h" -#include "dev.h" -#include "commands.h" -#include "tt.h" - -/* default Thermal Throttling transaction table - * Current state | Throttling Down | Throttling Up - *============================================================================= - * Condition Nxt State Condition Nxt State Condition Nxt State - *----------------------------------------------------------------------------- - * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A - * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0 - * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1 - * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 - *============================================================================= - */ -static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = { - {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104}, - {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1}, - {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} -}; -static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = { - {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95}, - {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1}, - {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} -}; -static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = { - {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100}, - {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}, - {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} -}; -static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = { - {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD}, - {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}, - {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX} -}; - -/* Advance Thermal Throttling default restriction table */ -static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = { - {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true }, - {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true }, - {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false }, - {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false } -}; - -bool 
iwl_tt_is_low_power_state(struct iwl_priv *priv) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - - if (tt->state >= IWL_TI_1) - return true; - return false; -} - -u8 iwl_tt_current_power_mode(struct iwl_priv *priv) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - - return tt->tt_power_mode; -} - -bool iwl_ht_enabled(struct iwl_priv *priv) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - struct iwl_tt_restriction *restriction; - - if (!priv->thermal_throttle.advanced_tt) - return true; - restriction = tt->restriction + tt->state; - return restriction->is_ht; -} - -static bool iwl_within_ct_kill_margin(struct iwl_priv *priv) -{ - s32 temp = priv->temperature; /* degrees CELSIUS except specified */ - bool within_margin = false; - - if (!priv->thermal_throttle.advanced_tt) - within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >= - CT_KILL_THRESHOLD_LEGACY) ? true : false; - else - within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >= - CT_KILL_THRESHOLD) ? true : false; - return within_margin; -} - -bool iwl_check_for_ct_kill(struct iwl_priv *priv) -{ - bool is_ct_kill = false; - - if (iwl_within_ct_kill_margin(priv)) { - iwl_tt_enter_ct_kill(priv); - is_ct_kill = true; - } - return is_ct_kill; -} - -enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - struct iwl_tt_restriction *restriction; - - if (!priv->thermal_throttle.advanced_tt) - return IWL_ANT_OK_MULTI; - restriction = tt->restriction + tt->state; - return restriction->tx_stream; -} - -enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - struct iwl_tt_restriction *restriction; - - if (!priv->thermal_throttle.advanced_tt) - return IWL_ANT_OK_MULTI; - restriction = tt->restriction + tt->state; - return restriction->rx_stream; -} - -#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */ -#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */ - -/* - * toggle the bit to wake up uCode and check the temperature - * if the temperature is below CT, uCode will stay awake and send card - * state notification with CT_KILL bit clear to inform Thermal Throttling - * Management to change state. 
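For reference, the advanced thermal-throttling transition tables above reduce to a small table-driven state machine: each current state owns (IWL_TI_STATE_MAX - 1) rows, and the row whose temperature range contains the current reading names the next state. The stand-alone sketch below models that lookup; the 114-degree CT-kill entry and 95-degree exit thresholds are taken from the table in the file comment, and INT_MAX stands in for IWL_ABSOLUTE_MAX.

#include <limits.h>
#include <stdio.h>

enum tt_state { TI_0, TI_1, TI_2, TI_CT_KILL, TI_STATE_MAX };

struct tt_trans { enum tt_state next; int low; int high; };

/* Flattened copy of the tt_range_* tables above, one row per current state. */
static const struct tt_trans trans[TI_STATE_MAX][TI_STATE_MAX - 1] = {
    [TI_0] = {
        { TI_0,         0, 104 },
        { TI_1,       105, 113 },
        { TI_CT_KILL, 114, INT_MAX },
    },
    [TI_1] = {
        { TI_0,         0,  95 },
        { TI_2,       110, 113 },
        { TI_CT_KILL, 114, INT_MAX },
    },
    [TI_2] = {
        { TI_1,         0, 100 },
        { TI_CT_KILL, 114, INT_MAX },
        { TI_CT_KILL, 114, INT_MAX },
    },
    [TI_CT_KILL] = {
        { TI_0,         0,  95 },
        { TI_CT_KILL,  96, INT_MAX },
        { TI_CT_KILL,  96, INT_MAX },
    },
};

/* Walk the row for the current state and return the state whose
 * [low, high] range contains the measured temperature (in Celsius). */
static enum tt_state tt_next_state(enum tt_state cur, int temp)
{
    for (int i = 0; i < TI_STATE_MAX - 1; i++)
        if (temp >= trans[cur][i].low && temp <= trans[cur][i].high)
            return trans[cur][i].next;
    return cur;  /* no matching range: stay put */
}

int main(void)
{
    printf("%d\n", tt_next_state(TI_0, 108));       /* -> TI_1 */
    printf("%d\n", tt_next_state(TI_1, 120));       /* -> TI_CT_KILL */
    printf("%d\n", tt_next_state(TI_CT_KILL, 90));  /* -> TI_0 */
    return 0;
}

The handler later in this file locates the applicable row with the same old_state * (IWL_TI_STATE_MAX - 1) + i walk over a single flat array.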
Otherwise, uCode will go back to sleep - * without doing anything, driver should continue the 5 seconds timer - * to wake up uCode for temperature check until temperature drop below CT - */ -static void iwl_tt_check_exit_ct_kill(unsigned long data) -{ - struct iwl_priv *priv = (struct iwl_priv *)data; - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - unsigned long flags; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (tt->state == IWL_TI_CT_KILL) { - if (priv->thermal_throttle.ct_kill_toggle) { - iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); - priv->thermal_throttle.ct_kill_toggle = false; - } else { - iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); - priv->thermal_throttle.ct_kill_toggle = true; - } - iwl_read32(priv->trans, CSR_UCODE_DRV_GP1); - if (iwl_trans_grab_nic_access(priv->trans, false, &flags)) - iwl_trans_release_nic_access(priv->trans, &flags); - - /* Reschedule the ct_kill timer to occur in - * CT_KILL_EXIT_DURATION seconds to ensure we get a - * thermal update */ - IWL_DEBUG_TEMP(priv, "schedule ct_kill exit timer\n"); - mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, - jiffies + CT_KILL_EXIT_DURATION * HZ); - } -} - -static void iwl_perform_ct_kill_task(struct iwl_priv *priv, - bool stop) -{ - if (stop) { - IWL_DEBUG_TEMP(priv, "Stop all queues\n"); - if (priv->mac80211_registered) - ieee80211_stop_queues(priv->hw); - IWL_DEBUG_TEMP(priv, - "Schedule 5 seconds CT_KILL Timer\n"); - mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, - jiffies + CT_KILL_EXIT_DURATION * HZ); - } else { - IWL_DEBUG_TEMP(priv, "Wake all queues\n"); - if (priv->mac80211_registered) - ieee80211_wake_queues(priv->hw); - } -} - -static void iwl_tt_ready_for_ct_kill(unsigned long data) -{ - struct iwl_priv *priv = (struct iwl_priv *)data; - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - /* temperature timer expired, ready to go into CT_KILL state */ - if (tt->state != IWL_TI_CT_KILL) { - IWL_DEBUG_TEMP(priv, "entering CT_KILL state when " - "temperature timer expired\n"); - tt->state = IWL_TI_CT_KILL; - set_bit(STATUS_CT_KILL, &priv->status); - iwl_perform_ct_kill_task(priv, true); - } -} - -static void iwl_prepare_ct_kill_task(struct iwl_priv *priv) -{ - IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n"); - /* make request to retrieve statistics information */ - iwl_send_statistics_request(priv, 0, false); - /* Reschedule the ct_kill wait timer */ - mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm, - jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION)); -} - -#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY) -#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100) -#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90) - -/* - * Legacy thermal throttling - * 1) Avoid NIC destruction due to high temperatures - * Chip will identify dangerously high temperatures that can - * harm the device and will power down - * 2) Avoid the NIC power down due to high temperature - * Throttle early enough to lower the power consumption before - * drastic steps are needed - */ -static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - enum iwl_tt_state old_state; - -#ifdef CONFIG_IWLWIFI_DEBUG - if ((tt->tt_previous_temp) && - (temp > tt->tt_previous_temp) && - ((temp - tt->tt_previous_temp) > - IWL_TT_INCREASE_MARGIN)) { - 
IWL_DEBUG_TEMP(priv, - "Temperature increase %d degree Celsius\n", - (temp - tt->tt_previous_temp)); - } -#endif - old_state = tt->state; - /* in Celsius */ - if (temp >= IWL_MINIMAL_POWER_THRESHOLD) - tt->state = IWL_TI_CT_KILL; - else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2) - tt->state = IWL_TI_2; - else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1) - tt->state = IWL_TI_1; - else - tt->state = IWL_TI_0; - -#ifdef CONFIG_IWLWIFI_DEBUG - tt->tt_previous_temp = temp; -#endif - /* stop ct_kill_waiting_tm timer */ - del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); - if (tt->state != old_state) { - switch (tt->state) { - case IWL_TI_0: - /* - * When the system is ready to go back to IWL_TI_0 - * we only have to call iwl_power_update_mode() to - * do so. - */ - break; - case IWL_TI_1: - tt->tt_power_mode = IWL_POWER_INDEX_3; - break; - case IWL_TI_2: - tt->tt_power_mode = IWL_POWER_INDEX_4; - break; - default: - tt->tt_power_mode = IWL_POWER_INDEX_5; - break; - } - mutex_lock(&priv->mutex); - if (old_state == IWL_TI_CT_KILL) - clear_bit(STATUS_CT_KILL, &priv->status); - if (tt->state != IWL_TI_CT_KILL && - iwl_power_update_mode(priv, true)) { - /* TT state not updated - * try again during next temperature read - */ - if (old_state == IWL_TI_CT_KILL) - set_bit(STATUS_CT_KILL, &priv->status); - tt->state = old_state; - IWL_ERR(priv, "Cannot update power mode, " - "TT state not updated\n"); - } else { - if (tt->state == IWL_TI_CT_KILL) { - if (force) { - set_bit(STATUS_CT_KILL, &priv->status); - iwl_perform_ct_kill_task(priv, true); - } else { - iwl_prepare_ct_kill_task(priv); - tt->state = old_state; - } - } else if (old_state == IWL_TI_CT_KILL && - tt->state != IWL_TI_CT_KILL) - iwl_perform_ct_kill_task(priv, false); - IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n", - tt->state); - IWL_DEBUG_TEMP(priv, "Power Index change to %u\n", - tt->tt_power_mode); - } - mutex_unlock(&priv->mutex); - } -} - -/* - * Advance thermal throttling - * 1) Avoid NIC destruction due to high temperatures - * Chip will identify dangerously high temperatures that can - * harm the device and will power down - * 2) Avoid the NIC power down due to high temperature - * Throttle early enough to lower the power consumption before - * drastic steps are needed - * Actions include relaxing the power down sleep thresholds and - * decreasing the number of TX streams - * 3) Avoid throughput performance impact as much as possible - * - *============================================================================= - * Condition Nxt State Condition Nxt State Condition Nxt State - *----------------------------------------------------------------------------- - * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A - * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0 - * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1 - * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 - *============================================================================= - */ -static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - int i; - bool changed = false; - enum iwl_tt_state old_state; - struct iwl_tt_trans *transaction; - - old_state = tt->state; - for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) { - /* based on the current TT state, - * find the curresponding transaction table - * each table has (IWL_TI_STATE_MAX - 1) entries - * tt->transaction + ((old_state * (IWL_TI_STATE_MAX - 1)) - * will advance to the correct table. 
- * then based on the current temperature - * find the next state need to transaction to - * go through all the possible (IWL_TI_STATE_MAX - 1) entries - * in the current table to see if transaction is needed - */ - transaction = tt->transaction + - ((old_state * (IWL_TI_STATE_MAX - 1)) + i); - if (temp >= transaction->tt_low && - temp <= transaction->tt_high) { -#ifdef CONFIG_IWLWIFI_DEBUG - if ((tt->tt_previous_temp) && - (temp > tt->tt_previous_temp) && - ((temp - tt->tt_previous_temp) > - IWL_TT_INCREASE_MARGIN)) { - IWL_DEBUG_TEMP(priv, - "Temperature increase %d " - "degree Celsius\n", - (temp - tt->tt_previous_temp)); - } - tt->tt_previous_temp = temp; -#endif - if (old_state != - transaction->next_state) { - changed = true; - tt->state = - transaction->next_state; - } - break; - } - } - /* stop ct_kill_waiting_tm timer */ - del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); - if (changed) { - if (tt->state >= IWL_TI_1) { - /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */ - tt->tt_power_mode = IWL_POWER_INDEX_5; - - if (!iwl_ht_enabled(priv)) { - struct iwl_rxon_context *ctx; - - for_each_context(priv, ctx) { - struct iwl_rxon_cmd *rxon; - - rxon = &ctx->staging; - - /* disable HT */ - rxon->flags &= ~( - RXON_FLG_CHANNEL_MODE_MSK | - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | - RXON_FLG_HT40_PROT_MSK | - RXON_FLG_HT_PROT_MSK); - } - } else { - /* check HT capability and set - * according to the system HT capability - * in case get disabled before */ - iwl_set_rxon_ht(priv, &priv->current_ht_config); - } - - } else { - /* - * restore system power setting -- it will be - * recalculated automatically. - */ - - /* check HT capability and set - * according to the system HT capability - * in case get disabled before */ - iwl_set_rxon_ht(priv, &priv->current_ht_config); - } - mutex_lock(&priv->mutex); - if (old_state == IWL_TI_CT_KILL) - clear_bit(STATUS_CT_KILL, &priv->status); - if (tt->state != IWL_TI_CT_KILL && - iwl_power_update_mode(priv, true)) { - /* TT state not updated - * try again during next temperature read - */ - IWL_ERR(priv, "Cannot update power mode, " - "TT state not updated\n"); - if (old_state == IWL_TI_CT_KILL) - set_bit(STATUS_CT_KILL, &priv->status); - tt->state = old_state; - } else { - IWL_DEBUG_TEMP(priv, - "Thermal Throttling to new state: %u\n", - tt->state); - if (old_state != IWL_TI_CT_KILL && - tt->state == IWL_TI_CT_KILL) { - if (force) { - IWL_DEBUG_TEMP(priv, - "Enter IWL_TI_CT_KILL\n"); - set_bit(STATUS_CT_KILL, &priv->status); - iwl_perform_ct_kill_task(priv, true); - } else { - tt->state = old_state; - iwl_prepare_ct_kill_task(priv); - } - } else if (old_state == IWL_TI_CT_KILL && - tt->state != IWL_TI_CT_KILL) { - IWL_DEBUG_TEMP(priv, "Exit IWL_TI_CT_KILL\n"); - iwl_perform_ct_kill_task(priv, false); - } - } - mutex_unlock(&priv->mutex); - } -} - -/* Card State Notification indicated reach critical temperature - * if PSP not enable, no Thermal Throttling function will be performed - * just set the GP1 bit to acknowledge the event - * otherwise, go into IWL_TI_CT_KILL state - * since Card State Notification will not provide any temperature reading - * for Legacy mode - * so just pass the CT_KILL temperature to iwl_legacy_tt_handler() - * for advance mode - * pass CT_KILL_THRESHOLD+1 to make sure move into IWL_TI_CT_KILL state - */ -static void iwl_bg_ct_enter(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter); - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - - if 
(test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (!iwl_is_ready(priv)) - return; - - if (tt->state != IWL_TI_CT_KILL) { - IWL_ERR(priv, "Device reached critical temperature " - "- ucode going to sleep!\n"); - if (!priv->thermal_throttle.advanced_tt) - iwl_legacy_tt_handler(priv, - IWL_MINIMAL_POWER_THRESHOLD, - true); - else - iwl_advance_tt_handler(priv, - CT_KILL_THRESHOLD + 1, true); - } -} - -/* Card State Notification indicated out of critical temperature - * since Card State Notification will not provide any temperature reading - * so pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature - * to iwl_legacy_tt_handler() to get out of IWL_CT_KILL state - */ -static void iwl_bg_ct_exit(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit); - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (!iwl_is_ready(priv)) - return; - - /* stop ct_kill_exit_tm timer */ - del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); - - if (tt->state == IWL_TI_CT_KILL) { - IWL_ERR(priv, - "Device temperature below critical" - "- ucode awake!\n"); - /* - * exit from CT_KILL state - * reset the current temperature reading - */ - priv->temperature = 0; - if (!priv->thermal_throttle.advanced_tt) - iwl_legacy_tt_handler(priv, - IWL_REDUCED_PERFORMANCE_THRESHOLD_2, - true); - else - iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD, - true); - } -} - -void iwl_tt_enter_ct_kill(struct iwl_priv *priv) -{ - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n"); - queue_work(priv->workqueue, &priv->ct_enter); -} - -void iwl_tt_exit_ct_kill(struct iwl_priv *priv) -{ - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n"); - queue_work(priv->workqueue, &priv->ct_exit); -} - -static void iwl_bg_tt_work(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); - s32 temp = priv->temperature; /* degrees CELSIUS except specified */ - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (!priv->thermal_throttle.advanced_tt) - iwl_legacy_tt_handler(priv, temp, false); - else - iwl_advance_tt_handler(priv, temp, false); -} - -void iwl_tt_handler(struct iwl_priv *priv) -{ - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n"); - queue_work(priv->workqueue, &priv->tt_work); -} - -/* Thermal throttling initialization - * For advance thermal throttling: - * Initialize Thermal Index and temperature threshold table - * Initialize thermal throttling restriction table - */ -void iwl_tt_initialize(struct iwl_priv *priv) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1); - struct iwl_tt_trans *transaction; - - IWL_DEBUG_TEMP(priv, "Initialize Thermal Throttling\n"); - - memset(tt, 0, sizeof(struct iwl_tt_mgmt)); - - tt->state = IWL_TI_0; - setup_timer(&priv->thermal_throttle.ct_kill_exit_tm, - iwl_tt_check_exit_ct_kill, (unsigned long)priv); - setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm, - iwl_tt_ready_for_ct_kill, (unsigned long)priv); - /* setup deferred ct kill work */ - INIT_WORK(&priv->tt_work, iwl_bg_tt_work); - INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); - INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); - - if (priv->lib->adv_thermal_throttle) { 
- IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); - tt->restriction = kcalloc(IWL_TI_STATE_MAX, - sizeof(struct iwl_tt_restriction), - GFP_KERNEL); - tt->transaction = kcalloc(IWL_TI_STATE_MAX * - (IWL_TI_STATE_MAX - 1), - sizeof(struct iwl_tt_trans), - GFP_KERNEL); - if (!tt->restriction || !tt->transaction) { - IWL_ERR(priv, "Fallback to Legacy Throttling\n"); - priv->thermal_throttle.advanced_tt = false; - kfree(tt->restriction); - tt->restriction = NULL; - kfree(tt->transaction); - tt->transaction = NULL; - } else { - transaction = tt->transaction + - (IWL_TI_0 * (IWL_TI_STATE_MAX - 1)); - memcpy(transaction, &tt_range_0[0], size); - transaction = tt->transaction + - (IWL_TI_1 * (IWL_TI_STATE_MAX - 1)); - memcpy(transaction, &tt_range_1[0], size); - transaction = tt->transaction + - (IWL_TI_2 * (IWL_TI_STATE_MAX - 1)); - memcpy(transaction, &tt_range_2[0], size); - transaction = tt->transaction + - (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1)); - memcpy(transaction, &tt_range_3[0], size); - size = sizeof(struct iwl_tt_restriction) * - IWL_TI_STATE_MAX; - memcpy(tt->restriction, - &restriction_range[0], size); - priv->thermal_throttle.advanced_tt = true; - } - } else { - IWL_DEBUG_TEMP(priv, "Legacy Thermal Throttling\n"); - priv->thermal_throttle.advanced_tt = false; - } -} - -/* cleanup thermal throttling management related memory and timer */ -void iwl_tt_exit(struct iwl_priv *priv) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - - /* stop ct_kill_exit_tm timer if activated */ - del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); - /* stop ct_kill_waiting_tm timer if activated */ - del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); - cancel_work_sync(&priv->tt_work); - cancel_work_sync(&priv->ct_enter); - cancel_work_sync(&priv->ct_exit); - - if (priv->thermal_throttle.advanced_tt) { - /* free advance thermal throttling memory */ - kfree(tt->restriction); - tt->restriction = NULL; - kfree(tt->transaction); - tt->transaction = NULL; - } -} diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h deleted file mode 100644 index 507726534b84..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/tt.h +++ /dev/null @@ -1,128 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ -#ifndef __iwl_tt_setting_h__ -#define __iwl_tt_setting_h__ - -#include "commands.h" - -#define IWL_ABSOLUTE_ZERO 0 -#define IWL_ABSOLUTE_MAX 0xFFFFFFFF -#define IWL_TT_INCREASE_MARGIN 5 -#define IWL_TT_CT_KILL_MARGIN 3 - -enum iwl_antenna_ok { - IWL_ANT_OK_NONE, - IWL_ANT_OK_SINGLE, - IWL_ANT_OK_MULTI, -}; - -/* Thermal Throttling State Machine states */ -enum iwl_tt_state { - IWL_TI_0, /* normal temperature, system power state */ - IWL_TI_1, /* high temperature detect, low power state */ - IWL_TI_2, /* higher temperature detected, lower power state */ - IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */ - IWL_TI_STATE_MAX -}; - -/** - * struct iwl_tt_restriction - Thermal Throttling restriction table - * @tx_stream: number of tx stream allowed - * @is_ht: ht enable/disable - * @rx_stream: number of rx stream allowed - * - * This table is used by advance thermal throttling management - * based on the current thermal throttling state, and determines - * the number of tx/rx streams and the status of HT operation. - */ -struct iwl_tt_restriction { - enum iwl_antenna_ok tx_stream; - enum iwl_antenna_ok rx_stream; - bool is_ht; -}; - -/** - * struct iwl_tt_trans - Thermal Throttling transaction table - * @next_state: next thermal throttling mode - * @tt_low: low temperature threshold to change state - * @tt_high: high temperature threshold to change state - * - * This is used by the advanced thermal throttling algorithm - * to determine the next thermal state to go based on the - * current temperature. - */ -struct iwl_tt_trans { - enum iwl_tt_state next_state; - u32 tt_low; - u32 tt_high; -}; - -/** - * struct iwl_tt_mgnt - Thermal Throttling Management structure - * @advanced_tt: advanced thermal throttle required - * @state: current Thermal Throttling state - * @tt_power_mode: Thermal Throttling power mode index - * being used to set power level when - * when thermal throttling state != IWL_TI_0 - * the tt_power_mode should set to different - * power mode based on the current tt state - * @tt_previous_temperature: last measured temperature - * @iwl_tt_restriction: ptr to restriction tbl, used by advance - * thermal throttling to determine how many tx/rx streams - * should be used in tt state; and can HT be enabled or not - * @iwl_tt_trans: ptr to adv trans table, used by advance thermal throttling - * state transaction - * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature - * @ct_kill_exit_tm: timer to exit thermal kill - */ -struct iwl_tt_mgmt { - enum iwl_tt_state state; - bool advanced_tt; - u8 tt_power_mode; - bool ct_kill_toggle; -#ifdef CONFIG_IWLWIFI_DEBUG - s32 tt_previous_temp; -#endif - struct iwl_tt_restriction *restriction; - struct iwl_tt_trans *transaction; - struct timer_list ct_kill_exit_tm; - struct timer_list ct_kill_waiting_tm; -}; - -u8 iwl_tt_current_power_mode(struct iwl_priv *priv); -bool iwl_tt_is_low_power_state(struct iwl_priv *priv); -bool iwl_ht_enabled(struct iwl_priv *priv); -enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv); -enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv); -void iwl_tt_enter_ct_kill(struct iwl_priv *priv); -void iwl_tt_exit_ct_kill(struct iwl_priv *priv); -void iwl_tt_handler(struct iwl_priv *priv); -void iwl_tt_initialize(struct iwl_priv *priv); -void iwl_tt_exit(struct iwl_priv *priv); - -#endif /* __iwl_tt_setting_h__ */ diff --git 
a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c deleted file mode 100644 index bddd19769035..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ /dev/null @@ -1,1412 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include -#include -#include "iwl-io.h" -#include "iwl-trans.h" -#include "iwl-agn-hw.h" -#include "dev.h" -#include "agn.h" - -static const u8 tid_to_ac[] = { - IEEE80211_AC_BE, - IEEE80211_AC_BK, - IEEE80211_AC_BK, - IEEE80211_AC_BE, - IEEE80211_AC_VI, - IEEE80211_AC_VI, - IEEE80211_AC_VO, - IEEE80211_AC_VO, -}; - -static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags) -{ - if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS || - info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT || - info->flags & IEEE80211_TX_CTL_AMPDU) - *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; -} - -/* - * handle build REPLY_TX command notification. 
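The tid_to_ac[] table above is the usual 802.11 user-priority to access-category mapping: TIDs 1 and 2 are background, 0 and 3 best effort, 4 and 5 video, 6 and 7 voice. A self-contained version of that mapping, using a local enum rather than mac80211's IEEE80211_AC_* constants (which order the categories differently), is sketched below.

#include <stdio.h>

/* Local access-category names for the demo only. */
enum ac { AC_BE, AC_BK, AC_VI, AC_VO };

/* Same user-priority (TID) to access-category mapping as tid_to_ac[] above. */
static const enum ac tid_to_ac[8] = {
    AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

int main(void)
{
    for (int tid = 0; tid < 8; tid++)
        printf("TID %d -> AC %d\n", tid, tid_to_ac[tid]);
    return 0;
}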
- */ -static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, - struct sk_buff *skb, - struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr, u8 sta_id) -{ - __le16 fc = hdr->frame_control; - __le32 tx_flags = tx_cmd->tx_flags; - - tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) - tx_flags |= TX_CMD_FLG_ACK_MSK; - else - tx_flags &= ~TX_CMD_FLG_ACK_MSK; - - if (ieee80211_is_probe_resp(fc)) - tx_flags |= TX_CMD_FLG_TSF_MSK; - else if (ieee80211_is_back_req(fc)) - tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; - else if (info->band == IEEE80211_BAND_2GHZ && - priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist && - (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || - ieee80211_is_reassoc_req(fc) || - info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) - tx_flags |= TX_CMD_FLG_IGNORE_BT; - - - tx_cmd->sta_id = sta_id; - if (ieee80211_has_morefrags(fc)) - tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; - - if (ieee80211_is_data_qos(fc)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tx_cmd->tid_tspec = qc[0] & 0xf; - tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; - } else { - tx_cmd->tid_tspec = IWL_TID_NON_QOS; - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - else - tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; - } - - iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags); - - tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); - if (ieee80211_is_mgmt(fc)) { - if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); - else - tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); - } else { - tx_cmd->timeout.pm_frame_timeout = 0; - } - - tx_cmd->driver_txop = 0; - tx_cmd->tx_flags = tx_flags; - tx_cmd->next_frame_len = 0; -} - -static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, - struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, - __le16 fc) -{ - u32 rate_flags; - int rate_idx; - u8 rts_retry_limit; - u8 data_retry_limit; - u8 rate_plcp; - - if (priv->wowlan) { - rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT; - data_retry_limit = IWLAGN_LOW_RETRY_LIMIT; - } else { - /* Set retry limit on RTS packets */ - rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT; - - /* Set retry limit on DATA packets and Probe Responses*/ - if (ieee80211_is_probe_resp(fc)) { - data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT; - rts_retry_limit = - min(data_retry_limit, rts_retry_limit); - } else if (ieee80211_is_back_req(fc)) - data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT; - else - data_retry_limit = IWLAGN_DEFAULT_TX_RETRY; - } - - tx_cmd->data_retry_limit = data_retry_limit; - tx_cmd->rts_retry_limit = rts_retry_limit; - - /* DATA packets will use the uCode station table for rate/antenna - * selection */ - if (ieee80211_is_data(fc)) { - tx_cmd->initial_rate_index = 0; - tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; - return; - } else if (ieee80211_is_back_req(fc)) - tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; - - /** - * If the current TX rate stored in mac80211 has the MCS bit set, it's - * not really a TX rate. Thus, we use the lowest supported rate for - * this band. Also use the lowest supported rate if the stored rate - * index is invalid. 
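This comment describes the legacy-rate fallback: an MCS-flagged or out-of-range index is replaced with the band's lowest rate, 5 GHz indices are offset past the CCK rates, and the CCK flag applies only to CCK indices. The sketch below models that selection in isolation; the rate-table bounds are illustrative rather than the driver's IWL_RATE_* values.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative legacy rate table layout: 4 CCK rates followed by 8 OFDM
 * rates, matching the usual 1/2/5.5/11 + 6..54 Mb/s ordering. */
#define FIRST_CCK_RATE  0
#define LAST_CCK_RATE   3
#define FIRST_OFDM_RATE 4
#define RATE_COUNT      12

/*
 * Pick the legacy rate index for a management/control frame: if mac80211
 * handed us an MCS (HT) rate or an out-of-range index, fall back to the
 * lowest rate of the band; on 5 GHz, indices are offset past the CCK rates.
 */
static int pick_legacy_rate(int rate_idx, bool is_mcs, bool band_5ghz,
                            bool *use_cck)
{
    if (is_mcs || rate_idx < 0 || rate_idx >= RATE_COUNT)
        rate_idx = 0;                   /* lowest supported rate */
    if (band_5ghz)
        rate_idx += FIRST_OFDM_RATE;    /* no CCK rates on 5 GHz */
    *use_cck = rate_idx >= FIRST_CCK_RATE && rate_idx <= LAST_CCK_RATE;
    return rate_idx;
}

int main(void)
{
    bool cck;
    int idx;

    idx = pick_legacy_rate(-1, false, false, &cck);
    printf("2.4 GHz fallback: idx=%d cck=%d\n", idx, cck);   /* 0, 1 */
    idx = pick_legacy_rate(0, true, true, &cck);
    printf("5 GHz MCS fallback: idx=%d cck=%d\n", idx, cck); /* 4, 0 */
    return 0;
}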
- */ - rate_idx = info->control.rates[0].idx; - if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || - (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) - rate_idx = rate_lowest_index( - &priv->nvm_data->bands[info->band], sta); - /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) - rate_idx += IWL_FIRST_OFDM_RATE; - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwl_rates[rate_idx].plcp; - /* Zero out flags for this packet */ - rate_flags = 0; - - /* Set CCK flag as needed */ - if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; - - /* Set up antennas */ - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist && - priv->bt_full_concurrent) { - /* operated as 1x1 in full concurrency mode */ - priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, - first_antenna(priv->nvm_data->valid_tx_ant)); - } else - priv->mgmt_tx_ant = iwl_toggle_tx_ant( - priv, priv->mgmt_tx_ant, - priv->nvm_data->valid_tx_ant); - rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); - - /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); -} - -static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, - struct sk_buff *skb_frag) -{ - struct ieee80211_key_conf *keyconf = info->control.hw_key; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - tx_cmd->sec_ctl = TX_CMD_SEC_CCM; - memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); - if (info->flags & IEEE80211_TX_CTL_AMPDU) - tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; - break; - - case WLAN_CIPHER_SUITE_TKIP: - tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; - ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); - break; - - case WLAN_CIPHER_SUITE_WEP104: - tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; - /* fall through */ - case WLAN_CIPHER_SUITE_WEP40: - tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | - (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); - - memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); - - IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " - "with key %d\n", keyconf->keyidx); - break; - - default: - IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher); - break; - } -} - -/** - * iwl_sta_id_or_broadcast - return sta_id or broadcast sta - * @context: the current context - * @sta: mac80211 station - * - * In certain circumstances mac80211 passes a station pointer - * that may be %NULL, for example during TX or key setup. In - * that case, we need to use the broadcast station, so this - * inline wraps that pattern. - */ -static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context, - struct ieee80211_sta *sta) -{ - int sta_id; - - if (!sta) - return context->bcast_sta_id; - - sta_id = iwl_sta_id(sta); - - /* - * mac80211 should not be passing a partially - * initialised station! 
- */ - WARN_ON(sta_id == IWL_INVALID_STATION); - - return sta_id; -} - -/* - * start REPLY_TX command process - */ -int iwlagn_tx_skb(struct iwl_priv *priv, - struct ieee80211_sta *sta, - struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_station_priv *sta_priv = NULL; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_device_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; - __le16 fc; - u8 hdr_len; - u16 len, seq_number = 0; - u8 sta_id, tid = IWL_MAX_TID_COUNT; - bool is_agg = false, is_data_qos = false; - int txq_id; - - if (info->control.vif) - ctx = iwl_rxon_ctx_from_vif(info->control.vif); - - if (iwl_is_rfkill(priv)) { - IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); - goto drop_unlock_priv; - } - - fc = hdr->frame_control; - -#ifdef CONFIG_IWLWIFI_DEBUG - if (ieee80211_is_auth(fc)) - IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); - else if (ieee80211_is_assoc_req(fc)) - IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); - else if (ieee80211_is_reassoc_req(fc)) - IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); -#endif - - if (unlikely(ieee80211_is_probe_resp(fc))) { - struct iwl_wipan_noa_data *noa_data = - rcu_dereference(priv->noa_data); - - if (noa_data && - pskb_expand_head(skb, 0, noa_data->length, - GFP_ATOMIC) == 0) { - memcpy(skb_put(skb, noa_data->length), - noa_data->data, noa_data->length); - hdr = (struct ieee80211_hdr *)skb->data; - } - } - - hdr_len = ieee80211_hdrlen(fc); - - /* For management frames use broadcast id to do not break aggregation */ - if (!ieee80211_is_data(fc)) - sta_id = ctx->bcast_sta_id; - else { - /* Find index into station table for destination station */ - sta_id = iwl_sta_id_or_broadcast(ctx, sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", - hdr->addr1); - goto drop_unlock_priv; - } - } - - if (sta) - sta_priv = (void *)sta->drv_priv; - - if (sta_priv && sta_priv->asleep && - (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) { - /* - * This sends an asynchronous command to the device, - * but we can rely on it being processed before the - * next frame is processed -- and the next frame to - * this station is the one that will consume this - * counter. - * For now set the counter to just 1 since we do not - * support uAPSD yet. - * - * FIXME: If we get two non-bufferable frames one - * after the other, we might only send out one of - * them because this is racy. 
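Editor's aside: the station-selection pattern above (broadcast ID for non-data frames, otherwise a table lookup that may legitimately fail) condenses to a few lines. The lookup and constants below are stand-ins for iwl_sta_id_or_broadcast() and the real station table, shown only to make the drop-on-invalid decision explicit.

#include <stdio.h>
#include <stdbool.h>

#define INVALID_STATION  255
#define BCAST_STA_ID     15   /* per-context broadcast station (illustrative) */

/* pretend lookup into the driver's station table */
static int lookup_sta(const unsigned char *addr)
{
	(void)addr;
	return INVALID_STATION;   /* e.g. station not added yet */
}

static int select_sta_id(bool is_data, const unsigned char *addr)
{
	if (!is_data)
		return BCAST_STA_ID;   /* management goes to the broadcast sta */

	int sta_id = lookup_sta(addr);
	if (sta_id == INVALID_STATION)
		return -1;             /* caller drops the frame */
	return sta_id;
}

int main(void)
{
	unsigned char peer[6] = { 0 };

	if (select_sta_id(true, peer) < 0)
		puts("drop: unknown station");
	return 0;
}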
- */ - iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); - } - - dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans); - - if (unlikely(!dev_cmd)) - goto drop_unlock_priv; - - memset(dev_cmd, 0, sizeof(*dev_cmd)); - dev_cmd->hdr.cmd = REPLY_TX; - tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; - - /* Total # bytes to be transmitted */ - len = (u16)skb->len; - tx_cmd->len = cpu_to_le16(len); - - if (info->control.hw_key) - iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb); - - /* TODO need this for burst mode later on */ - iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); - - iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc); - - memset(&info->status, 0, sizeof(info->status)); - - info->driver_data[0] = ctx; - info->driver_data[1] = dev_cmd; - /* From now on, we cannot access info->control */ - - spin_lock(&priv->sta_lock); - - if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { - u8 *qc = NULL; - struct iwl_tid_data *tid_data; - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) - goto drop_unlock_sta; - tid_data = &priv->tid_data[sta_id][tid]; - - /* aggregation is on for this */ - if (info->flags & IEEE80211_TX_CTL_AMPDU && - tid_data->agg.state != IWL_AGG_ON) { - IWL_ERR(priv, - "TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n", - info->flags, tid_data->agg.state); - IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n", - sta_id, tid, - IEEE80211_SEQ_TO_SN(tid_data->seq_number)); - goto drop_unlock_sta; - } - - /* We can receive packets from the stack in IWL_AGG_{ON,OFF} - * only. Check this here. - */ - if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON && - tid_data->agg.state != IWL_AGG_OFF, - "Tx while agg.state = %d\n", tid_data->agg.state)) - goto drop_unlock_sta; - - seq_number = tid_data->seq_number; - seq_number &= IEEE80211_SCTL_SEQ; - hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(seq_number); - seq_number += 0x10; - - if (info->flags & IEEE80211_TX_CTL_AMPDU) - is_agg = true; - is_data_qos = true; - } - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdr_len); - - txq_id = info->hw_queue; - - if (is_agg) - txq_id = priv->tid_data[sta_id][tid].agg.txq_id; - else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { - /* - * The microcode will clear the more data - * bit in the last frame it transmits. - */ - hdr->frame_control |= - cpu_to_le16(IEEE80211_FCTL_MOREDATA); - } - - WARN_ON_ONCE(is_agg && - priv->queue_to_mac80211[txq_id] != info->hw_queue); - - IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid, - txq_id, seq_number); - - if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id)) - goto drop_unlock_sta; - - if (is_data_qos && !ieee80211_has_morefrags(fc)) - priv->tid_data[sta_id][tid].seq_number = seq_number; - - spin_unlock(&priv->sta_lock); - - /* - * Avoid atomic ops if it isn't an associated client. - * Also, if this is a packet for aggregation, don't - * increase the counter because the ucode will stop - * aggregation queues when their respective station - * goes to sleep. 
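Editor's aside: the sequence-control arithmetic in the QoS branch above is easy to get wrong, so here is an isolated sketch. The 802.11 sequence number occupies bits 4..15 of seq_ctrl, which is why the driver masks with 0xFFF0 and steps its per-(station, TID) counter by 0x10 per MPDU; the masks mirror IEEE80211_SCTL_SEQ and IEEE80211_SCTL_FRAG.

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000F   /* fragment number, bits 0..3  */
#define SCTL_SEQ  0xFFF0   /* sequence number, bits 4..15 */

/* per-(station, TID) counter kept by the driver, already shifted left by 4 */
static uint16_t tid_seq_number;

static uint16_t assign_seq(uint16_t seq_ctrl_in)
{
	uint16_t sn = tid_seq_number & SCTL_SEQ;
	uint16_t seq_ctrl = (seq_ctrl_in & SCTL_FRAG) | sn;

	tid_seq_number += 0x10;   /* next MPDU gets SN + 1 */
	return seq_ctrl;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		uint16_t sc = assign_seq(0);
		printf("seq_ctrl=0x%04x SN=%u\n", (unsigned)sc, (unsigned)(sc >> 4));
	}
	return 0;
}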
- */ - if (sta_priv && sta_priv->client && !is_agg) - atomic_inc(&sta_priv->pending_frames); - - return 0; - -drop_unlock_sta: - if (dev_cmd) - iwl_trans_free_tx_cmd(priv->trans, dev_cmd); - spin_unlock(&priv->sta_lock); -drop_unlock_priv: - return -1; -} - -static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq) -{ - int q; - - for (q = IWLAGN_FIRST_AMPDU_QUEUE; - q < priv->cfg->base_params->num_of_queues; q++) { - if (!test_and_set_bit(q, priv->agg_q_alloc)) { - priv->queue_to_mac80211[q] = mq; - return q; - } - } - - return -ENOSPC; -} - -static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q) -{ - clear_bit(q, priv->agg_q_alloc); - priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE; -} - -int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid) -{ - struct iwl_tid_data *tid_data; - int sta_id, txq_id; - enum iwl_agg_state agg_state; - - sta_id = iwl_sta_id(sta); - - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); - return -ENXIO; - } - - spin_lock_bh(&priv->sta_lock); - - tid_data = &priv->tid_data[sta_id][tid]; - txq_id = tid_data->agg.txq_id; - - switch (tid_data->agg.state) { - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* - * This can happen if the peer stops aggregation - * again before we've had a chance to drain the - * queue we selected previously, i.e. before the - * session was really started completely. - */ - IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); - goto turn_off; - case IWL_AGG_STARTING: - /* - * This can happen when the session is stopped before - * we receive ADDBA response - */ - IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n"); - goto turn_off; - case IWL_AGG_ON: - break; - default: - IWL_WARN(priv, - "Stopping AGG while state not ON or starting for %d on %d (%d)\n", - sta_id, tid, tid_data->agg.state); - spin_unlock_bh(&priv->sta_lock); - return 0; - } - - tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); - - /* There are still packets for this RA / TID in the HW */ - if (!test_bit(txq_id, priv->agg_q_alloc)) { - IWL_DEBUG_TX_QUEUES(priv, - "stopping AGG on STA/TID %d/%d but hwq %d not used\n", - sta_id, tid, txq_id); - } else if (tid_data->agg.ssn != tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, - "Can't proceed: ssn %d, next_recl = %d\n", - tid_data->agg.ssn, - tid_data->next_reclaimed); - tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA; - spin_unlock_bh(&priv->sta_lock); - return 0; - } - - IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n", - tid_data->agg.ssn); -turn_off: - agg_state = tid_data->agg.state; - tid_data->agg.state = IWL_AGG_OFF; - - spin_unlock_bh(&priv->sta_lock); - - if (test_bit(txq_id, priv->agg_q_alloc)) { - /* - * If the transport didn't know that we wanted to start - * agreggation, don't tell it that we want to stop them. - * This can happen when we don't get the addBA response on - * time, or we hadn't time to drain the AC queues. 
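Editor's aside: iwlagn_alloc_agg_txq() above is a first-fit scan over a bitmap of hardware queues. A user-space sketch of the same allocator, minus the kernel's atomic test_and_set_bit(), looks like this; FIRST_AMPDU_QUEUE and NUM_QUEUES are illustrative values, not the driver's.

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

#define FIRST_AMPDU_QUEUE 10
#define NUM_QUEUES        20
#define INVALID_MAC_QUEUE -1

static bool agg_q_alloc[NUM_QUEUES];          /* which HW queues are busy */
static int  queue_to_mac80211[NUM_QUEUES];

static int alloc_agg_txq(int mac_queue)
{
	for (int q = FIRST_AMPDU_QUEUE; q < NUM_QUEUES; q++) {
		if (!agg_q_alloc[q]) {
			agg_q_alloc[q] = true;        /* claim it */
			queue_to_mac80211[q] = mac_queue;
			return q;
		}
	}
	return -ENOSPC;                               /* all agg queues busy */
}

static void dealloc_agg_txq(int q)
{
	agg_q_alloc[q] = false;
	queue_to_mac80211[q] = INVALID_MAC_QUEUE;
}

int main(void)
{
	int q = alloc_agg_txq(2);

	printf("allocated hw queue %d for mac queue 2\n", q);
	dealloc_agg_txq(q);
	return 0;
}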
- */ - if (agg_state == IWL_AGG_ON) - iwl_trans_txq_disable(priv->trans, txq_id, true); - else - IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", - agg_state); - iwlagn_dealloc_agg_txq(priv, txq_id); - } - - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - - return 0; -} - -int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) -{ - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - struct iwl_tid_data *tid_data; - int sta_id, txq_id, ret; - - IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n", - sta->addr, tid); - - sta_id = iwl_sta_id(sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_ERR(priv, "Start AGG on invalid station\n"); - return -ENXIO; - } - if (unlikely(tid >= IWL_MAX_TID_COUNT)) - return -EINVAL; - - if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) { - IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); - return -ENXIO; - } - - txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]); - if (txq_id < 0) { - IWL_DEBUG_TX_QUEUES(priv, - "No free aggregation queue for %pM/%d\n", - sta->addr, tid); - return txq_id; - } - - ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); - if (ret) - return ret; - - spin_lock_bh(&priv->sta_lock); - tid_data = &priv->tid_data[sta_id][tid]; - tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); - tid_data->agg.txq_id = txq_id; - - *ssn = tid_data->agg.ssn; - - if (*ssn == tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n", - tid_data->agg.ssn); - tid_data->agg.state = IWL_AGG_STARTING; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - } else { - IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " - "next_reclaimed = %d\n", - tid_data->agg.ssn, - tid_data->next_reclaimed); - tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; - } - spin_unlock_bh(&priv->sta_lock); - - return ret; -} - -int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid) -{ - struct iwl_tid_data *tid_data; - enum iwl_agg_state agg_state; - int sta_id, txq_id; - sta_id = iwl_sta_id(sta); - - /* - * First set the agg state to OFF to avoid calling - * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty. - */ - spin_lock_bh(&priv->sta_lock); - - tid_data = &priv->tid_data[sta_id][tid]; - txq_id = tid_data->agg.txq_id; - agg_state = tid_data->agg.state; - IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n", - sta_id, tid, txq_id, tid_data->agg.state); - - tid_data->agg.state = IWL_AGG_OFF; - - spin_unlock_bh(&priv->sta_lock); - - if (iwlagn_txfifo_flush(priv, BIT(txq_id))) - IWL_ERR(priv, "Couldn't flush the AGG queue\n"); - - if (test_bit(txq_id, priv->agg_q_alloc)) { - /* - * If the transport didn't know that we wanted to start - * agreggation, don't tell it that we want to stop them. - * This can happen when we don't get the addBA response on - * time, or we hadn't time to drain the AC queues. 
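Editor's aside: the start path above hinges on one comparison. The session may only move to the aggregation queue once every frame queued before it has been reclaimed, i.e. once ssn == next_reclaimed; otherwise it parks in the draining state until the reclaim path catches up. A reduced sketch of that decision (state names mirror the driver's, everything else is illustrative):

#include <stdio.h>

enum agg_state {
	AGG_OFF,
	AGG_STARTING,             /* ADDBA sent, waiting for the response   */
	EMPTYING_HW_QUEUE_ADDBA,  /* must drain the legacy queue first      */
};

struct tid_data {
	unsigned int ssn;             /* first SN the agg session will use */
	unsigned int next_reclaimed;  /* next SN the HW still has queued   */
	enum agg_state state;
};

static void tx_agg_start(struct tid_data *tid)
{
	if (tid->ssn == tid->next_reclaimed)
		tid->state = AGG_STARTING;            /* can proceed now  */
	else
		tid->state = EMPTYING_HW_QUEUE_ADDBA; /* wait for drain   */
}

int main(void)
{
	struct tid_data tid = { .ssn = 42, .next_reclaimed = 40 };

	tx_agg_start(&tid);
	printf("state=%d (1=starting, 2=draining)\n", tid.state);
	return 0;
}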
- */ - if (agg_state == IWL_AGG_ON) - iwl_trans_txq_disable(priv->trans, txq_id, true); - else - IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", - agg_state); - iwlagn_dealloc_agg_txq(priv, txq_id); - } - - return 0; -} - -int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size) -{ - struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - int q, fifo; - u16 ssn; - - buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); - - spin_lock_bh(&priv->sta_lock); - ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn; - q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id; - priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON; - spin_unlock_bh(&priv->sta_lock); - - fifo = ctx->ac_to_fifo[tid_to_ac[tid]]; - - iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid, - buf_size, ssn, 0); - - /* - * If the limit is 0, then it wasn't initialised yet, - * use the default. We can do that since we take the - * minimum below, and we don't want to go above our - * default due to hardware restrictions. - */ - if (sta_priv->max_agg_bufsize == 0) - sta_priv->max_agg_bufsize = - LINK_QUAL_AGG_FRAME_LIMIT_DEF; - - /* - * Even though in theory the peer could have different - * aggregation reorder buffer sizes for different sessions, - * our ucode doesn't allow for that and has a global limit - * for each station. Therefore, use the minimum of all the - * aggregation sessions and our default value. - */ - sta_priv->max_agg_bufsize = - min(sta_priv->max_agg_bufsize, buf_size); - - if (priv->hw_params.use_rts_for_aggregation) { - /* - * switch to RTS/CTS if it is the prefer protection - * method for HT traffic - */ - - sta_priv->lq_sta.lq.general_params.flags |= - LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; - } - priv->agg_tids_count++; - IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", - priv->agg_tids_count); - - sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit = - sta_priv->max_agg_bufsize; - - IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n", - sta->addr, tid); - - return iwl_send_lq_cmd(priv, ctx, - &sta_priv->lq_sta.lq, CMD_ASYNC, false); -} - -static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid) -{ - struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid]; - enum iwl_rxon_context_id ctx; - struct ieee80211_vif *vif; - u8 *addr; - - lockdep_assert_held(&priv->sta_lock); - - addr = priv->stations[sta_id].sta.sta.addr; - ctx = priv->stations[sta_id].ctxid; - vif = priv->contexts[ctx].vif; - - switch (priv->tid_data[sta_id][tid].agg.state) { - case IWL_EMPTYING_HW_QUEUE_DELBA: - /* There are no packets for this RA / TID in the HW any more */ - if (tid_data->agg.ssn == tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, - "Can continue DELBA flow ssn = next_recl = %d\n", - tid_data->next_reclaimed); - iwl_trans_txq_disable(priv->trans, - tid_data->agg.txq_id, true); - iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id); - tid_data->agg.state = IWL_AGG_OFF; - ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); - } - break; - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* There are no packets for this RA / TID in the HW any more */ - if (tid_data->agg.ssn == tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, - "Can continue ADDBA flow ssn = next_recl = %d\n", - tid_data->next_reclaimed); - tid_data->agg.state = IWL_AGG_STARTING; - ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); - } - break; - default: - 
break; - } -} - -static void iwlagn_non_agg_tx_status(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - const u8 *addr1) -{ - struct ieee80211_sta *sta; - struct iwl_station_priv *sta_priv; - - rcu_read_lock(); - sta = ieee80211_find_sta(ctx->vif, addr1); - if (sta) { - sta_priv = (void *)sta->drv_priv; - /* avoid atomic ops if this isn't a client */ - if (sta_priv->client && - atomic_dec_return(&sta_priv->pending_frames) == 0) - ieee80211_sta_block_awake(priv->hw, sta, false); - } - rcu_read_unlock(); -} - -/** - * translate ucode response to mac80211 tx status control values - */ -static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, - struct ieee80211_tx_info *info) -{ - struct ieee80211_tx_rate *r = &info->status.rates[0]; - - info->status.antenna = - ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); - if (rate_n_flags & RATE_MCS_HT_MSK) - r->flags |= IEEE80211_TX_RC_MCS; - if (rate_n_flags & RATE_MCS_GF_MSK) - r->flags |= IEEE80211_TX_RC_GREEN_FIELD; - if (rate_n_flags & RATE_MCS_HT40_MSK) - r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - if (rate_n_flags & RATE_MCS_DUP_MSK) - r->flags |= IEEE80211_TX_RC_DUP_DATA; - if (rate_n_flags & RATE_MCS_SGI_MSK) - r->flags |= IEEE80211_TX_RC_SHORT_GI; - r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band); -} - -#ifdef CONFIG_IWLWIFI_DEBUG -const char *iwl_get_tx_fail_reason(u32 status) -{ -#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x -#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x - - switch (status & TX_STATUS_MSK) { - case TX_STATUS_SUCCESS: - return "SUCCESS"; - TX_STATUS_POSTPONE(DELAY); - TX_STATUS_POSTPONE(FEW_BYTES); - TX_STATUS_POSTPONE(BT_PRIO); - TX_STATUS_POSTPONE(QUIET_PERIOD); - TX_STATUS_POSTPONE(CALC_TTAK); - TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); - TX_STATUS_FAIL(SHORT_LIMIT); - TX_STATUS_FAIL(LONG_LIMIT); - TX_STATUS_FAIL(FIFO_UNDERRUN); - TX_STATUS_FAIL(DRAIN_FLOW); - TX_STATUS_FAIL(RFKILL_FLUSH); - TX_STATUS_FAIL(LIFE_EXPIRE); - TX_STATUS_FAIL(DEST_PS); - TX_STATUS_FAIL(HOST_ABORTED); - TX_STATUS_FAIL(BT_RETRY); - TX_STATUS_FAIL(STA_INVALID); - TX_STATUS_FAIL(FRAG_DROPPED); - TX_STATUS_FAIL(TID_DISABLE); - TX_STATUS_FAIL(FIFO_FLUSHED); - TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); - TX_STATUS_FAIL(PASSIVE_NO_RX); - TX_STATUS_FAIL(NO_BEACON_ON_RADAR); - } - - return "UNKNOWN"; - -#undef TX_STATUS_FAIL -#undef TX_STATUS_POSTPONE -} -#endif /* CONFIG_IWLWIFI_DEBUG */ - -static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status) -{ - status &= AGG_TX_STATUS_MSK; - - switch (status) { - case AGG_TX_STATE_UNDERRUN_MSK: - priv->reply_agg_tx_stats.underrun++; - break; - case AGG_TX_STATE_BT_PRIO_MSK: - priv->reply_agg_tx_stats.bt_prio++; - break; - case AGG_TX_STATE_FEW_BYTES_MSK: - priv->reply_agg_tx_stats.few_bytes++; - break; - case AGG_TX_STATE_ABORT_MSK: - priv->reply_agg_tx_stats.abort++; - break; - case AGG_TX_STATE_LAST_SENT_TTL_MSK: - priv->reply_agg_tx_stats.last_sent_ttl++; - break; - case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK: - priv->reply_agg_tx_stats.last_sent_try++; - break; - case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK: - priv->reply_agg_tx_stats.last_sent_bt_kill++; - break; - case AGG_TX_STATE_SCD_QUERY_MSK: - priv->reply_agg_tx_stats.scd_query++; - break; - case AGG_TX_STATE_TEST_BAD_CRC32_MSK: - priv->reply_agg_tx_stats.bad_crc32++; - break; - case AGG_TX_STATE_RESPONSE_MSK: - priv->reply_agg_tx_stats.response++; - break; - case AGG_TX_STATE_DUMP_TX_MSK: - priv->reply_agg_tx_stats.dump_tx++; - break; - case 
AGG_TX_STATE_DELAY_TX_MSK: - priv->reply_agg_tx_stats.delay_tx++; - break; - default: - priv->reply_agg_tx_stats.unknown++; - break; - } -} - -static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) -{ - return le32_to_cpup((__le32 *)&tx_resp->status + - tx_resp->frame_count) & IEEE80211_MAX_SN; -} - -static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, - struct iwlagn_tx_resp *tx_resp) -{ - struct agg_tx_status *frame_status = &tx_resp->status; - int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> - IWLAGN_TX_RES_TID_POS; - int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> - IWLAGN_TX_RES_RA_POS; - struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg; - u32 status = le16_to_cpu(tx_resp->status.status); - int i; - - WARN_ON(tid == IWL_TID_NON_QOS); - - if (agg->wait_for_ba) - IWL_DEBUG_TX_REPLY(priv, - "got tx response w/o block-ack\n"); - - agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); - agg->wait_for_ba = (tx_resp->frame_count > 1); - - /* - * If the BT kill count is non-zero, we'll get this - * notification again. - */ - if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && - priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist) { - IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); - } - - if (tx_resp->frame_count == 1) - return; - - IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n", - agg->txq_id, - le32_to_cpu(tx_resp->rate_n_flags), - iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count); - - /* Construct bit-map of pending frames within Tx window */ - for (i = 0; i < tx_resp->frame_count; i++) { - u16 fstatus = le16_to_cpu(frame_status[i].status); - u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS; - - if (status & AGG_TX_STATUS_MSK) - iwlagn_count_agg_tx_err_status(priv, fstatus); - - if (status & (AGG_TX_STATE_FEW_BYTES_MSK | - AGG_TX_STATE_ABORT_MSK)) - continue; - - if (status & AGG_TX_STATUS_MSK || retry_cnt > 1) - IWL_DEBUG_TX_REPLY(priv, - "%d: status %s (0x%04x), try-count (0x%01x)\n", - i, - iwl_get_agg_tx_fail_reason(fstatus), - fstatus & AGG_TX_STATUS_MSK, - retry_cnt); - } -} - -#ifdef CONFIG_IWLWIFI_DEBUG -#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x - -const char *iwl_get_agg_tx_fail_reason(u16 status) -{ - status &= AGG_TX_STATUS_MSK; - switch (status) { - case AGG_TX_STATE_TRANSMITTED: - return "SUCCESS"; - AGG_TX_STATE_FAIL(UNDERRUN_MSK); - AGG_TX_STATE_FAIL(BT_PRIO_MSK); - AGG_TX_STATE_FAIL(FEW_BYTES_MSK); - AGG_TX_STATE_FAIL(ABORT_MSK); - AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK); - AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK); - AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK); - AGG_TX_STATE_FAIL(SCD_QUERY_MSK); - AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK); - AGG_TX_STATE_FAIL(RESPONSE_MSK); - AGG_TX_STATE_FAIL(DUMP_TX_MSK); - AGG_TX_STATE_FAIL(DELAY_TX_MSK); - } - - return "UNKNOWN"; -} -#endif /* CONFIG_IWLWIFI_DEBUG */ - -static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status) -{ - status &= TX_STATUS_MSK; - - switch (status) { - case TX_STATUS_POSTPONE_DELAY: - priv->reply_tx_stats.pp_delay++; - break; - case TX_STATUS_POSTPONE_FEW_BYTES: - priv->reply_tx_stats.pp_few_bytes++; - break; - case TX_STATUS_POSTPONE_BT_PRIO: - priv->reply_tx_stats.pp_bt_prio++; - break; - case TX_STATUS_POSTPONE_QUIET_PERIOD: - priv->reply_tx_stats.pp_quiet_period++; - break; - case TX_STATUS_POSTPONE_CALC_TTAK: - priv->reply_tx_stats.pp_calc_ttak++; - break; - case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: - priv->reply_tx_stats.int_crossed_retry++; - break; - case 
TX_STATUS_FAIL_SHORT_LIMIT: - priv->reply_tx_stats.short_limit++; - break; - case TX_STATUS_FAIL_LONG_LIMIT: - priv->reply_tx_stats.long_limit++; - break; - case TX_STATUS_FAIL_FIFO_UNDERRUN: - priv->reply_tx_stats.fifo_underrun++; - break; - case TX_STATUS_FAIL_DRAIN_FLOW: - priv->reply_tx_stats.drain_flow++; - break; - case TX_STATUS_FAIL_RFKILL_FLUSH: - priv->reply_tx_stats.rfkill_flush++; - break; - case TX_STATUS_FAIL_LIFE_EXPIRE: - priv->reply_tx_stats.life_expire++; - break; - case TX_STATUS_FAIL_DEST_PS: - priv->reply_tx_stats.dest_ps++; - break; - case TX_STATUS_FAIL_HOST_ABORTED: - priv->reply_tx_stats.host_abort++; - break; - case TX_STATUS_FAIL_BT_RETRY: - priv->reply_tx_stats.bt_retry++; - break; - case TX_STATUS_FAIL_STA_INVALID: - priv->reply_tx_stats.sta_invalid++; - break; - case TX_STATUS_FAIL_FRAG_DROPPED: - priv->reply_tx_stats.frag_drop++; - break; - case TX_STATUS_FAIL_TID_DISABLE: - priv->reply_tx_stats.tid_disable++; - break; - case TX_STATUS_FAIL_FIFO_FLUSHED: - priv->reply_tx_stats.fifo_flush++; - break; - case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL: - priv->reply_tx_stats.insuff_cf_poll++; - break; - case TX_STATUS_FAIL_PASSIVE_NO_RX: - priv->reply_tx_stats.fail_hw_drop++; - break; - case TX_STATUS_FAIL_NO_BEACON_ON_RADAR: - priv->reply_tx_stats.sta_color_mismatch++; - break; - default: - priv->reply_tx_stats.unknown++; - break; - } -} - -static void iwlagn_set_tx_status(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - struct iwlagn_tx_resp *tx_resp) -{ - u16 status = le16_to_cpu(tx_resp->status.status); - - info->flags &= ~IEEE80211_TX_CTL_AMPDU; - - info->status.rates[0].count = tx_resp->failure_frame + 1; - info->flags |= iwl_tx_status_to_mac80211(status); - iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), - info); - if (!iwl_is_tx_success(status)) - iwlagn_count_tx_err_status(priv, status); -} - -static void iwl_check_abort_status(struct iwl_priv *priv, - u8 frame_count, u32 status) -{ - if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { - IWL_ERR(priv, "Tx flush command to flush out all frames\n"); - if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) - queue_work(priv->workqueue, &priv->tx_flush); - } -} - -void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence); - struct iwlagn_tx_resp *tx_resp = (void *)pkt->data; - struct ieee80211_hdr *hdr; - u32 status = le16_to_cpu(tx_resp->status.status); - u16 ssn = iwlagn_get_scd_ssn(tx_resp); - int tid; - int sta_id; - int freed; - struct ieee80211_tx_info *info; - struct sk_buff_head skbs; - struct sk_buff *skb; - struct iwl_rxon_context *ctx; - bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); - - tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> - IWLAGN_TX_RES_TID_POS; - sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> - IWLAGN_TX_RES_RA_POS; - - spin_lock_bh(&priv->sta_lock); - - if (is_agg) { - WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT || - tid >= IWL_MAX_TID_COUNT); - if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id) - IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id, - priv->tid_data[sta_id][tid].agg.txq_id); - iwl_rx_reply_tx_agg(priv, tx_resp); - } - - __skb_queue_head_init(&skbs); - - if (tx_resp->frame_count == 1) { - u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); - next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10); - - if 
(is_agg) { - /* If this is an aggregation queue, we can rely on the - * ssn since the wifi sequence number corresponds to - * the index in the TFD ring (%256). - * The seq_ctl is the sequence control of the packet - * to which this Tx response relates. But if there is a - * hole in the bitmap of the BA we received, this Tx - * response may allow to reclaim the hole and all the - * subsequent packets that were already acked. - * In that case, seq_ctl != ssn, and the next packet - * to be reclaimed will be ssn and not seq_ctl. - */ - next_reclaimed = ssn; - } - - if (tid != IWL_TID_NON_QOS) { - priv->tid_data[sta_id][tid].next_reclaimed = - next_reclaimed; - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", - next_reclaimed); - } - - iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); - - iwlagn_check_ratid_empty(priv, sta_id, tid); - freed = 0; - - /* process frames */ - skb_queue_walk(&skbs, skb) { - hdr = (struct ieee80211_hdr *)skb->data; - - if (!ieee80211_is_data_qos(hdr->frame_control)) - priv->last_seq_ctl = tx_resp->seq_ctl; - - info = IEEE80211_SKB_CB(skb); - ctx = info->driver_data[0]; - iwl_trans_free_tx_cmd(priv->trans, - info->driver_data[1]); - - memset(&info->status, 0, sizeof(info->status)); - - if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && - ctx->vif && - ctx->vif->type == NL80211_IFTYPE_STATION) { - /* block and stop all queues */ - priv->passive_no_rx = true; - IWL_DEBUG_TX_QUEUES(priv, - "stop all queues: passive channel\n"); - ieee80211_stop_queues(priv->hw); - - IWL_DEBUG_TX_REPLY(priv, - "TXQ %d status %s (0x%08x) " - "rate_n_flags 0x%x retries %d\n", - txq_id, - iwl_get_tx_fail_reason(status), - status, - le32_to_cpu(tx_resp->rate_n_flags), - tx_resp->failure_frame); - - IWL_DEBUG_TX_REPLY(priv, - "FrameCnt = %d, idx=%d\n", - tx_resp->frame_count, cmd_index); - } - - /* check if BAR is needed */ - if (is_agg && !iwl_is_tx_success(status)) - info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb), - tx_resp); - if (!is_agg) - iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); - - freed++; - } - - if (tid != IWL_TID_NON_QOS) { - priv->tid_data[sta_id][tid].next_reclaimed = - next_reclaimed; - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", - next_reclaimed); - } - - if (!is_agg && freed != 1) - IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed); - - IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id, - iwl_get_tx_fail_reason(status), status); - - IWL_DEBUG_TX_REPLY(priv, - "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n", - le32_to_cpu(tx_resp->rate_n_flags), - tx_resp->failure_frame, - SEQ_TO_INDEX(sequence), ssn, - le16_to_cpu(tx_resp->seq_ctl)); - } - - iwl_check_abort_status(priv, tx_resp->frame_count, status); - spin_unlock_bh(&priv->sta_lock); - - while (!skb_queue_empty(&skbs)) { - skb = __skb_dequeue(&skbs); - ieee80211_tx_status(priv->hw, skb); - } -} - -/** - * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA - * - * Handles block-acknowledge notification from device, which reports success - * of frames sent via aggregation. 
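Editor's aside: two details in the reclaim path above deserve a worked example: the sequence number is seq_ctrl >> 4, and "the packet after this one" is therefore seq_ctl + 0x10, wrapping in the 12-bit sequence space. A hedged sketch of that arithmetic, where SEQ_TO_SN mirrors the mac80211 macro:

#include <stdint.h>
#include <stdio.h>

#define SEQ_TO_SN(sc) (((sc) & 0xFFF0) >> 4)

/* next SN still owned by the HW once the frame carrying seq_ctl completed */
static uint16_t next_reclaimed_from(uint16_t seq_ctl)
{
	/* the 16-bit add wraps 0xFFF0 + 0x10 back to 0, i.e. SN 4095 -> 0 */
	return SEQ_TO_SN((uint16_t)(seq_ctl + 0x10));
}

int main(void)
{
	/* wrap-around case: SN 4095 -> next reclaimed is SN 0 */
	printf("%u\n", (unsigned)next_reclaimed_from((uint16_t)(4095u << 4)));
	/* ordinary case: SN 100 -> 101 */
	printf("%u\n", (unsigned)next_reclaimed_from(100u << 4));
	return 0;
}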
- */ -void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; - struct iwl_ht_agg *agg; - struct sk_buff_head reclaimed_skbs; - struct sk_buff *skb; - int sta_id; - int tid; - int freed; - - /* "flow" corresponds to Tx queue */ - u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); - - /* "ssn" is start of block-ack Tx window, corresponds to index - * (in Tx queue's circular buffer) of first TFD/frame in window */ - u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); - - if (scd_flow >= priv->cfg->base_params->num_of_queues) { - IWL_ERR(priv, - "BUG_ON scd_flow is bigger than number of queues\n"); - return; - } - - sta_id = ba_resp->sta_id; - tid = ba_resp->tid; - agg = &priv->tid_data[sta_id][tid].agg; - - spin_lock_bh(&priv->sta_lock); - - if (unlikely(!agg->wait_for_ba)) { - if (unlikely(ba_resp->bitmap)) - IWL_ERR(priv, "Received BA when not expected\n"); - spin_unlock_bh(&priv->sta_lock); - return; - } - - if (unlikely(scd_flow != agg->txq_id)) { - /* - * FIXME: this is a uCode bug which need to be addressed, - * log the information and return for now. - * Since it is can possibly happen very often and in order - * not to fill the syslog, don't use IWL_ERR or IWL_WARN - */ - IWL_DEBUG_TX_QUEUES(priv, - "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n", - scd_flow, sta_id, tid, agg->txq_id); - spin_unlock_bh(&priv->sta_lock); - return; - } - - __skb_queue_head_init(&reclaimed_skbs); - - /* Release all TFDs before the SSN, i.e. all TFDs in front of - * block-ack window (we assume that they've been successfully - * transmitted ... if not, it's too late anyway). */ - iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn, - &reclaimed_skbs); - - IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " - "sta_id = %d\n", - agg->wait_for_ba, - (u8 *) &ba_resp->sta_addr_lo32, - ba_resp->sta_id); - IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " - "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", - ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl), - (unsigned long long)le64_to_cpu(ba_resp->bitmap), - scd_flow, ba_resp_scd_ssn, ba_resp->txed, - ba_resp->txed_2_done); - - /* Mark that the expected block-ack response arrived */ - agg->wait_for_ba = false; - - /* Sanity check values reported by uCode */ - if (ba_resp->txed_2_done > ba_resp->txed) { - IWL_DEBUG_TX_REPLY(priv, - "bogus sent(%d) and ack(%d) count\n", - ba_resp->txed, ba_resp->txed_2_done); - /* - * set txed_2_done = txed, - * so it won't impact rate scale - */ - ba_resp->txed = ba_resp->txed_2_done; - } - - priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn; - - iwlagn_check_ratid_empty(priv, sta_id, tid); - freed = 0; - - skb_queue_walk(&reclaimed_skbs, skb) { - struct ieee80211_hdr *hdr = (void *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (ieee80211_is_data_qos(hdr->frame_control)) - freed++; - else - WARN_ON_ONCE(1); - - iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); - - memset(&info->status, 0, sizeof(info->status)); - /* Packet was transmitted successfully, failures come as single - * frames because before failing a frame the firmware transmits - * it without aggregation at least once. 
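Editor's aside: the compressed-BA handler above trusts the firmware only so far: it releases everything in front of the reported SSN and clamps the acked count against the sent count before feeding rate scaling. A minimal sketch of that sanity step and of the ampdu_len/ampdu_ack_len pair it produces; the field names follow mac80211, the structs themselves are illustrative.

#include <stdio.h>
#include <stdint.h>

struct ba_report {
	uint8_t txed;         /* frames the firmware sent in the A-MPDU */
	uint8_t txed_2_done;  /* frames the peer acknowledged           */
};

struct tx_status {
	uint8_t ampdu_len;
	uint8_t ampdu_ack_len;
};

static struct tx_status digest_ba(struct ba_report ba)
{
	/* bogus firmware report: never let "acked" exceed "sent" */
	if (ba.txed_2_done > ba.txed)
		ba.txed = ba.txed_2_done;

	return (struct tx_status){
		.ampdu_len = ba.txed,
		.ampdu_ack_len = ba.txed_2_done,
	};
}

int main(void)
{
	struct tx_status st = digest_ba((struct ba_report){ .txed = 3,
							    .txed_2_done = 5 });
	printf("sent=%u acked=%u\n", (unsigned)st.ampdu_len,
	       (unsigned)st.ampdu_ack_len);
	return 0;
}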
- */ - info->flags |= IEEE80211_TX_STAT_ACK; - - if (freed == 1) { - /* this is the first skb we deliver in this batch */ - /* put the rate scaling data there */ - info = IEEE80211_SKB_CB(skb); - memset(&info->status, 0, sizeof(info->status)); - info->flags |= IEEE80211_TX_STAT_AMPDU; - info->status.ampdu_ack_len = ba_resp->txed_2_done; - info->status.ampdu_len = ba_resp->txed; - iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, - info); - } - } - - spin_unlock_bh(&priv->sta_lock); - - while (!skb_queue_empty(&reclaimed_skbs)) { - skb = __skb_dequeue(&reclaimed_skbs); - ieee80211_tx_status(priv->hw, skb); - } -} diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c deleted file mode 100644 index 931a8e4269ef..000000000000 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ /dev/null @@ -1,452 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include - -#include "iwl-io.h" -#include "iwl-agn-hw.h" -#include "iwl-trans.h" -#include "iwl-fh.h" -#include "iwl-op-mode.h" - -#include "dev.h" -#include "agn.h" -#include "calib.h" - -/****************************************************************************** - * - * uCode download functions - * - ******************************************************************************/ - -static inline const struct fw_img * -iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type) -{ - if (ucode_type >= IWL_UCODE_TYPE_MAX) - return NULL; - - return &priv->fw->img[ucode_type]; -} - -/* - * Calibration - */ -static int iwl_set_Xtal_calib(struct iwl_priv *priv) -{ - struct iwl_calib_xtal_freq_cmd cmd; - __le16 *xtal_calib = priv->nvm_data->xtal_calib; - - iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); - cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); - cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); - return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd)); -} - -static int iwl_set_temperature_offset_calib(struct iwl_priv *priv) -{ - struct iwl_calib_temperature_offset_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); - cmd.radio_sensor_offset = priv->nvm_data->raw_temperature; - if (!(cmd.radio_sensor_offset)) - cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; - - IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n", - le16_to_cpu(cmd.radio_sensor_offset)); - return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd)); -} - -static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv) -{ - struct iwl_calib_temperature_offset_v2_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); - cmd.radio_sensor_offset_high = priv->nvm_data->kelvin_temperature; - cmd.radio_sensor_offset_low = priv->nvm_data->raw_temperature; - if (!cmd.radio_sensor_offset_low) { - IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n"); - cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET; - cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET; - } - cmd.burntVoltageRef = priv->nvm_data->calib_voltage; - - IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n", - le16_to_cpu(cmd.radio_sensor_offset_high)); - IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n", - le16_to_cpu(cmd.radio_sensor_offset_low)); - IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n", - le16_to_cpu(cmd.burntVoltageRef)); - - return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd)); -} - -static int iwl_send_calib_cfg(struct iwl_priv *priv) -{ - struct iwl_calib_cfg_cmd calib_cfg_cmd; - struct iwl_host_cmd cmd = { - .id = CALIBRATION_CFG_CMD, - .len = { sizeof(struct iwl_calib_cfg_cmd), }, - .data = { &calib_cfg_cmd, }, - }; - - memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); - calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; - calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL; - calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL; - calib_cfg_cmd.ucd_calib_cfg.flags = - IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK; - - return iwl_dvm_send_cmd(priv, &cmd); -} - -int iwl_init_alive_start(struct iwl_priv *priv) -{ - int ret; - - if (priv->lib->bt_params && - priv->lib->bt_params->advanced_bt_coexist) { - /* - * Tell uCode we are ready to perform calibration - * need to perform this before any calibration - * no need to close the 
envlope since we are going - * to load the runtime uCode later. - */ - ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, - BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); - if (ret) - return ret; - - } - - ret = iwl_send_calib_cfg(priv); - if (ret) - return ret; - - /** - * temperature offset calibration is only needed for runtime ucode, - * so prepare the value now. - */ - if (priv->lib->need_temp_offset_calib) { - if (priv->lib->temp_offset_v2) - return iwl_set_temperature_offset_calib_v2(priv); - else - return iwl_set_temperature_offset_calib(priv); - } - - return 0; -} - -static int iwl_send_wimax_coex(struct iwl_priv *priv) -{ - struct iwl_wimax_coex_cmd coex_cmd; - - /* coexistence is disabled */ - memset(&coex_cmd, 0, sizeof(coex_cmd)); - - return iwl_dvm_send_cmd_pdu(priv, - COEX_PRIORITY_TABLE_CMD, 0, - sizeof(coex_cmd), &coex_cmd); -} - -static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { - ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - ((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - ((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | - (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), - 0, 0, 0, 0, 0, 0, 0 -}; - -void iwl_send_prio_tbl(struct iwl_priv *priv) -{ - struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd; - - memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl, - sizeof(iwl_bt_prio_tbl)); - if (iwl_dvm_send_cmd_pdu(priv, - REPLY_BT_COEX_PRIO_TABLE, 0, - sizeof(prio_tbl_cmd), &prio_tbl_cmd)) - IWL_ERR(priv, "failed to send BT prio tbl command\n"); -} - -int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) -{ - struct iwl_bt_coex_prot_env_cmd env_cmd; - int ret; - - env_cmd.action = action; - env_cmd.type = type; - ret = iwl_dvm_send_cmd_pdu(priv, - REPLY_BT_COEX_PROT_ENV, 0, - sizeof(env_cmd), &env_cmd); - if (ret) - IWL_ERR(priv, "failed to send BT env command\n"); - return ret; -} - -static const u8 iwlagn_default_queue_to_tx_fifo[] = { - IWL_TX_FIFO_VO, - IWL_TX_FIFO_VI, - IWL_TX_FIFO_BE, - IWL_TX_FIFO_BK, -}; - -static const u8 iwlagn_ipan_queue_to_tx_fifo[] = { - IWL_TX_FIFO_VO, - IWL_TX_FIFO_VI, - IWL_TX_FIFO_BE, - IWL_TX_FIFO_BK, - IWL_TX_FIFO_BK_IPAN, - IWL_TX_FIFO_BE_IPAN, - IWL_TX_FIFO_VI_IPAN, - IWL_TX_FIFO_VO_IPAN, - IWL_TX_FIFO_BE_IPAN, - IWL_TX_FIFO_UNUSED, - IWL_TX_FIFO_AUX, -}; - -static int iwl_alive_notify(struct iwl_priv *priv) -{ - const u8 *queue_to_txf; - u8 n_queues; - int ret; - int i; - - iwl_trans_fw_alive(priv->trans, 0); - - if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN && - priv->nvm_data->sku_cap_ipan_enable) { - n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo); - queue_to_txf = iwlagn_ipan_queue_to_tx_fifo; - } else { - n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); 
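Editor's aside: the BT-coexistence priority table above packs two fields into each byte, an event priority and a shared-antenna flag. Outside the driver the same packing reduces to a couple of shifts; the bit positions and priority values below are illustrative stand-ins for the IWL_BT_COEX_PRIO_TBL_* constants.

#include <stdint.h>
#include <stdio.h>

#define PRIO_POS            1   /* priority field starts at bit 1 */
#define SHARED_ANTENNA_POS  0   /* shared-antenna flag is bit 0   */

enum bt_prio { PRIO_BYPASS = 1, PRIO_LOW = 2, PRIO_HIGH = 3 };

static uint8_t bt_prio_entry(enum bt_prio prio, int shared_antenna)
{
	return (uint8_t)((prio << PRIO_POS) |
			 (shared_antenna << SHARED_ANTENNA_POS));
}

int main(void)
{
	/* e.g. a calibration event on the antenna shared with BT */
	uint8_t e = bt_prio_entry(PRIO_BYPASS, 1);

	printf("entry=0x%02x\n", (unsigned)e);
	return 0;
}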
- queue_to_txf = iwlagn_default_queue_to_tx_fifo; - } - - for (i = 0; i < n_queues; i++) - if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED) - iwl_trans_ac_txq_enable(priv->trans, i, - queue_to_txf[i], 0); - - priv->passive_no_rx = false; - priv->transport_queue_stop = 0; - - ret = iwl_send_wimax_coex(priv); - if (ret) - return ret; - - if (!priv->lib->no_xtal_calib) { - ret = iwl_set_Xtal_calib(priv); - if (ret) - return ret; - } - - return iwl_send_calib_results(priv); -} - -struct iwl_alive_data { - bool valid; - u8 subtype; -}; - -static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_priv *priv = - container_of(notif_wait, struct iwl_priv, notif_wait); - struct iwl_alive_data *alive_data = data; - struct iwl_alive_resp *palive; - - palive = (void *)pkt->data; - - IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision " - "0x%01X 0x%01X\n", - palive->is_valid, palive->ver_type, - palive->ver_subtype); - - priv->device_pointers.error_event_table = - le32_to_cpu(palive->error_event_table_ptr); - priv->device_pointers.log_event_table = - le32_to_cpu(palive->log_event_table_ptr); - - alive_data->subtype = palive->ver_subtype; - alive_data->valid = palive->is_valid == UCODE_VALID_OK; - - return true; -} - -#define UCODE_ALIVE_TIMEOUT HZ -#define UCODE_CALIB_TIMEOUT (2*HZ) - -int iwl_load_ucode_wait_alive(struct iwl_priv *priv, - enum iwl_ucode_type ucode_type) -{ - struct iwl_notification_wait alive_wait; - struct iwl_alive_data alive_data; - const struct fw_img *fw; - int ret; - enum iwl_ucode_type old_type; - static const u16 alive_cmd[] = { REPLY_ALIVE }; - - fw = iwl_get_ucode_image(priv, ucode_type); - if (WARN_ON(!fw)) - return -EINVAL; - - old_type = priv->cur_ucode; - priv->cur_ucode = ucode_type; - priv->ucode_loaded = false; - - iwl_init_notification_wait(&priv->notif_wait, &alive_wait, - alive_cmd, ARRAY_SIZE(alive_cmd), - iwl_alive_fn, &alive_data); - - ret = iwl_trans_start_fw(priv->trans, fw, false); - if (ret) { - priv->cur_ucode = old_type; - iwl_remove_notification(&priv->notif_wait, &alive_wait); - return ret; - } - - /* - * Some things may run in the background now, but we - * just wait for the ALIVE notification here. 
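Editor's aside: iwl_load_ucode_wait_alive() above follows a register-then-wait pattern: remember the previous image type, arm a waiter for REPLY_ALIVE, start the firmware, block with a timeout, and roll back the state on any failure. The stubs below only model that control flow; none of the iwl_* calls or timeouts are real, and the notification machinery is reduced to a plain function.

#include <stdio.h>
#include <stdbool.h>

enum ucode_type { UCODE_INIT, UCODE_REGULAR, UCODE_WOWLAN };

static enum ucode_type cur_ucode = UCODE_INIT;
static bool ucode_loaded;

/* stand-ins for the transport and notification-wait layers */
static int start_fw(enum ucode_type t) { (void)t; return 0; }
static int wait_alive(int timeout_ms)  { (void)timeout_ms; return 0; }

static int load_ucode_wait_alive(enum ucode_type type)
{
	enum ucode_type old_type = cur_ucode;
	int ret;

	cur_ucode = type;
	ucode_loaded = false;

	ret = start_fw(type);
	if (ret)
		goto restore;

	ret = wait_alive(1000 /* ms, roughly UCODE_ALIVE_TIMEOUT */);
	if (ret)
		goto restore;

	ucode_loaded = true;
	return 0;

restore:
	/* any failure rolls back to the previously running image type */
	cur_ucode = old_type;
	return ret;
}

int main(void)
{
	printf("load: %d, loaded=%d\n",
	       load_ucode_wait_alive(UCODE_REGULAR), ucode_loaded);
	return 0;
}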
- */ - ret = iwl_wait_notification(&priv->notif_wait, &alive_wait, - UCODE_ALIVE_TIMEOUT); - if (ret) { - priv->cur_ucode = old_type; - return ret; - } - - if (!alive_data.valid) { - IWL_ERR(priv, "Loaded ucode is not valid!\n"); - priv->cur_ucode = old_type; - return -EIO; - } - - priv->ucode_loaded = true; - - if (ucode_type != IWL_UCODE_WOWLAN) { - /* delay a bit to give rfkill time to run */ - msleep(5); - } - - ret = iwl_alive_notify(priv); - if (ret) { - IWL_WARN(priv, - "Could not complete ALIVE transition: %d\n", ret); - priv->cur_ucode = old_type; - return ret; - } - - return 0; -} - -static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_priv *priv = data; - struct iwl_calib_hdr *hdr; - - if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) { - WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION); - return true; - } - - hdr = (struct iwl_calib_hdr *)pkt->data; - - if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt))) - IWL_ERR(priv, "Failed to record calibration data %d\n", - hdr->op_code); - - return false; -} - -int iwl_run_init_ucode(struct iwl_priv *priv) -{ - struct iwl_notification_wait calib_wait; - static const u16 calib_complete[] = { - CALIBRATION_RES_NOTIFICATION, - CALIBRATION_COMPLETE_NOTIFICATION - }; - int ret; - - lockdep_assert_held(&priv->mutex); - - /* No init ucode required? Curious, but maybe ok */ - if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) - return 0; - - iwl_init_notification_wait(&priv->notif_wait, &calib_wait, - calib_complete, ARRAY_SIZE(calib_complete), - iwlagn_wait_calib, priv); - - /* Will also start the device */ - ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT); - if (ret) - goto error; - - ret = iwl_init_alive_start(priv); - if (ret) - goto error; - - /* - * Some things may run in the background now, but we - * just wait for the calibration complete notification. - */ - ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, - UCODE_CALIB_TIMEOUT); - - goto out; - - error: - iwl_remove_notification(&priv->notif_wait, &calib_wait); - out: - /* Whatever happened, stop the device */ - iwl_trans_stop_device(priv->trans); - priv->ucode_loaded = false; - - return ret; -} diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c deleted file mode 100644 index 06f6cc08f451..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ /dev/null @@ -1,140 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include "iwl-config.h" -#include "iwl-csr.h" -#include "iwl-agn-hw.h" - -/* Highest firmware API version supported */ -#define IWL1000_UCODE_API_MAX 5 -#define IWL100_UCODE_API_MAX 5 - -/* Oldest version we won't warn about */ -#define IWL1000_UCODE_API_OK 5 -#define IWL100_UCODE_API_OK 5 - -/* Lowest firmware API version supported */ -#define IWL1000_UCODE_API_MIN 1 -#define IWL100_UCODE_API_MIN 5 - -/* EEPROM version */ -#define EEPROM_1000_TX_POWER_VERSION (4) -#define EEPROM_1000_EEPROM_VERSION (0x15C) - -#define IWL1000_FW_PRE "iwlwifi-1000-" -#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode" - -#define IWL100_FW_PRE "iwlwifi-100-" -#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode" - - -static const struct iwl_base_params iwl1000_base_params = { - .num_of_queues = IWLAGN_NUM_QUEUES, - .eeprom_size = OTP_LOW_IMAGE_SIZE, - .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, - .max_ll_items = OTP_MAX_LL_ITEMS_1000, - .shadow_ram_support = false, - .led_compensation = 51, - .wd_timeout = IWL_WATCHDOG_DISABLED, - .max_event_log_size = 128, - .scd_chain_ext_wa = true, -}; - -static const struct iwl_ht_params iwl1000_ht_params = { - .ht_greenfield_support = true, - .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ), -}; - -static const struct iwl_eeprom_params iwl1000_eeprom_params = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_REG_BAND_24_HT40_CHANNELS, - EEPROM_REGULATORY_BAND_NO_HT40, - } -}; - -#define IWL_DEVICE_1000 \ - .fw_name_pre = IWL1000_FW_PRE, \ - .ucode_api_max = IWL1000_UCODE_API_MAX, \ - .ucode_api_ok = IWL1000_UCODE_API_OK, \ - .ucode_api_min = IWL1000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_1000, \ - .max_inst_size = IWLAGN_RTC_INST_SIZE, \ - .max_data_size = IWLAGN_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .base_params = &iwl1000_base_params, \ - .eeprom_params = &iwl1000_eeprom_params, \ - .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl1000_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", - IWL_DEVICE_1000, - .ht_params = &iwl1000_ht_params, -}; - -const struct iwl_cfg iwl1000_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 1000 BG", - IWL_DEVICE_1000, -}; - -#define IWL_DEVICE_100 \ - .fw_name_pre = IWL100_FW_PRE, \ - .ucode_api_max = IWL100_UCODE_API_MAX, \ - .ucode_api_ok = IWL100_UCODE_API_OK, \ - .ucode_api_min = IWL100_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_100, \ - .max_inst_size = IWLAGN_RTC_INST_SIZE, \ - .max_data_size = IWLAGN_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .base_params = &iwl1000_base_params, \ - .eeprom_params = &iwl1000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl100_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", - IWL_DEVICE_100, - .ht_params = &iwl1000_ht_params, -}; - -const struct iwl_cfg iwl100_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 100 BG", - 
IWL_DEVICE_100, -}; - -MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c deleted file mode 100644 index 890b95f497d6..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ /dev/null @@ -1,216 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include "iwl-config.h" -#include "iwl-agn-hw.h" -#include "dvm/commands.h" /* needed for BT for now */ - -/* Highest firmware API version supported */ -#define IWL2030_UCODE_API_MAX 6 -#define IWL2000_UCODE_API_MAX 6 -#define IWL105_UCODE_API_MAX 6 -#define IWL135_UCODE_API_MAX 6 - -/* Oldest version we won't warn about */ -#define IWL2030_UCODE_API_OK 6 -#define IWL2000_UCODE_API_OK 6 -#define IWL105_UCODE_API_OK 6 -#define IWL135_UCODE_API_OK 6 - -/* Lowest firmware API version supported */ -#define IWL2030_UCODE_API_MIN 5 -#define IWL2000_UCODE_API_MIN 5 -#define IWL105_UCODE_API_MIN 5 -#define IWL135_UCODE_API_MIN 5 - -/* EEPROM version */ -#define EEPROM_2000_TX_POWER_VERSION (6) -#define EEPROM_2000_EEPROM_VERSION (0x805) - - -#define IWL2030_FW_PRE "iwlwifi-2030-" -#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode" - -#define IWL2000_FW_PRE "iwlwifi-2000-" -#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode" - -#define IWL105_FW_PRE "iwlwifi-105-" -#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode" - -#define IWL135_FW_PRE "iwlwifi-135-" -#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode" - -static const struct iwl_base_params iwl2000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, - .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, - .max_ll_items = OTP_MAX_LL_ITEMS_2x00, - .shadow_ram_support = true, - .led_compensation = 51, - .wd_timeout = IWL_DEF_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ - .scd_chain_ext_wa = true, -}; - - -static const struct iwl_base_params iwl2030_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, - .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, - .max_ll_items = OTP_MAX_LL_ITEMS_2x00, - .shadow_ram_support = true, - .led_compensation = 57, - .wd_timeout = IWL_LONG_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = false, /* TODO: fix 
bugs using this feature */ - .scd_chain_ext_wa = true, -}; - -static const struct iwl_ht_params iwl2000_ht_params = { - .ht_greenfield_support = true, - .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ), -}; - -static const struct iwl_eeprom_params iwl20x0_eeprom_params = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_6000_REG_BAND_24_HT40_CHANNELS, - EEPROM_REGULATORY_BAND_NO_HT40, - }, - .enhanced_txpower = true, -}; - -#define IWL_DEVICE_2000 \ - .fw_name_pre = IWL2000_FW_PRE, \ - .ucode_api_max = IWL2000_UCODE_API_MAX, \ - .ucode_api_ok = IWL2000_UCODE_API_OK, \ - .ucode_api_min = IWL2000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_2000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .base_params = &iwl2000_base_params, \ - .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - - -const struct iwl_cfg iwl2000_2bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", - IWL_DEVICE_2000, - .ht_params = &iwl2000_ht_params, -}; - -const struct iwl_cfg iwl2000_2bgn_d_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN", - IWL_DEVICE_2000, - .ht_params = &iwl2000_ht_params, -}; - -#define IWL_DEVICE_2030 \ - .fw_name_pre = IWL2030_FW_PRE, \ - .ucode_api_max = IWL2030_UCODE_API_MAX, \ - .ucode_api_ok = IWL2030_UCODE_API_OK, \ - .ucode_api_min = IWL2030_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_2030, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .base_params = &iwl2030_base_params, \ - .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl2030_2bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", - IWL_DEVICE_2030, - .ht_params = &iwl2000_ht_params, -}; - -#define IWL_DEVICE_105 \ - .fw_name_pre = IWL105_FW_PRE, \ - .ucode_api_max = IWL105_UCODE_API_MAX, \ - .ucode_api_ok = IWL105_UCODE_API_OK, \ - .ucode_api_min = IWL105_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_105, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .base_params = &iwl2000_base_params, \ - .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl105_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", - IWL_DEVICE_105, - .ht_params = &iwl2000_ht_params, -}; - -const struct iwl_cfg iwl105_bgn_d_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 105D BGN", - IWL_DEVICE_105, - .ht_params = &iwl2000_ht_params, -}; - -#define IWL_DEVICE_135 \ - .fw_name_pre = IWL135_FW_PRE, \ - .ucode_api_max = IWL135_UCODE_API_MAX, \ - .ucode_api_ok = IWL135_UCODE_API_OK, \ - .ucode_api_min = IWL135_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_135, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = 
EEPROM_2000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .base_params = &iwl2030_base_params, \ - .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl135_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", - IWL_DEVICE_135, - .ht_params = &iwl2000_ht_params, -}; - -MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK)); -MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK)); -MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c deleted file mode 100644 index 724194e23414..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ /dev/null @@ -1,178 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include "iwl-config.h" -#include "iwl-agn-hw.h" -#include "iwl-csr.h" - -/* Highest firmware API version supported */ -#define IWL5000_UCODE_API_MAX 5 -#define IWL5150_UCODE_API_MAX 2 - -/* Oldest version we won't warn about */ -#define IWL5000_UCODE_API_OK 5 -#define IWL5150_UCODE_API_OK 2 - -/* Lowest firmware API version supported */ -#define IWL5000_UCODE_API_MIN 1 -#define IWL5150_UCODE_API_MIN 1 - -/* EEPROM versions */ -#define EEPROM_5000_TX_POWER_VERSION (4) -#define EEPROM_5000_EEPROM_VERSION (0x11A) -#define EEPROM_5050_TX_POWER_VERSION (4) -#define EEPROM_5050_EEPROM_VERSION (0x21E) - -#define IWL5000_FW_PRE "iwlwifi-5000-" -#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode" - -#define IWL5150_FW_PRE "iwlwifi-5150-" -#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode" - -static const struct iwl_base_params iwl5000_base_params = { - .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, - .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, - .led_compensation = 51, - .wd_timeout = IWL_WATCHDOG_DISABLED, - .max_event_log_size = 512, - .scd_chain_ext_wa = true, -}; - -static const struct iwl_ht_params iwl5000_ht_params = { - .ht_greenfield_support = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), -}; - -static const struct iwl_eeprom_params iwl5000_eeprom_params = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_REG_BAND_24_HT40_CHANNELS, - EEPROM_REG_BAND_52_HT40_CHANNELS - }, -}; - -#define IWL_DEVICE_5000 \ - .fw_name_pre = IWL5000_FW_PRE, \ - .ucode_api_max = IWL5000_UCODE_API_MAX, \ - .ucode_api_ok = IWL5000_UCODE_API_OK, \ - .ucode_api_min = IWL5000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_5000, \ - .max_inst_size = IWLAGN_RTC_INST_SIZE, \ - .max_data_size = IWLAGN_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_5000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ - .base_params = &iwl5000_base_params, \ - .eeprom_params = &iwl5000_eeprom_params, \ - .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl5300_agn_cfg = { - .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", - IWL_DEVICE_5000, - /* at least EEPROM 0x11A has wrong info */ - .valid_tx_ant = ANT_ABC, /* .cfg overwrite */ - .valid_rx_ant = ANT_ABC, /* .cfg overwrite */ - .ht_params = &iwl5000_ht_params, -}; - -const struct iwl_cfg iwl5100_bgn_cfg = { - .name = "Intel(R) WiFi Link 5100 BGN", - IWL_DEVICE_5000, - .valid_tx_ant = ANT_B, /* .cfg overwrite */ - .valid_rx_ant = ANT_AB, /* .cfg overwrite */ - .ht_params = &iwl5000_ht_params, -}; - -const struct iwl_cfg iwl5100_abg_cfg = { - .name = "Intel(R) WiFi Link 5100 ABG", - IWL_DEVICE_5000, - .valid_tx_ant = ANT_B, /* .cfg overwrite */ - .valid_rx_ant = ANT_AB, /* .cfg overwrite */ -}; - -const struct iwl_cfg iwl5100_agn_cfg = { - .name = "Intel(R) WiFi Link 5100 AGN", - IWL_DEVICE_5000, - .valid_tx_ant = ANT_B, /* .cfg overwrite */ - .valid_rx_ant = ANT_AB, /* .cfg overwrite */ - .ht_params = &iwl5000_ht_params, -}; - -const struct iwl_cfg iwl5350_agn_cfg = { - .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", - .fw_name_pre = IWL5000_FW_PRE, - .ucode_api_max = IWL5000_UCODE_API_MAX, - .ucode_api_ok = 
IWL5000_UCODE_API_OK, - .ucode_api_min = IWL5000_UCODE_API_MIN, - .device_family = IWL_DEVICE_FAMILY_5000, - .max_inst_size = IWLAGN_RTC_INST_SIZE, - .max_data_size = IWLAGN_RTC_DATA_SIZE, - .nvm_ver = EEPROM_5050_EEPROM_VERSION, - .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, - .base_params = &iwl5000_base_params, - .eeprom_params = &iwl5000_eeprom_params, - .ht_params = &iwl5000_ht_params, - .led_mode = IWL_LED_BLINK, - .internal_wimax_coex = true, -}; - -#define IWL_DEVICE_5150 \ - .fw_name_pre = IWL5150_FW_PRE, \ - .ucode_api_max = IWL5150_UCODE_API_MAX, \ - .ucode_api_ok = IWL5150_UCODE_API_OK, \ - .ucode_api_min = IWL5150_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_5150, \ - .max_inst_size = IWLAGN_RTC_INST_SIZE, \ - .max_data_size = IWLAGN_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_5050_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ - .base_params = &iwl5000_base_params, \ - .eeprom_params = &iwl5000_eeprom_params, \ - .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl5150_agn_cfg = { - .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", - IWL_DEVICE_5150, - .ht_params = &iwl5000_ht_params, - -}; - -const struct iwl_cfg iwl5150_abg_cfg = { - .name = "Intel(R) WiMAX/WiFi Link 5150 ABG", - IWL_DEVICE_5150, -}; - -MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c deleted file mode 100644 index 21b2630763dc..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ /dev/null @@ -1,389 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include "iwl-config.h" -#include "iwl-agn-hw.h" -#include "dvm/commands.h" /* needed for BT for now */ - -/* Highest firmware API version supported */ -#define IWL6000_UCODE_API_MAX 6 -#define IWL6050_UCODE_API_MAX 5 -#define IWL6000G2_UCODE_API_MAX 6 -#define IWL6035_UCODE_API_MAX 6 - -/* Oldest version we won't warn about */ -#define IWL6000_UCODE_API_OK 4 -#define IWL6000G2_UCODE_API_OK 5 -#define IWL6050_UCODE_API_OK 5 -#define IWL6000G2B_UCODE_API_OK 6 -#define IWL6035_UCODE_API_OK 6 - -/* Lowest firmware API version supported */ -#define IWL6000_UCODE_API_MIN 4 -#define IWL6050_UCODE_API_MIN 4 -#define IWL6000G2_UCODE_API_MIN 5 -#define IWL6035_UCODE_API_MIN 6 - -/* EEPROM versions */ -#define EEPROM_6000_TX_POWER_VERSION (4) -#define EEPROM_6000_EEPROM_VERSION (0x423) -#define EEPROM_6050_TX_POWER_VERSION (4) -#define EEPROM_6050_EEPROM_VERSION (0x532) -#define EEPROM_6150_TX_POWER_VERSION (6) -#define EEPROM_6150_EEPROM_VERSION (0x553) -#define EEPROM_6005_TX_POWER_VERSION (6) -#define EEPROM_6005_EEPROM_VERSION (0x709) -#define EEPROM_6030_TX_POWER_VERSION (6) -#define EEPROM_6030_EEPROM_VERSION (0x709) -#define EEPROM_6035_TX_POWER_VERSION (6) -#define EEPROM_6035_EEPROM_VERSION (0x753) - -#define IWL6000_FW_PRE "iwlwifi-6000-" -#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode" - -#define IWL6050_FW_PRE "iwlwifi-6050-" -#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode" - -#define IWL6005_FW_PRE "iwlwifi-6000g2a-" -#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode" - -#define IWL6030_FW_PRE "iwlwifi-6000g2b-" -#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode" - -static const struct iwl_base_params iwl6000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, - .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, - .max_ll_items = OTP_MAX_LL_ITEMS_6x00, - .shadow_ram_support = true, - .led_compensation = 51, - .wd_timeout = IWL_DEF_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ - .scd_chain_ext_wa = true, -}; - -static const struct iwl_base_params iwl6050_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, - .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, - .max_ll_items = OTP_MAX_LL_ITEMS_6x50, - .shadow_ram_support = true, - .led_compensation = 51, - .wd_timeout = IWL_DEF_WD_TIMEOUT, - .max_event_log_size = 1024, - .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ - .scd_chain_ext_wa = true, -}; - -static const struct iwl_base_params iwl6000_g2_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, - .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, - .max_ll_items = OTP_MAX_LL_ITEMS_6x00, - .shadow_ram_support = true, - .led_compensation = 57, - .wd_timeout = IWL_LONG_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ - .scd_chain_ext_wa = true, -}; - -static const struct iwl_ht_params iwl6000_ht_params = { - .ht_greenfield_support = true, - .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), -}; - -static const struct iwl_eeprom_params iwl6000_eeprom_params = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - 
EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_6000_REG_BAND_24_HT40_CHANNELS, - EEPROM_REG_BAND_52_HT40_CHANNELS - }, - .enhanced_txpower = true, -}; - -#define IWL_DEVICE_6005 \ - .fw_name_pre = IWL6005_FW_PRE, \ - .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ - .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6005, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_6005_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ - .base_params = &iwl6000_g2_base_params, \ - .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl6005_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6005_2abg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG", - IWL_DEVICE_6005, -}; - -const struct iwl_cfg iwl6005_2bg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205 BG", - IWL_DEVICE_6005, -}; - -const struct iwl_cfg iwl6005_2agn_sff_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6005_2agn_d_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6005_2agn_mow1_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6005_2agn_mow2_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, -}; - -#define IWL_DEVICE_6030 \ - .fw_name_pre = IWL6030_FW_PRE, \ - .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \ - .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6030, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ - .base_params = &iwl6000_g2_base_params, \ - .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl6030_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", - IWL_DEVICE_6030, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6030_2abg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG", - IWL_DEVICE_6030, -}; - -const struct iwl_cfg iwl6030_2bgn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN", - IWL_DEVICE_6030, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6030_2bg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6230 BG", - IWL_DEVICE_6030, -}; - -#define IWL_DEVICE_6035 \ - .fw_name_pre = IWL6030_FW_PRE, \ - .ucode_api_max = IWL6035_UCODE_API_MAX, \ - .ucode_api_ok = IWL6035_UCODE_API_OK, \ - .ucode_api_min = IWL6035_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6030, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ - .base_params = &iwl6000_g2_base_params, \ - .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = 
IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl6035_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", - IWL_DEVICE_6035, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6035_2agn_sff_cfg = { - .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN", - IWL_DEVICE_6035, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl1030_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", - IWL_DEVICE_6030, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl1030_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 1030 BG", - IWL_DEVICE_6030, -}; - -const struct iwl_cfg iwl130_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 130 BGN", - IWL_DEVICE_6030, - .ht_params = &iwl6000_ht_params, - .rx_with_siso_diversity = true, -}; - -const struct iwl_cfg iwl130_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 130 BG", - IWL_DEVICE_6030, - .rx_with_siso_diversity = true, -}; - -/* - * "i": Internal configuration, use internal Power Amplifier - */ -#define IWL_DEVICE_6000i \ - .fw_name_pre = IWL6000_FW_PRE, \ - .ucode_api_max = IWL6000_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000_UCODE_API_OK, \ - .ucode_api_min = IWL6000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6000i, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ - .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ - .nvm_ver = EEPROM_6000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ - .base_params = &iwl6000_base_params, \ - .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl6000i_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", - IWL_DEVICE_6000i, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6000i_2abg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG", - IWL_DEVICE_6000i, -}; - -const struct iwl_cfg iwl6000i_2bg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6200 BG", - IWL_DEVICE_6000i, -}; - -#define IWL_DEVICE_6050 \ - .fw_name_pre = IWL6050_FW_PRE, \ - .ucode_api_max = IWL6050_UCODE_API_MAX, \ - .ucode_api_min = IWL6050_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6050, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ - .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ - .nvm_ver = EEPROM_6050_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ - .base_params = &iwl6050_base_params, \ - .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl6050_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", - IWL_DEVICE_6050, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6050_2abg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG", - IWL_DEVICE_6050, -}; - -#define IWL_DEVICE_6150 \ - .fw_name_pre = IWL6050_FW_PRE, \ - .ucode_api_max = IWL6050_UCODE_API_MAX, \ - .ucode_api_min = IWL6050_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6150, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_6150_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ - .base_params = &iwl6050_base_params, \ - .eeprom_params = &iwl6000_eeprom_params, 
\ - .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K - -const struct iwl_cfg iwl6150_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", - IWL_DEVICE_6150, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6150_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG", - IWL_DEVICE_6150, -}; - -const struct iwl_cfg iwl6000_3agn_cfg = { - .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", - .fw_name_pre = IWL6000_FW_PRE, - .ucode_api_max = IWL6000_UCODE_API_MAX, - .ucode_api_ok = IWL6000_UCODE_API_OK, - .ucode_api_min = IWL6000_UCODE_API_MIN, - .device_family = IWL_DEVICE_FAMILY_6000, - .max_inst_size = IWL60_RTC_INST_SIZE, - .max_data_size = IWL60_RTC_DATA_SIZE, - .nvm_ver = EEPROM_6000_EEPROM_VERSION, - .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, - .base_params = &iwl6000_base_params, - .eeprom_params = &iwl6000_eeprom_params, - .ht_params = &iwl6000_ht_params, - .led_mode = IWL_LED_BLINK, -}; - -MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK)); -MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK)); -MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c deleted file mode 100644 index 1a73c7a1da77..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ /dev/null @@ -1,346 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include -#include "iwl-config.h" -#include "iwl-agn-hw.h" - -/* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 17 - -/* Oldest version we won't warn about */ -#define IWL7260_UCODE_API_OK 13 - -/* Lowest firmware API version supported */ -#define IWL7260_UCODE_API_MIN 13 - -/* NVM versions */ -#define IWL7260_NVM_VERSION 0x0a1d -#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ -#define IWL3160_NVM_VERSION 0x709 -#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ -#define IWL3165_NVM_VERSION 0x709 -#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ -#define IWL7265_NVM_VERSION 0x0a1d -#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ -#define IWL7265D_NVM_VERSION 0x0c11 -#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ - -/* DCCM offsets and lengths */ -#define IWL7000_DCCM_OFFSET 0x800000 -#define IWL7260_DCCM_LEN 0x14000 -#define IWL3160_DCCM_LEN 0x10000 -#define IWL7265_DCCM_LEN 0x17A00 - -#define IWL7260_FW_PRE "iwlwifi-7260-" -#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" - -#define IWL3160_FW_PRE "iwlwifi-3160-" -#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" - -#define IWL7265_FW_PRE "iwlwifi-7265-" -#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" - -#define IWL7265D_FW_PRE "iwlwifi-7265D-" -#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" - -#define NVM_HW_SECTION_NUM_FAMILY_7000 0 - -static const struct iwl_base_params iwl7000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000, - .num_of_queues = 31, - .pll_cfg_val = 0, - .shadow_ram_support = true, - .led_compensation = 57, - .wd_timeout = IWL_LONG_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = true, - .pcie_l1_allowed = true, - .apmg_wake_up_wa = true, -}; - -static const struct iwl_tt_params iwl7000_high_temp_tt_params = { - .ct_kill_entry = 118, - .ct_kill_exit = 96, - .ct_kill_duration = 5, - .dynamic_smps_entry = 114, - .dynamic_smps_exit = 110, - .tx_protection_entry = 114, - .tx_protection_exit = 108, - .tx_backoff = { - {.temperature = 112, .backoff = 300}, - {.temperature = 113, .backoff = 800}, - {.temperature = 114, .backoff = 1500}, - {.temperature = 115, .backoff = 3000}, - {.temperature = 116, .backoff = 5000}, - {.temperature = 117, .backoff = 10000}, - }, - .support_ct_kill = true, - .support_dynamic_smps = true, - 
.support_tx_protection = true, - .support_tx_backoff = true, -}; - -static const struct iwl_ht_params iwl7000_ht_params = { - .stbc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), -}; - -#define IWL_DEVICE_7000 \ - .ucode_api_max = IWL7260_UCODE_API_MAX, \ - .ucode_api_ok = IWL7260_UCODE_API_OK, \ - .ucode_api_min = IWL7260_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_7000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .base_params = &iwl7000_base_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ - .non_shared_ant = ANT_A, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .dccm_offset = IWL7000_DCCM_OFFSET - -const struct iwl_cfg iwl7260_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 7260", - .fw_name_pre = IWL7260_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL7260_NVM_VERSION, - .nvm_calib_ver = IWL7260_TX_POWER_VERSION, - .host_interrupt_operation_mode = true, - .lp_xtal_workaround = true, - .dccm_len = IWL7260_DCCM_LEN, -}; - -const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { - .name = "Intel(R) Dual Band Wireless AC 7260", - .fw_name_pre = IWL7260_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL7260_NVM_VERSION, - .nvm_calib_ver = IWL7260_TX_POWER_VERSION, - .high_temp = true, - .host_interrupt_operation_mode = true, - .lp_xtal_workaround = true, - .dccm_len = IWL7260_DCCM_LEN, - .thermal_params = &iwl7000_high_temp_tt_params, -}; - -const struct iwl_cfg iwl7260_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 7260", - .fw_name_pre = IWL7260_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL7260_NVM_VERSION, - .nvm_calib_ver = IWL7260_TX_POWER_VERSION, - .host_interrupt_operation_mode = true, - .lp_xtal_workaround = true, - .dccm_len = IWL7260_DCCM_LEN, -}; - -const struct iwl_cfg iwl7260_n_cfg = { - .name = "Intel(R) Wireless N 7260", - .fw_name_pre = IWL7260_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL7260_NVM_VERSION, - .nvm_calib_ver = IWL7260_TX_POWER_VERSION, - .host_interrupt_operation_mode = true, - .lp_xtal_workaround = true, - .dccm_len = IWL7260_DCCM_LEN, -}; - -const struct iwl_cfg iwl3160_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 3160", - .fw_name_pre = IWL3160_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL3160_NVM_VERSION, - .nvm_calib_ver = IWL3160_TX_POWER_VERSION, - .host_interrupt_operation_mode = true, - .dccm_len = IWL3160_DCCM_LEN, -}; - -const struct iwl_cfg iwl3160_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 3160", - .fw_name_pre = IWL3160_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL3160_NVM_VERSION, - .nvm_calib_ver = IWL3160_TX_POWER_VERSION, - .host_interrupt_operation_mode = true, - .dccm_len = IWL3160_DCCM_LEN, -}; - -const struct iwl_cfg iwl3160_n_cfg = { - .name = "Intel(R) Wireless N 3160", - .fw_name_pre = IWL3160_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL3160_NVM_VERSION, - .nvm_calib_ver = IWL3160_TX_POWER_VERSION, - .host_interrupt_operation_mode = true, - .dccm_len = IWL3160_DCCM_LEN, -}; - -static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { - {.pwr = 1600, .backoff = 0}, - {.pwr = 1300, .backoff = 467}, - {.pwr = 900, .backoff = 1900}, - {.pwr = 800, .backoff = 2630}, - {.pwr = 700, .backoff = 3720}, - {.pwr = 600, .backoff = 5550}, - 
{.pwr = 500, .backoff = 9350}, - {0}, -}; - -static const struct iwl_ht_params iwl7265_ht_params = { - .stbc = true, - .ldpc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), -}; - -const struct iwl_cfg iwl3165_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 3165", - .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL3165_NVM_VERSION, - .nvm_calib_ver = IWL3165_TX_POWER_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 7265", - .fw_name_pre = IWL7265_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 7265", - .fw_name_pre = IWL7265_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265_n_cfg = { - .name = "Intel(R) Wireless N 7265", - .fw_name_pre = IWL7265_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265d_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 7265", - .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265D_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265d_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 7265", - .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265D_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265d_n_cfg = { - .name = "Intel(R) Wireless N 7265", - .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265D_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c deleted file mode 100644 index 0116e5a4c393..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ /dev/null @@ -1,229 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
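The MODULE_FIRMWARE() lines removed above only advertise the newest image; the name actually requested is assembled from the family prefix, a uCode API number and the ".ucode" suffix, so IWL7265D_FW_PRE with API 13 becomes "iwlwifi-7265D-13.ucode". A stand-alone sketch of that construction; the fall-back loop over the API range is an assumption for illustration, not a copy of the driver's request path:

#include <stdio.h>

#define IWL7265D_FW_PRE		"iwlwifi-7265D-"
#define IWL7260_UCODE_API_MAX	17
#define IWL7260_UCODE_API_MIN	13

int main(void)
{
	char name[64];
	int api;

	/* try the newest supported API first, then fall back to older ones */
	for (api = IWL7260_UCODE_API_MAX; api >= IWL7260_UCODE_API_MIN; api--) {
		snprintf(name, sizeof(name), "%s%d.ucode", IWL7265D_FW_PRE, api);
		printf("candidate: %s\n", name); /* iwlwifi-7265D-17.ucode ... -13.ucode */
	}
	return 0;
}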
- * - *****************************************************************************/ - -#include -#include -#include "iwl-config.h" -#include "iwl-agn-hw.h" - -/* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 17 - -/* Oldest version we won't warn about */ -#define IWL8000_UCODE_API_OK 13 - -/* Lowest firmware API version supported */ -#define IWL8000_UCODE_API_MIN 13 - -/* NVM versions */ -#define IWL8000_NVM_VERSION 0x0a1d -#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */ - -/* Memory offsets and lengths */ -#define IWL8260_DCCM_OFFSET 0x800000 -#define IWL8260_DCCM_LEN 0x18000 -#define IWL8260_DCCM2_OFFSET 0x880000 -#define IWL8260_DCCM2_LEN 0x8000 -#define IWL8260_SMEM_OFFSET 0x400000 -#define IWL8260_SMEM_LEN 0x68000 - -#define IWL8000_FW_PRE "iwlwifi-8000" -#define IWL8000_MODULE_FIRMWARE(api) \ - IWL8000_FW_PRE "-" __stringify(api) ".ucode" - -#define NVM_HW_SECTION_NUM_FAMILY_8000 10 -#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B" -#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" - -/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */ -#define MAX_RX_AGG_SIZE_8260_SDIO 21 -#define MAX_TX_AGG_SIZE_8260_SDIO 40 - -/* Max A-MPDU exponent for HT and VHT */ -#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K -#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO IEEE80211_VHT_MAX_AMPDU_32K - -static const struct iwl_base_params iwl8000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, - .num_of_queues = 31, - .pll_cfg_val = 0, - .shadow_ram_support = true, - .led_compensation = 57, - .wd_timeout = IWL_LONG_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = true, - .pcie_l1_allowed = true, -}; - -static const struct iwl_ht_params iwl8000_ht_params = { - .stbc = true, - .ldpc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), -}; - -static const struct iwl_tt_params iwl8000_tt_params = { - .ct_kill_entry = 115, - .ct_kill_exit = 93, - .ct_kill_duration = 5, - .dynamic_smps_entry = 111, - .dynamic_smps_exit = 107, - .tx_protection_entry = 112, - .tx_protection_exit = 105, - .tx_backoff = { - {.temperature = 110, .backoff = 200}, - {.temperature = 111, .backoff = 600}, - {.temperature = 112, .backoff = 1200}, - {.temperature = 113, .backoff = 2000}, - {.temperature = 114, .backoff = 4000}, - }, - .support_ct_kill = true, - .support_dynamic_smps = true, - .support_tx_protection = true, - .support_tx_backoff = true, -}; - -#define IWL_DEVICE_8000 \ - .ucode_api_max = IWL8000_UCODE_API_MAX, \ - .ucode_api_ok = IWL8000_UCODE_API_OK, \ - .ucode_api_min = IWL8000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_8000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .base_params = &iwl8000_base_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ - .d0i3 = true, \ - .features = NETIF_F_RXCSUM, \ - .non_shared_ant = ANT_A, \ - .dccm_offset = IWL8260_DCCM_OFFSET, \ - .dccm_len = IWL8260_DCCM_LEN, \ - .dccm2_offset = IWL8260_DCCM2_OFFSET, \ - .dccm2_len = IWL8260_DCCM2_LEN, \ - .smem_offset = IWL8260_SMEM_OFFSET, \ - .smem_len = IWL8260_SMEM_LEN, \ - .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \ - .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ - .thermal_params = &iwl8000_tt_params, \ - .apmg_not_supported = true - -const struct iwl_cfg iwl8260_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 8260", - .fw_name_pre = IWL8000_FW_PRE, - IWL_DEVICE_8000, - 
.ht_params = &iwl8000_ht_params, - .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, -}; - -const struct iwl_cfg iwl8260_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 8260", - .fw_name_pre = IWL8000_FW_PRE, - IWL_DEVICE_8000, - .ht_params = &iwl8000_ht_params, - .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, -}; - -const struct iwl_cfg iwl4165_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 4165", - .fw_name_pre = IWL8000_FW_PRE, - IWL_DEVICE_8000, - .ht_params = &iwl8000_ht_params, - .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, -}; - -const struct iwl_cfg iwl8260_2ac_sdio_cfg = { - .name = "Intel(R) Dual Band Wireless-AC 8260", - .fw_name_pre = IWL8000_FW_PRE, - IWL_DEVICE_8000, - .ht_params = &iwl8000_ht_params, - .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, - .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, - .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, - .disable_dummy_notification = true, - .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, - .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, -}; - -const struct iwl_cfg iwl4165_2ac_sdio_cfg = { - .name = "Intel(R) Dual Band Wireless-AC 4165", - .fw_name_pre = IWL8000_FW_PRE, - IWL_DEVICE_8000, - .ht_params = &iwl8000_ht_params, - .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, - .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, - .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, - .bt_shared_single_ant = true, - .disable_dummy_notification = true, - .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, - .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, -}; - -MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h deleted file mode 100644 index 04a483d38659..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h +++ /dev/null @@ -1,117 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. 
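The max_ht_ampdu_exponent values used just above are 802.11 exponents rather than byte counts: the maximum A-MPDU length is 2^(13 + exponent) - 1 octets, so IEEE80211_HT_MAX_AMPDU_32K (exponent 2) caps the SDIO parts at 32767 octets while IEEE80211_HT_MAX_AMPDU_64K (exponent 3) allows 65535. A quick check of that arithmetic:

#include <stdio.h>

/* 802.11n: maximum A-MPDU length = 2^(13 + exponent) - 1 octets */
static unsigned int max_ampdu_len(unsigned int exponent)
{
	return (1u << (13 + exponent)) - 1;
}

int main(void)
{
	printf("HT exponent 2 -> %u octets (32K cap, SDIO parts)\n", max_ampdu_len(2)); /* 32767 */
	printf("HT exponent 3 -> %u octets (64K cap, PCIe parts)\n", max_ampdu_len(3)); /* 65535 */
	return 0;
}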
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -/* - * Please use this file (iwl-agn-hw.h) only for hardware-related definitions. - */ - -#ifndef __iwl_agn_hw_h__ -#define __iwl_agn_hw_h__ - -#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000) -#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000) - -#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000) -#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000) - -#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \ - IWLAGN_RTC_INST_LOWER_BOUND) -#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \ - IWLAGN_RTC_DATA_LOWER_BOUND) - -#define IWL60_RTC_INST_LOWER_BOUND (0x000000) -#define IWL60_RTC_INST_UPPER_BOUND (0x040000) -#define IWL60_RTC_DATA_LOWER_BOUND (0x800000) -#define IWL60_RTC_DATA_UPPER_BOUND (0x814000) -#define IWL60_RTC_INST_SIZE \ - (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND) -#define IWL60_RTC_DATA_SIZE \ - (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND) - -/* RSSI to dBm */ -#define IWLAGN_RSSI_OFFSET 44 - -#define IWLAGN_DEFAULT_TX_RETRY 15 -#define IWLAGN_MGMT_DFAULT_RETRY_LIMIT 3 -#define IWLAGN_RTS_DFAULT_RETRY_LIMIT 60 -#define IWLAGN_BAR_DFAULT_RETRY_LIMIT 60 -#define IWLAGN_LOW_RETRY_LIMIT 7 - -/* Limit range of txpower output target to be between these values */ -#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ -#define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */ - -/* EEPROM */ -#define IWLAGN_EEPROM_IMG_SIZE 2048 - -/* high blocks contain PAPD data */ -#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */ -#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */ -#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ -#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ -#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ -#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */ - - -#define IWLAGN_NUM_QUEUES 20 - -#endif /* __iwl_agn_hw_h__ */ diff --git 
a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h deleted file mode 100644 index 910970858f98..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ /dev/null @@ -1,437 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
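For reference, the RTC bounds in iwl-agn-hw.h above reduce to the section sizes that the IWL_DEVICE_* macros feed into .max_inst_size and .max_data_size:

    IWLAGN_RTC_INST_SIZE = 0x020000 - 0x000000 = 0x20000 (128 KiB)
    IWLAGN_RTC_DATA_SIZE = 0x80C000 - 0x800000 = 0x0C000 ( 48 KiB)
    IWL60_RTC_INST_SIZE  = 0x040000 - 0x000000 = 0x40000 (256 KiB)
    IWL60_RTC_DATA_SIZE  = 0x814000 - 0x800000 = 0x14000 ( 80 KiB)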
- * - *****************************************************************************/ -#ifndef __IWL_CONFIG_H__ -#define __IWL_CONFIG_H__ - -#include -#include - - -enum iwl_device_family { - IWL_DEVICE_FAMILY_UNDEFINED, - IWL_DEVICE_FAMILY_1000, - IWL_DEVICE_FAMILY_100, - IWL_DEVICE_FAMILY_2000, - IWL_DEVICE_FAMILY_2030, - IWL_DEVICE_FAMILY_105, - IWL_DEVICE_FAMILY_135, - IWL_DEVICE_FAMILY_5000, - IWL_DEVICE_FAMILY_5150, - IWL_DEVICE_FAMILY_6000, - IWL_DEVICE_FAMILY_6000i, - IWL_DEVICE_FAMILY_6005, - IWL_DEVICE_FAMILY_6030, - IWL_DEVICE_FAMILY_6050, - IWL_DEVICE_FAMILY_6150, - IWL_DEVICE_FAMILY_7000, - IWL_DEVICE_FAMILY_8000, -}; - -static inline bool iwl_has_secure_boot(u32 hw_rev, - enum iwl_device_family family) -{ - /* return 1 only for family 8000 B0 */ - if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC)) - return true; - - return false; -} - -/* - * LED mode - * IWL_LED_DEFAULT: use device default - * IWL_LED_RF_STATE: turn LED on/off based on RF state - * LED ON = RF ON - * LED OFF = RF OFF - * IWL_LED_BLINK: adjust led blink rate based on blink table - * IWL_LED_DISABLE: led disabled - */ -enum iwl_led_mode { - IWL_LED_DEFAULT, - IWL_LED_RF_STATE, - IWL_LED_BLINK, - IWL_LED_DISABLE, -}; - -/* - * This is the threshold value of plcp error rate per 100mSecs. It is - * used to set and check for the validity of plcp_delta. - */ -#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1 -#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50 -#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100 -#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200 -#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255 -#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0 - -/* TX queue watchdog timeouts in mSecs */ -#define IWL_WATCHDOG_DISABLED 0 -#define IWL_DEF_WD_TIMEOUT 2500 -#define IWL_LONG_WD_TIMEOUT 10000 -#define IWL_MAX_WD_TIMEOUT 120000 - -#define IWL_DEFAULT_MAX_TX_POWER 22 - -/* Antenna presence definitions */ -#define ANT_NONE 0x0 -#define ANT_A BIT(0) -#define ANT_B BIT(1) -#define ANT_C BIT(2) -#define ANT_AB (ANT_A | ANT_B) -#define ANT_AC (ANT_A | ANT_C) -#define ANT_BC (ANT_B | ANT_C) -#define ANT_ABC (ANT_A | ANT_B | ANT_C) - -static inline u8 num_of_ant(u8 mask) -{ - return !!((mask) & ANT_A) + - !!((mask) & ANT_B) + - !!((mask) & ANT_C); -} - -/* - * @max_ll_items: max number of OTP blocks - * @shadow_ram_support: shadow support for OTP memory - * @led_compensation: compensate on the led on/off time per HW according - * to the deviation to achieve the desired led frequency. - * The detail algorithm is described in iwl-led.c - * @wd_timeout: TX queues watchdog timeout - * @max_event_log_size: size of event log buffer size for ucode event logging - * @shadow_reg_enable: HW shadow register support - * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command - * is in flight. This is due to a HW bug in 7260, 3160 and 7265. - * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. 
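The ANT_* masks and num_of_ant() above encode the per-device antenna population; the 5100 configs removed earlier, for example, set .valid_tx_ant = ANT_B (one TX chain) and .valid_rx_ant = ANT_AB (two RX chains). A self-contained illustration of the same bit counting, with BIT() spelled out as plain shifts:

#include <assert.h>

#define ANT_A	(1 << 0)
#define ANT_B	(1 << 1)
#define ANT_C	(1 << 2)
#define ANT_AB	(ANT_A | ANT_B)
#define ANT_ABC	(ANT_A | ANT_B | ANT_C)

/* count how many antenna connectors are present in the mask */
static unsigned char num_of_ant(unsigned char mask)
{
	return !!(mask & ANT_A) + !!(mask & ANT_B) + !!(mask & ANT_C);
}

int main(void)
{
	assert(num_of_ant(ANT_B) == 1);		/* e.g. 5100 TX */
	assert(num_of_ant(ANT_AB) == 2);	/* e.g. 5100 RX */
	assert(num_of_ant(ANT_ABC) == 3);	/* e.g. 5300 TX/RX */
	return 0;
}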
- */ -struct iwl_base_params { - int eeprom_size; - int num_of_queues; /* def: HW dependent */ - /* for iwl_pcie_apm_init() */ - u32 pll_cfg_val; - - const u16 max_ll_items; - const bool shadow_ram_support; - u16 led_compensation; - unsigned int wd_timeout; - u32 max_event_log_size; - const bool shadow_reg_enable; - const bool pcie_l1_allowed; - const bool apmg_wake_up_wa; - const bool scd_chain_ext_wa; -}; - -/* - * @stbc: support Tx STBC and 1*SS Rx STBC - * @ldpc: support Tx/Rx with LDPC - * @use_rts_for_aggregation: use rts/cts protection for HT traffic - * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40 - */ -struct iwl_ht_params { - enum ieee80211_smps_mode smps_mode; - const bool ht_greenfield_support; /* if used set to true */ - const bool stbc; - const bool ldpc; - bool use_rts_for_aggregation; - u8 ht40_bands; -}; - -/* - * Tx-backoff threshold - * @temperature: The threshold in Celsius - * @backoff: The tx-backoff in uSec - */ -struct iwl_tt_tx_backoff { - s32 temperature; - u32 backoff; -}; - -#define TT_TX_BACKOFF_SIZE 6 - -/** - * struct iwl_tt_params - thermal throttling parameters - * @ct_kill_entry: CT Kill entry threshold - * @ct_kill_exit: CT Kill exit threshold - * @ct_kill_duration: The time intervals (in uSec) in which the driver needs - * to checks whether to exit CT Kill. - * @dynamic_smps_entry: Dynamic SMPS entry threshold - * @dynamic_smps_exit: Dynamic SMPS exit threshold - * @tx_protection_entry: TX protection entry threshold - * @tx_protection_exit: TX protection exit threshold - * @tx_backoff: Array of thresholds for tx-backoff , in ascending order. - * @support_ct_kill: Support CT Kill? - * @support_dynamic_smps: Support dynamic SMPS? - * @support_tx_protection: Support tx protection? - * @support_tx_backoff: Support tx-backoff? - */ -struct iwl_tt_params { - u32 ct_kill_entry; - u32 ct_kill_exit; - u32 ct_kill_duration; - u32 dynamic_smps_entry; - u32 dynamic_smps_exit; - u32 tx_protection_entry; - u32 tx_protection_exit; - struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; - bool support_ct_kill; - bool support_dynamic_smps; - bool support_tx_protection; - bool support_tx_backoff; -}; - -/* - * information on how to parse the EEPROM - */ -#define EEPROM_REG_BAND_1_CHANNELS 0x08 -#define EEPROM_REG_BAND_2_CHANNELS 0x26 -#define EEPROM_REG_BAND_3_CHANNELS 0x42 -#define EEPROM_REG_BAND_4_CHANNELS 0x5C -#define EEPROM_REG_BAND_5_CHANNELS 0x74 -#define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82 -#define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92 -#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80 -#define EEPROM_REGULATORY_BAND_NO_HT40 0 - -/* lower blocks contain EEPROM image and calibration data */ -#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ -#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */ -#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */ - -struct iwl_eeprom_params { - const u8 regulatory_bands[7]; - bool enhanced_txpower; -}; - -/* Tx-backoff power threshold - * @pwr: The power limit in mw - * @backoff: The tx-backoff in uSec - */ -struct iwl_pwr_tx_backoff { - u32 pwr; - u32 backoff; -}; - -/** - * struct iwl_cfg - * @name: Official name of the device - * @fw_name_pre: Firmware filename prefix. The api version and extension - * (.ucode) will be added to filename before loading from disk. The - * filename is constructed as fw_name_pre.ucode. - * @ucode_api_max: Highest version of uCode API supported by driver. 
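The tx_backoff[] thresholds in struct iwl_tt_params are documented above as ascending; the natural reading is that the backoff of the highest threshold already reached applies. A sketch of such a lookup, using the iwl7000_high_temp_tt_params values from earlier in this patch; the selection rule and the helper name are assumptions for illustration, not the in-tree thermal code:

struct iwl_tt_tx_backoff {
	int temperature;	/* threshold in Celsius */
	unsigned int backoff;	/* TX backoff in usec */
};

#define TT_TX_BACKOFF_SIZE 6

/* assumed rule: return the backoff of the last threshold at or below temp */
static unsigned int tt_pick_backoff(const struct iwl_tt_tx_backoff *tbl,
				    int temp)
{
	unsigned int backoff = 0;
	int i;

	for (i = 0; i < TT_TX_BACKOFF_SIZE && tbl[i].temperature; i++) {
		if (temp < tbl[i].temperature)
			break;
		backoff = tbl[i].backoff;
	}
	return backoff;
}

int main(void)
{
	static const struct iwl_tt_tx_backoff iwl7000_tbl[TT_TX_BACKOFF_SIZE] = {
		{112, 300}, {113, 800}, {114, 1500},
		{115, 3000}, {116, 5000}, {117, 10000},
	};

	return tt_pick_backoff(iwl7000_tbl, 114) == 1500 ? 0 : 1;
}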
- * @ucode_api_ok: oldest version of the uCode API that is OK to load - * without a warning, for use in transitions - * @ucode_api_min: Lowest version of uCode API supported by driver. - * @max_inst_size: The maximal length of the fw inst section - * @max_data_size: The maximal length of the fw data section - * @valid_tx_ant: valid transmit antenna - * @valid_rx_ant: valid receive antenna - * @non_shared_ant: the antenna that is for WiFi only - * @nvm_ver: NVM version - * @nvm_calib_ver: NVM calibration version - * @lib: pointer to the lib ops - * @base_params: pointer to basic parameters - * @ht_params: point to ht parameters - * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) - * @rx_with_siso_diversity: 1x1 device with rx antenna diversity - * @internal_wimax_coex: internal wifi/wimax combo device - * @high_temp: Is this NIC is designated to be in high temperature. - * @host_interrupt_operation_mode: device needs host interrupt operation - * mode set - * @d0i3: device uses d0i3 instead of d3 - * @nvm_hw_section_num: the ID of the HW NVM section - * @features: hw features, any combination of feature_whitelist - * @pwr_tx_backoffs: translation table between power limits and backoffs - * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response - * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response - * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the - * station can receive in HT - * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the - * station can receive in VHT - * @dccm_offset: offset from which DCCM begins - * @dccm_len: length of DCCM (including runtime stack CCM) - * @dccm2_offset: offset from which the second DCCM begins - * @dccm2_len: length of the second DCCM - * @smem_offset: offset from which the SMEM begins - * @smem_len: the length of SMEM - * - * We enable the driver to be backward compatible wrt. hardware features. - * API differences in uCode shouldn't be handled here but through TLVs - * and/or the uCode API version instead. 
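@pwr_tx_backoffs points at a descending, zero-terminated table like iwl7265_pwr_tx_backoffs earlier in this patch; given a platform power limit in mW, the first entry the limit still reaches supplies the backoff. The helper below is a sketch under that assumed rule, not the driver's code:

struct iwl_pwr_tx_backoff {
	unsigned int pwr;	/* power limit in mW */
	unsigned int backoff;	/* TX backoff in usec */
};

/* assumed rule: first entry not above the platform limit wins */
static unsigned int pwr_pick_backoff(const struct iwl_pwr_tx_backoff *tbl,
				     unsigned int limit_mw)
{
	for (; tbl->pwr; tbl++)
		if (limit_mw >= tbl->pwr)
			return tbl->backoff;
	return 0;
}

int main(void)
{
	static const struct iwl_pwr_tx_backoff iwl7265_tbl[] = {
		{1600, 0}, {1300, 467}, {900, 1900}, {800, 2630},
		{700, 3720}, {600, 5550}, {500, 9350}, {0, 0},
	};

	/* a 1000 mW platform limit falls between 1300 and 900 -> 1900 usec */
	return pwr_pick_backoff(iwl7265_tbl, 1000) == 1900 ? 0 : 1;
}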
- */ -struct iwl_cfg { - /* params specific to an individual device within a device family */ - const char *name; - const char *fw_name_pre; - const unsigned int ucode_api_max; - const unsigned int ucode_api_ok; - const unsigned int ucode_api_min; - const enum iwl_device_family device_family; - const u32 max_data_size; - const u32 max_inst_size; - u8 valid_tx_ant; - u8 valid_rx_ant; - u8 non_shared_ant; - bool bt_shared_single_ant; - u16 nvm_ver; - u16 nvm_calib_ver; - /* params not likely to change within a device family */ - const struct iwl_base_params *base_params; - /* params likely to change within a device family */ - const struct iwl_ht_params *ht_params; - const struct iwl_eeprom_params *eeprom_params; - enum iwl_led_mode led_mode; - const bool rx_with_siso_diversity; - const bool internal_wimax_coex; - const bool host_interrupt_operation_mode; - bool high_temp; - bool d0i3; - u8 nvm_hw_section_num; - bool lp_xtal_workaround; - const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; - bool no_power_up_nic_in_init; - const char *default_nvm_file_B_step; - const char *default_nvm_file_C_step; - netdev_features_t features; - unsigned int max_rx_agg_size; - bool disable_dummy_notification; - unsigned int max_tx_agg_size; - unsigned int max_ht_ampdu_exponent; - unsigned int max_vht_ampdu_exponent; - const u32 dccm_offset; - const u32 dccm_len; - const u32 dccm2_offset; - const u32 dccm2_len; - const u32 smem_offset; - const u32 smem_len; - const struct iwl_tt_params *thermal_params; - bool apmg_not_supported; -}; - -/* - * This list declares the config structures for all devices. - */ -#if IS_ENABLED(CONFIG_IWLDVM) -extern const struct iwl_cfg iwl5300_agn_cfg; -extern const struct iwl_cfg iwl5100_agn_cfg; -extern const struct iwl_cfg iwl5350_agn_cfg; -extern const struct iwl_cfg iwl5100_bgn_cfg; -extern const struct iwl_cfg iwl5100_abg_cfg; -extern const struct iwl_cfg iwl5150_agn_cfg; -extern const struct iwl_cfg iwl5150_abg_cfg; -extern const struct iwl_cfg iwl6005_2agn_cfg; -extern const struct iwl_cfg iwl6005_2abg_cfg; -extern const struct iwl_cfg iwl6005_2bg_cfg; -extern const struct iwl_cfg iwl6005_2agn_sff_cfg; -extern const struct iwl_cfg iwl6005_2agn_d_cfg; -extern const struct iwl_cfg iwl6005_2agn_mow1_cfg; -extern const struct iwl_cfg iwl6005_2agn_mow2_cfg; -extern const struct iwl_cfg iwl1030_bgn_cfg; -extern const struct iwl_cfg iwl1030_bg_cfg; -extern const struct iwl_cfg iwl6030_2agn_cfg; -extern const struct iwl_cfg iwl6030_2abg_cfg; -extern const struct iwl_cfg iwl6030_2bgn_cfg; -extern const struct iwl_cfg iwl6030_2bg_cfg; -extern const struct iwl_cfg iwl6000i_2agn_cfg; -extern const struct iwl_cfg iwl6000i_2abg_cfg; -extern const struct iwl_cfg iwl6000i_2bg_cfg; -extern const struct iwl_cfg iwl6000_3agn_cfg; -extern const struct iwl_cfg iwl6050_2agn_cfg; -extern const struct iwl_cfg iwl6050_2abg_cfg; -extern const struct iwl_cfg iwl6150_bgn_cfg; -extern const struct iwl_cfg iwl6150_bg_cfg; -extern const struct iwl_cfg iwl1000_bgn_cfg; -extern const struct iwl_cfg iwl1000_bg_cfg; -extern const struct iwl_cfg iwl100_bgn_cfg; -extern const struct iwl_cfg iwl100_bg_cfg; -extern const struct iwl_cfg iwl130_bgn_cfg; -extern const struct iwl_cfg iwl130_bg_cfg; -extern const struct iwl_cfg iwl2000_2bgn_cfg; -extern const struct iwl_cfg iwl2000_2bgn_d_cfg; -extern const struct iwl_cfg iwl2030_2bgn_cfg; -extern const struct iwl_cfg iwl6035_2agn_cfg; -extern const struct iwl_cfg iwl6035_2agn_sff_cfg; -extern const struct iwl_cfg iwl105_bgn_cfg; -extern const struct iwl_cfg 
iwl105_bgn_d_cfg; -extern const struct iwl_cfg iwl135_bgn_cfg; -#endif /* CONFIG_IWLDVM */ -#if IS_ENABLED(CONFIG_IWLMVM) -extern const struct iwl_cfg iwl7260_2ac_cfg; -extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp; -extern const struct iwl_cfg iwl7260_2n_cfg; -extern const struct iwl_cfg iwl7260_n_cfg; -extern const struct iwl_cfg iwl3160_2ac_cfg; -extern const struct iwl_cfg iwl3160_2n_cfg; -extern const struct iwl_cfg iwl3160_n_cfg; -extern const struct iwl_cfg iwl3165_2ac_cfg; -extern const struct iwl_cfg iwl7265_2ac_cfg; -extern const struct iwl_cfg iwl7265_2n_cfg; -extern const struct iwl_cfg iwl7265_n_cfg; -extern const struct iwl_cfg iwl7265d_2ac_cfg; -extern const struct iwl_cfg iwl7265d_2n_cfg; -extern const struct iwl_cfg iwl7265d_n_cfg; -extern const struct iwl_cfg iwl8260_2n_cfg; -extern const struct iwl_cfg iwl8260_2ac_cfg; -extern const struct iwl_cfg iwl4165_2ac_cfg; -extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; -extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; -#endif /* CONFIG_IWLMVM */ - -#endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h deleted file mode 100644 index 543abeaffcf0..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ /dev/null @@ -1,552 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_csr_h__ -#define __iwl_csr_h__ -/* - * CSR (control and status registers) - * - * CSR registers are mapped directly into PCI bus space, and are accessible - * whenever platform supplies power to device, even when device is in - * low power states due to driver-invoked device resets - * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. - * - * Use iwl_write32() and iwl_read32() family to access these registers; - * these provide simple PCI bus access, without waking up the MAC. - * Do not use iwl_write_direct32() family for these registers; - * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. - * The MAC (uCode processor, etc.) does not need to be powered up for accessing - * the CSR registers. - * - * NOTE: Device does need to be awake in order to read this memory - * via CSR_EEPROM and CSR_OTP registers - */ -#define CSR_BASE (0x000) - -#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ -#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ -#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ -#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ -#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ -#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ -#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ -#define CSR_GP_CNTRL (CSR_BASE+0x024) - -/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ -#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) - -/* - * Hardware revision info - * Bit fields: - * 31-16: Reserved - * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions - * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D - * 1-0: "Dash" (-) value, as in A-1, etc. - */ -#define CSR_HW_REV (CSR_BASE+0x028) - -/* - * EEPROM and OTP (one-time-programmable) memory reads - * - * NOTE: Device must be awake, initialized via apm_ops.init(), - * in order to read. - */ -#define CSR_EEPROM_REG (CSR_BASE+0x02c) -#define CSR_EEPROM_GP (CSR_BASE+0x030) -#define CSR_OTP_GP_REG (CSR_BASE+0x034) - -#define CSR_GIO_REG (CSR_BASE+0x03C) -#define CSR_GP_UCODE_REG (CSR_BASE+0x048) -#define CSR_GP_DRIVER_REG (CSR_BASE+0x050) - -/* - * UCODE-DRIVER GP (general purpose) mailbox registers. - * SET/CLR registers set/clear bit(s) if "1" is written. 
- */ -#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) -#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) -#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) -#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) - -#define CSR_MBOX_SET_REG (CSR_BASE + 0x88) - -#define CSR_LED_REG (CSR_BASE+0x094) -#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) -#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */ - - -/* GIO Chicken Bits (PCI Express bus link power management) */ -#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) - -/* Analog phase-lock-loop configuration */ -#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) - -/* - * CSR HW resources monitor registers - */ -#define CSR_MONITOR_CFG_REG (CSR_BASE+0x214) -#define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228) -#define CSR_MONITOR_XTAL_RESOURCES (0x00000010) - -/* - * CSR Hardware Revision Workaround Register. Indicates hardware rev; - * "step" determines CCK backoff for txpower calculation. Used for 4965 only. - * See also CSR_HW_REV register. - * Bit fields: - * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step - * 1-0: "Dash" (-) value, as in C-1, etc. - */ -#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) - -#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) -#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) - -/* Bits for CSR_HW_IF_CONFIG_REG */ -#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003) -#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C) -#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0) -#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) -#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) -#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00) -#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000) -#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000) - -#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0) -#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2) -#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6) -#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10) -#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12) -#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14) - -#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) -#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) -#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ -#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ -#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ -#define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000) -#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */ - -#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5) - -#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ -#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ - -/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), - * acknowledged (reset) by host writing "1" to flagged bits. 
*/ -#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ -#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ -#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ -#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ -#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ -#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ -#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */ -#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ -#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ -#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */ -#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ -#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ - -#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ - CSR_INT_BIT_HW_ERR | \ - CSR_INT_BIT_FH_TX | \ - CSR_INT_BIT_SW_ERR | \ - CSR_INT_BIT_PAGING | \ - CSR_INT_BIT_RF_KILL | \ - CSR_INT_BIT_SW_RX | \ - CSR_INT_BIT_WAKEUP | \ - CSR_INT_BIT_ALIVE | \ - CSR_INT_BIT_RX_PERIODIC) - -/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ -#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ -#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ -#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ -#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ -#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ -#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ - -#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ - CSR_FH_INT_BIT_RX_CHNL1 | \ - CSR_FH_INT_BIT_RX_CHNL0) - -#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ - CSR_FH_INT_BIT_TX_CHNL0) - -/* GPIO */ -#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) -#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) -#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200) - -/* RESET */ -#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) -#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) -#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) -#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) -#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) -#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) - -/* - * GP (general purpose) CONTROL REGISTER - * Bit fields: - * 27: HW_RF_KILL_SW - * Indicates state of (platform's) hardware RF-Kill switch - * 26-24: POWER_SAVE_TYPE - * Indicates current power-saving mode: - * 000 -- No power saving - * 001 -- MAC power-down - * 010 -- PHY (radio) power-down - * 011 -- Error - * 10: XTAL ON request - * 9-6: SYS_CONFIG - * Indicates current system configuration, reflecting pins on chip - * as forced high/low by device circuit board. - * 4: GOING_TO_SLEEP - * Indicates MAC is entering a power-saving sleep power-down. - * Not a good time to access device-internal resources. - * 3: MAC_ACCESS_REQ - * Host sets this to request and maintain MAC wakeup, to allow host - * access to device-internal resources. Host must wait for - * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR - * device registers. - * 2: INIT_DONE - * Host sets this to put device into fully operational D0 power mode. - * Host resets this after SW_RESET to put device into low power mode. - * 0: MAC_CLOCK_READY - * Indicates MAC (ucode processor, etc.) is powered up and can run. - * Internal resources are accessible. - * NOTE: This does not indicate that the processor is actually running. 
- * NOTE: This does not indicate that device has completed - * init or post-power-down restore of internal SRAM memory. - * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that - * SRAM is restored and uCode is in normal operation mode. - * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and - * do not need to save/restore it. - * NOTE: After device reset, this bit remains "0" until host sets - * INIT_DONE - */ -#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) -#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) -#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) -#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) -#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400) - -#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) - -#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) -#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) -#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) - - -/* HW REV */ -#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0) -#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2) - - -/** - * hw_rev values - */ -enum { - SILICON_A_STEP = 0, - SILICON_B_STEP, - SILICON_C_STEP, -}; - - -#define CSR_HW_REV_TYPE_MSK (0x000FFF0) -#define CSR_HW_REV_TYPE_5300 (0x0000020) -#define CSR_HW_REV_TYPE_5350 (0x0000030) -#define CSR_HW_REV_TYPE_5100 (0x0000050) -#define CSR_HW_REV_TYPE_5150 (0x0000040) -#define CSR_HW_REV_TYPE_1000 (0x0000060) -#define CSR_HW_REV_TYPE_6x00 (0x0000070) -#define CSR_HW_REV_TYPE_6x50 (0x0000080) -#define CSR_HW_REV_TYPE_6150 (0x0000084) -#define CSR_HW_REV_TYPE_6x05 (0x00000B0) -#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 -#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 -#define CSR_HW_REV_TYPE_2x30 (0x00000C0) -#define CSR_HW_REV_TYPE_2x00 (0x0000100) -#define CSR_HW_REV_TYPE_105 (0x0000110) -#define CSR_HW_REV_TYPE_135 (0x0000120) -#define CSR_HW_REV_TYPE_7265D (0x0000210) -#define CSR_HW_REV_TYPE_NONE (0x00001F0) - -/* EEPROM REG */ -#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) -#define CSR_EEPROM_REG_BIT_CMD (0x00000002) -#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC) -#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) - -/* EEPROM GP */ -#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ -#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) -#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000) -#define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001) -#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) -#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) - -/* One-time-programmable memory general purpose reg */ -#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */ -#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ -#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ -#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ - -/* GP REG */ -#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ -#define CSR_GP_REG_NO_POWER_SAVE (0x00000000) -#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) -#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) -#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) - - -/* CSR GIO */ -#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) - -/* - * UCODE-DRIVER GP (general purpose) mailbox register 1 - * Host driver and uCode write and/or read this register to communicate with - * each other. 
- * Bit fields: - * 4: UCODE_DISABLE - * Host sets this to request permanent halt of uCode, same as - * sending CARD_STATE command with "halt" bit set. - * 3: CT_KILL_EXIT - * Host sets this to request exit from CT_KILL state, i.e. host thinks - * device temperature is low enough to continue normal operation. - * 2: CMD_BLOCKED - * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL) - * to release uCode to clear all Tx and command queues, enter - * unassociated mode, and power down. - * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit. - * 1: SW_BIT_RFKILL - * Host sets this when issuing CARD_STATE command to request - * device sleep. - * 0: MAC_SLEEP - * uCode sets this when preparing a power-saving power-down. - * uCode resets this when power-up is complete and SRAM is sane. - * NOTE: device saves internal SRAM data to host when powering down, - * and must restore this data after powering back up. - * MAC_SLEEP is the best indication that restore is complete. - * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and - * do not need to save/restore it. - */ -#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) -#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) -#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) -#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) -#define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020) - -/* GP Driver */ -#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003) -#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000) -#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001) -#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) -#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) -#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008) - -#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080) - -/* GIO Chicken Bits (PCI Express bus link power management) */ -#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) -#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) - -/* LED */ -#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) -#define CSR_LED_REG_TURN_ON (0x60) -#define CSR_LED_REG_TURN_OFF (0x20) - -/* ANA_PLL */ -#define CSR50_ANA_PLL_CFG_VAL (0x00880300) - -/* HPET MEM debug */ -#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) - -/* DRAM INT TABLE */ -#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) -#define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28) -#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) - -/* - * SHR target access (Shared block memory space) - * - * Shared internal registers can be accessed directly from PCI bus through SHR - * arbiter without need for the MAC HW to be powered up. This is possible due to - * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and - * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers. - * - * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW - * need not be powered up so no "grab inc access" is required. - */ - -/* - * Registers for accessing shared registers (e.g. SHR_APMG_GP1, - * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC), - * first, write to the control register: - * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) - * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access) - * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0]. 
- * - * To write the register, first, write to the data register - * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then: - * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) - * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access) - */ -#define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec) -#define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4) - -/* - * HBUS (Host-side Bus) - * - * HBUS registers are mapped directly into PCI bus space, but are used - * to indirectly access device's internal memory or registers that - * may be powered-down. - * - * Use iwl_write_direct32()/iwl_read_direct32() family for these registers; - * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ - * to make sure the MAC (uCode processor, etc.) is powered up for accessing - * internal resources. - * - * Do not use iwl_write32()/iwl_read32() family to access these registers; - * these provide only simple PCI bus access, without waking up the MAC. - */ -#define HBUS_BASE (0x400) - -/* - * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM - * structures, error log, event log, verifying uCode load). - * First write to address register, then read from or write to data register - * to complete the job. Once the address register is set up, accesses to - * data registers auto-increment the address by one dword. - * Bit usage for address registers (read or write): - * 0-31: memory address within device - */ -#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) -#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) -#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) -#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) - -/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */ -#define HBUS_TARG_MBX_C (HBUS_BASE+0x030) -#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) - -/* - * Registers for accessing device's internal peripheral registers - * (e.g. SCD, BSM, etc.). First write to address register, - * then read from or write to data register to complete the job. - * Bit usage for address registers (read or write): - * 0-15: register address (offset) within device - * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword) - */ -#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) -#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) -#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) -#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) - -/* Used to enable DBGM */ -#define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c) - -/* - * Per-Tx-queue write pointer (index, really!) - * Indicates index to next TFD that driver will fill (1 past latest filled). 
- * Bit usage: - * 0-7: queue write index - * 11-8: queue selector - */ -#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) - -/********************************************************** - * CSR values - **********************************************************/ - /* - * host interrupt timeout value - * used with setting interrupt coalescing timer - * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit - * - * default interrupt coalescing timer is 64 x 32 = 2048 usecs - */ -#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) -#define IWL_HOST_INT_TIMEOUT_DEF (0x40) -#define IWL_HOST_INT_TIMEOUT_MIN (0x0) -#define IWL_HOST_INT_OPER_MODE BIT(31) - -/***************************************************************************** - * 7000/3000 series SHR DTS addresses * - *****************************************************************************/ - -/* Diode Results Register Structure: */ -enum dtd_diode_reg { - DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */ - DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */ - DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */ - DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */ - DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */ - DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */ -/* Those are the masks INSIDE the flags bit-field: */ - DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0, - DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */ - DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7, - DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ -}; - -#endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c deleted file mode 100644 index 09feff4fa226..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-debug.c +++ /dev/null @@ -1,136 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include -#include -#include "iwl-drv.h" -#include "iwl-debug.h" -#include "iwl-devtrace.h" - -#define __iwl_fn(fn) \ -void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ -{ \ - struct va_format vaf = { \ - .fmt = fmt, \ - }; \ - va_list args; \ - \ - va_start(args, fmt); \ - vaf.va = &args; \ - dev_ ##fn(dev, "%pV", &vaf); \ - trace_iwlwifi_ ##fn(&vaf); \ - va_end(args); \ -} - -__iwl_fn(warn) -IWL_EXPORT_SYMBOL(__iwl_warn); -__iwl_fn(info) -IWL_EXPORT_SYMBOL(__iwl_info); -__iwl_fn(crit) -IWL_EXPORT_SYMBOL(__iwl_crit); - -void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only, - const char *fmt, ...) -{ - struct va_format vaf = { - .fmt = fmt, - }; - va_list args; - - va_start(args, fmt); - vaf.va = &args; - if (!trace_only) { - if (rfkill_prefix) - dev_err(dev, "(RFKILL) %pV", &vaf); - else - dev_err(dev, "%pV", &vaf); - } - trace_iwlwifi_err(&vaf); - va_end(args); -} -IWL_EXPORT_SYMBOL(__iwl_err); - -#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) -void __iwl_dbg(struct device *dev, - u32 level, bool limit, const char *function, - const char *fmt, ...) -{ - struct va_format vaf = { - .fmt = fmt, - }; - va_list args; - - va_start(args, fmt); - vaf.va = &args; -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_have_debug_level(level) && - (!limit || net_ratelimit())) - dev_printk(KERN_DEBUG, dev, "%c %s %pV", - in_interrupt() ? 'I' : 'U', function, &vaf); -#endif - trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf); - va_end(args); -} -IWL_EXPORT_SYMBOL(__iwl_dbg); -#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h deleted file mode 100644 index 9bb36d79c2bd..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ /dev/null @@ -1,225 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_debug_h__ -#define __iwl_debug_h__ - -#include "iwl-modparams.h" - - -static inline bool iwl_have_debug_level(u32 level) -{ -#ifdef CONFIG_IWLWIFI_DEBUG - return iwlwifi_mod_params.debug_level & level; -#else - return false; -#endif -} - -void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, - const char *fmt, ...) __printf(4, 5); -void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3); -void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3); -void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3); - -/* not all compilers can evaluate strlen() at compile time, so use sizeof() */ -#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n') - -/* No matter what is m (priv, bus, trans), this will work */ -#define IWL_ERR_DEV(d, f, a...) \ - do { \ - CHECK_FOR_NEWLINE(f); \ - __iwl_err((d), false, false, f, ## a); \ - } while (0) -#define IWL_ERR(m, f, a...) \ - IWL_ERR_DEV((m)->dev, f, ## a) -#define IWL_WARN(m, f, a...) \ - do { \ - CHECK_FOR_NEWLINE(f); \ - __iwl_warn((m)->dev, f, ## a); \ - } while (0) -#define IWL_INFO(m, f, a...) \ - do { \ - CHECK_FOR_NEWLINE(f); \ - __iwl_info((m)->dev, f, ## a); \ - } while (0) -#define IWL_CRIT(m, f, a...) \ - do { \ - CHECK_FOR_NEWLINE(f); \ - __iwl_crit((m)->dev, f, ## a); \ - } while (0) - -#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) -void __iwl_dbg(struct device *dev, - u32 level, bool limit, const char *function, - const char *fmt, ...) __printf(5, 6); -#else -__printf(5, 6) static inline void -__iwl_dbg(struct device *dev, - u32 level, bool limit, const char *function, - const char *fmt, ...) -{} -#endif - -#define iwl_print_hex_error(m, p, len) \ -do { \ - print_hex_dump(KERN_ERR, "iwl data: ", \ - DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ -} while (0) - -#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...) \ - do { \ - CHECK_FOR_NEWLINE(fmt); \ - __iwl_dbg(dev, level, limit, __func__, fmt, ##args); \ - } while (0) -#define IWL_DEBUG(m, level, fmt, args...) \ - __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args) -#define IWL_DEBUG_DEV(dev, level, fmt, args...) \ - __IWL_DEBUG_DEV(dev, level, false, fmt, ##args) -#define IWL_DEBUG_LIMIT(m, level, fmt, args...) 
\ - __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args) - -#ifdef CONFIG_IWLWIFI_DEBUG -#define iwl_print_hex_dump(m, level, p, len) \ -do { \ - if (iwl_have_debug_level(level)) \ - print_hex_dump(KERN_DEBUG, "iwl data: ", \ - DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ -} while (0) -#else -#define iwl_print_hex_dump(m, level, p, len) -#endif /* CONFIG_IWLWIFI_DEBUG */ - -/* - * To use the debug system: - * - * If you are defining a new debug classification, simply add it to the #define - * list here in the form of - * - * #define IWL_DL_xxxx VALUE - * - * where xxxx should be the name of the classification (for example, WEP). - * - * You then need to either add a IWL_xxxx_DEBUG() macro definition for your - * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want - * to send output to that classification. - * - * The active debug levels can be accessed via files - * - * /sys/module/iwlwifi/parameters/debug - * when CONFIG_IWLWIFI_DEBUG=y. - * - * /sys/kernel/debug/phy0/iwlwifi/debug/debug_level - * when CONFIG_IWLWIFI_DEBUGFS=y. - * - */ - -/* 0x0000000F - 0x00000001 */ -#define IWL_DL_INFO 0x00000001 -#define IWL_DL_MAC80211 0x00000002 -#define IWL_DL_HCMD 0x00000004 -#define IWL_DL_TDLS 0x00000008 -/* 0x000000F0 - 0x00000010 */ -#define IWL_DL_QUOTA 0x00000010 -#define IWL_DL_TE 0x00000020 -#define IWL_DL_EEPROM 0x00000040 -#define IWL_DL_RADIO 0x00000080 -/* 0x00000F00 - 0x00000100 */ -#define IWL_DL_POWER 0x00000100 -#define IWL_DL_TEMP 0x00000200 -#define IWL_DL_RPM 0x00000400 -#define IWL_DL_SCAN 0x00000800 -/* 0x0000F000 - 0x00001000 */ -#define IWL_DL_ASSOC 0x00001000 -#define IWL_DL_DROP 0x00002000 -#define IWL_DL_LAR 0x00004000 -#define IWL_DL_COEX 0x00008000 -/* 0x000F0000 - 0x00010000 */ -#define IWL_DL_FW 0x00010000 -#define IWL_DL_RF_KILL 0x00020000 -#define IWL_DL_FW_ERRORS 0x00040000 -#define IWL_DL_LED 0x00080000 -/* 0x00F00000 - 0x00100000 */ -#define IWL_DL_RATE 0x00100000 -#define IWL_DL_CALIB 0x00200000 -#define IWL_DL_WEP 0x00400000 -#define IWL_DL_TX 0x00800000 -/* 0x0F000000 - 0x01000000 */ -#define IWL_DL_RX 0x01000000 -#define IWL_DL_ISR 0x02000000 -#define IWL_DL_HT 0x04000000 -#define IWL_DL_EXTERNAL 0x08000000 -/* 0xF0000000 - 0x10000000 */ -#define IWL_DL_11H 0x10000000 -#define IWL_DL_STATS 0x20000000 -#define IWL_DL_TX_REPLY 0x40000000 -#define IWL_DL_TX_QUEUES 0x80000000 - -#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) -#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a) -#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) -#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a) -#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) -#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) -#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) -#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) -#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) -#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) -#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) -#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) -#define IWL_DEBUG_QUOTA(p, f, a...) IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a) -#define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a) -#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a) -#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) -#define IWL_DEBUG_FW(p, f, a...) 
IWL_DEBUG(p, IWL_DL_FW, f, ## a) -#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) -#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a) -#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) -#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) -#define IWL_DEBUG_COEX(p, f, a...) IWL_DEBUG(p, IWL_DL_COEX, f, ## a) -#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) -#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) -#define IWL_DEBUG_ASSOC(p, f, a...) \ - IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) -#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) -#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a) -#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a) -#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ - IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) -#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) -#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a) -#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) -#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) -#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) -#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a) -#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a) - -#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h deleted file mode 100644 index 71a78cede9b0..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h +++ /dev/null @@ -1,80 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ) -#define __IWLWIFI_DEVICE_TRACE_DATA - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM iwlwifi_data - -TRACE_EVENT(iwlwifi_dev_tx_data, - TP_PROTO(const struct device *dev, - struct sk_buff *skb, - u8 hdr_len, size_t data_len), - TP_ARGS(dev, skb, hdr_len, data_len), - TP_STRUCT__entry( - DEV_ENTRY - - __dynamic_array(u8, data, iwl_trace_data(skb) ? 
data_len : 0) - ), - TP_fast_assign( - DEV_ASSIGN; - if (iwl_trace_data(skb)) - skb_copy_bits(skb, hdr_len, - __get_dynamic_array(data), data_len); - ), - TP_printk("[%s] TX frame data", __get_str(dev)) -); - -TRACE_EVENT(iwlwifi_dev_rx_data, - TP_PROTO(const struct device *dev, - const struct iwl_trans *trans, - void *rxbuf, size_t len), - TP_ARGS(dev, trans, rxbuf, len), - TP_STRUCT__entry( - DEV_ENTRY - - __dynamic_array(u8, data, - len - iwl_rx_trace_len(trans, rxbuf, len)) - ), - TP_fast_assign( - size_t offs = iwl_rx_trace_len(trans, rxbuf, len); - DEV_ASSIGN; - if (offs < len) - memcpy(__get_dynamic_array(data), - ((u8 *)rxbuf) + offs, len - offs); - ), - TP_printk("[%s] RX frame data", __get_str(dev)) -); -#endif /* __IWLWIFI_DEVICE_TRACE_DATA */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE iwl-devtrace-data -#include diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-io.h deleted file mode 100644 index f62c54485852..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-io.h +++ /dev/null @@ -1,155 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#if !defined(__IWLWIFI_DEVICE_TRACE_IO) || defined(TRACE_HEADER_MULTI_READ) -#define __IWLWIFI_DEVICE_TRACE_IO - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM iwlwifi_io - -TRACE_EVENT(iwlwifi_dev_ioread32, - TP_PROTO(const struct device *dev, u32 offs, u32 val), - TP_ARGS(dev, offs, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, offs) - __field(u32, val) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->offs = offs; - __entry->val = val; - ), - TP_printk("[%s] read io[%#x] = %#x", - __get_str(dev), __entry->offs, __entry->val) -); - -TRACE_EVENT(iwlwifi_dev_iowrite8, - TP_PROTO(const struct device *dev, u32 offs, u8 val), - TP_ARGS(dev, offs, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, offs) - __field(u8, val) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->offs = offs; - __entry->val = val; - ), - TP_printk("[%s] write io[%#x] = %#x)", - __get_str(dev), __entry->offs, __entry->val) -); - -TRACE_EVENT(iwlwifi_dev_iowrite32, - TP_PROTO(const struct device *dev, u32 offs, u32 val), - TP_ARGS(dev, offs, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, offs) - __field(u32, val) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->offs = offs; - __entry->val = val; - ), - TP_printk("[%s] write io[%#x] = %#x)", - __get_str(dev), __entry->offs, __entry->val) -); - -TRACE_EVENT(iwlwifi_dev_iowrite_prph32, - TP_PROTO(const struct device *dev, u32 offs, u32 val), - TP_ARGS(dev, offs, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, offs) - __field(u32, val) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->offs = offs; - __entry->val = val; - ), - TP_printk("[%s] write PRPH[%#x] = %#x)", - __get_str(dev), __entry->offs, __entry->val) -); - -TRACE_EVENT(iwlwifi_dev_ioread_prph32, - TP_PROTO(const struct device *dev, u32 offs, u32 val), - TP_ARGS(dev, offs, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, offs) - __field(u32, val) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->offs = offs; - __entry->val = val; - ), - TP_printk("[%s] read PRPH[%#x] = %#x", - __get_str(dev), __entry->offs, __entry->val) -); - -TRACE_EVENT(iwlwifi_dev_irq, - TP_PROTO(const struct device *dev), - TP_ARGS(dev), - TP_STRUCT__entry( - DEV_ENTRY - ), - TP_fast_assign( - DEV_ASSIGN; - ), - /* TP_printk("") doesn't compile */ - TP_printk("%d", 0) -); - -TRACE_EVENT(iwlwifi_dev_ict_read, - TP_PROTO(const struct device *dev, u32 index, u32 value), - TP_ARGS(dev, index, value), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, index) - __field(u32, value) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->index = index; - __entry->value = value; - ), - TP_printk("[%s] read ict[%d] = %#.8x", - __get_str(dev), __entry->index, __entry->value) -); -#endif /* __IWLWIFI_DEVICE_TRACE_IO */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE iwl-devtrace-io -#include diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h deleted file mode 100644 index eb4b99a1c8cd..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h +++ /dev/null @@ -1,209 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ) -#define __IWLWIFI_DEVICE_TRACE_IWLWIFI - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM iwlwifi - -TRACE_EVENT(iwlwifi_dev_hcmd, - TP_PROTO(const struct device *dev, - struct iwl_host_cmd *cmd, u16 total_size, - struct iwl_cmd_header_wide *hdr), - TP_ARGS(dev, cmd, total_size, hdr), - TP_STRUCT__entry( - DEV_ENTRY - __dynamic_array(u8, hcmd, total_size) - __field(u32, flags) - ), - TP_fast_assign( - int i, offset = sizeof(struct iwl_cmd_header); - - if (hdr->group_id) - offset = sizeof(struct iwl_cmd_header_wide); - - DEV_ASSIGN; - __entry->flags = cmd->flags; - memcpy(__get_dynamic_array(hcmd), hdr, offset); - - for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { - if (!cmd->len[i]) - continue; - memcpy((u8 *)__get_dynamic_array(hcmd) + offset, - cmd->data[i], cmd->len[i]); - offset += cmd->len[i]; - } - ), - TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)", - __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1], - ((u8 *)__get_dynamic_array(hcmd))[0], - __entry->flags & CMD_ASYNC ? "a" : "") -); - -TRACE_EVENT(iwlwifi_dev_rx, - TP_PROTO(const struct device *dev, const struct iwl_trans *trans, - struct iwl_rx_packet *pkt, size_t len), - TP_ARGS(dev, trans, pkt, len), - TP_STRUCT__entry( - DEV_ENTRY - __field(u8, cmd) - __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len)) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->cmd = pkt->hdr.cmd; - memcpy(__get_dynamic_array(rxbuf), pkt, - iwl_rx_trace_len(trans, pkt, len)); - ), - TP_printk("[%s] RX cmd %#.2x", - __get_str(dev), __entry->cmd) -); - -TRACE_EVENT(iwlwifi_dev_tx, - TP_PROTO(const struct device *dev, struct sk_buff *skb, - void *tfd, size_t tfdlen, - void *buf0, size_t buf0_len, - void *buf1, size_t buf1_len), - TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), - TP_STRUCT__entry( - DEV_ENTRY - - __field(size_t, framelen) - __dynamic_array(u8, tfd, tfdlen) - - /* - * Do not insert between or below these items, - * we want to keep the frame together (except - * for the possible padding). - */ - __dynamic_array(u8, buf0, buf0_len) - __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 
0 : buf1_len) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->framelen = buf0_len + buf1_len; - memcpy(__get_dynamic_array(tfd), tfd, tfdlen); - memcpy(__get_dynamic_array(buf0), buf0, buf0_len); - if (!iwl_trace_data(skb)) - memcpy(__get_dynamic_array(buf1), buf1, buf1_len); - ), - TP_printk("[%s] TX %.2x (%zu bytes)", - __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0], - __entry->framelen) -); - -TRACE_EVENT(iwlwifi_dev_ucode_error, - TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low, - u32 data1, u32 data2, u32 line, u32 blink1, - u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, - u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver, - u32 brd_ver), - TP_ARGS(dev, desc, tsf_low, data1, data2, line, - blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2, - gp3, major, minor, hw_ver, brd_ver), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, desc) - __field(u32, tsf_low) - __field(u32, data1) - __field(u32, data2) - __field(u32, line) - __field(u32, blink1) - __field(u32, blink2) - __field(u32, ilink1) - __field(u32, ilink2) - __field(u32, bcon_time) - __field(u32, gp1) - __field(u32, gp2) - __field(u32, gp3) - __field(u32, major) - __field(u32, minor) - __field(u32, hw_ver) - __field(u32, brd_ver) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->desc = desc; - __entry->tsf_low = tsf_low; - __entry->data1 = data1; - __entry->data2 = data2; - __entry->line = line; - __entry->blink1 = blink1; - __entry->blink2 = blink2; - __entry->ilink1 = ilink1; - __entry->ilink2 = ilink2; - __entry->bcon_time = bcon_time; - __entry->gp1 = gp1; - __entry->gp2 = gp2; - __entry->gp3 = gp3; - __entry->major = major; - __entry->minor = minor; - __entry->hw_ver = hw_ver; - __entry->brd_ver = brd_ver; - ), - TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, " - "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X " - "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X " - "minor 0x%08X hw 0x%08X brd 0x%08X", - __get_str(dev), __entry->desc, __entry->tsf_low, - __entry->data1, - __entry->data2, __entry->line, __entry->blink1, - __entry->blink2, __entry->ilink1, __entry->ilink2, - __entry->bcon_time, __entry->gp1, __entry->gp2, - __entry->gp3, __entry->major, __entry->minor, - __entry->hw_ver, __entry->brd_ver) -); - -TRACE_EVENT(iwlwifi_dev_ucode_event, - TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), - TP_ARGS(dev, time, data, ev), - TP_STRUCT__entry( - DEV_ENTRY - - __field(u32, time) - __field(u32, data) - __field(u32, ev) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->time = time; - __entry->data = data; - __entry->ev = ev; - ), - TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u", - __get_str(dev), __entry->time, __entry->data, __entry->ev) -); -#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE iwl-devtrace-iwlwifi -#include diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h deleted file mode 100644 index a3b3c2465f89..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-msg.h +++ /dev/null @@ -1,97 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#if !defined(__IWLWIFI_DEVICE_TRACE_MSG) || defined(TRACE_HEADER_MULTI_READ) -#define __IWLWIFI_DEVICE_TRACE_MSG - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM iwlwifi_msg - -#define MAX_MSG_LEN 110 - -DECLARE_EVENT_CLASS(iwlwifi_msg_event, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf), - TP_STRUCT__entry( - __dynamic_array(char, msg, MAX_MSG_LEN) - ), - TP_fast_assign( - WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), - MAX_MSG_LEN, vaf->fmt, - *vaf->va) >= MAX_MSG_LEN); - ), - TP_printk("%s", __get_str(msg)) -); - -DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) -); - -DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) -); - -DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) -); - -DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) -); - -TRACE_EVENT(iwlwifi_dbg, - TP_PROTO(u32 level, bool in_interrupt, const char *function, - struct va_format *vaf), - TP_ARGS(level, in_interrupt, function, vaf), - TP_STRUCT__entry( - __field(u32, level) - __field(u8, in_interrupt) - __string(function, function) - __dynamic_array(char, msg, MAX_MSG_LEN) - ), - TP_fast_assign( - __entry->level = level; - __entry->in_interrupt = in_interrupt; - __assign_str(function, function); - WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), - MAX_MSG_LEN, vaf->fmt, - *vaf->va) >= MAX_MSG_LEN); - ), - TP_printk("%s", __get_str(msg)) -); -#endif /* __IWLWIFI_DEVICE_TRACE_MSG */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE iwl-devtrace-msg -#include diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h deleted file mode 100644 index 10839fae9cd9..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-ucode.h +++ /dev/null @@ -1,81 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#if !defined(__IWLWIFI_DEVICE_TRACE_UCODE) || defined(TRACE_HEADER_MULTI_READ) -#define __IWLWIFI_DEVICE_TRACE_UCODE - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM iwlwifi_ucode - -TRACE_EVENT(iwlwifi_dev_ucode_cont_event, - TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev), - TP_ARGS(dev, time, data, ev), - TP_STRUCT__entry( - DEV_ENTRY - - __field(u32, time) - __field(u32, data) - __field(u32, ev) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->time = time; - __entry->data = data; - __entry->ev = ev; - ), - TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u", - __get_str(dev), __entry->time, __entry->data, __entry->ev) -); - -TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, - TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry), - TP_ARGS(dev, wraps, n_entry, p_entry), - TP_STRUCT__entry( - DEV_ENTRY - - __field(u32, wraps) - __field(u32, n_entry) - __field(u32, p_entry) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->wraps = wraps; - __entry->n_entry = n_entry; - __entry->p_entry = p_entry; - ), - TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X", - __get_str(dev), __entry->wraps, __entry->n_entry, - __entry->p_entry) -); -#endif /* __IWLWIFI_DEVICE_TRACE_UCODE */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE iwl-devtrace-ucode -#include diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c deleted file mode 100644 index 90987d6f348e..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ /dev/null @@ -1,43 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include - -/* sparse doesn't like tracepoint macros */ -#ifndef __CHECKER__ -#include "iwl-trans.h" - -#define CREATE_TRACE_POINTS -#include "iwl-devtrace.h" - -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); -#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h deleted file mode 100644 index b87acd6a229b..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ /dev/null @@ -1,89 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __IWLWIFI_DEVICE_TRACE -#include -#include -#include -#include "iwl-trans.h" -#if !defined(__IWLWIFI_DEVICE_TRACE) -static inline bool iwl_trace_data(struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (void *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (!ieee80211_is_data(hdr->frame_control)) - return false; - return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO); -} - -static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, - void *rxbuf, size_t len) -{ - struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32)); - struct ieee80211_hdr *hdr; - - if (cmd->cmd != trans->rx_mpdu_cmd) - return len; - - hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) + - trans->rx_mpdu_cmd_hdr_size); - if (!ieee80211_is_data(hdr->frame_control)) - return len; - /* maybe try to identify EAPOL frames? */ - return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size + - ieee80211_hdrlen(hdr->frame_control); -} -#endif - -#define __IWLWIFI_DEVICE_TRACE - -#include -#include -#include "iwl-trans.h" - - -#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) -#undef TRACE_EVENT -#define TRACE_EVENT(name, proto, ...) \ -static inline void trace_ ## name(proto) {} -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(...) -#undef DEFINE_EVENT -#define DEFINE_EVENT(evt_class, name, proto, ...) 
\ -static inline void trace_ ## name(proto) {} -#endif - -#define DEV_ENTRY __string(dev, dev_name(dev)) -#define DEV_ASSIGN __assign_str(dev, dev_name(dev)) - -#include "iwl-devtrace-io.h" -#include "iwl-devtrace-ucode.h" -#include "iwl-devtrace-msg.h" -#include "iwl-devtrace-data.h" -#include "iwl-devtrace-iwlwifi.h" - -#endif /* __IWLWIFI_DEVICE_TRACE */ diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c deleted file mode 100644 index 463cadfbfccb..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ /dev/null @@ -1,1706 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include -#include -#include -#include -#include - -#include "iwl-drv.h" -#include "iwl-csr.h" -#include "iwl-debug.h" -#include "iwl-trans.h" -#include "iwl-op-mode.h" -#include "iwl-agn-hw.h" -#include "iwl-fw.h" -#include "iwl-config.h" -#include "iwl-modparams.h" - -/****************************************************************************** - * - * module boiler plate - * - ******************************************************************************/ - -#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux" -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_IWLWIFI_DEBUGFS -static struct dentry *iwl_dbgfs_root; -#endif - -/** - * struct iwl_drv - drv common data - * @list: list of drv structures using this opmode - * @fw: the iwl_fw structure - * @op_mode: the running op_mode - * @trans: transport layer - * @dev: for debug prints only - * @cfg: configuration struct - * @fw_index: firmware revision to try loading - * @firmware_name: composite filename of ucode file to load - * @request_firmware_complete: the firmware has been obtained from user space - */ -struct iwl_drv { - struct list_head list; - struct iwl_fw fw; - - struct iwl_op_mode *op_mode; - struct iwl_trans *trans; - struct device *dev; - const struct iwl_cfg *cfg; - - int fw_index; /* firmware we're trying to load */ - char firmware_name[32]; /* name of firmware file to load */ - - struct completion request_firmware_complete; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct dentry *dbgfs_drv; - struct dentry *dbgfs_trans; - struct dentry *dbgfs_op_mode; -#endif -}; - -enum { - DVM_OP_MODE = 0, - MVM_OP_MODE = 1, -}; - -/* Protects the table contents, i.e. the ops pointer & drv list */ -static struct mutex iwlwifi_opmode_table_mtx; -static struct iwlwifi_opmode_table { - const char *name; /* name: iwldvm, iwlmvm, etc */ - const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ - struct list_head drv; /* list of devices using this op_mode */ -} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */ - [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL }, - [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL }, -}; - -#define IWL_DEFAULT_SCAN_CHANNELS 40 - -/* - * struct fw_sec: Just for the image parsing process. - * For the fw storage we are using struct fw_desc. 
- */ -struct fw_sec { - const void *data; /* the sec data */ - size_t size; /* section size */ - u32 offset; /* offset of writing in the device */ -}; - -static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) -{ - vfree(desc->data); - desc->data = NULL; - desc->len = 0; -} - -static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img) -{ - int i; - for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) - iwl_free_fw_desc(drv, &img->sec[i]); -} - -static void iwl_dealloc_ucode(struct iwl_drv *drv) -{ - int i; - - kfree(drv->fw.dbg_dest_tlv); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) - kfree(drv->fw.dbg_conf_tlv[i]); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) - kfree(drv->fw.dbg_trigger_tlv[i]); - - for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) - iwl_free_fw_img(drv, drv->fw.img + i); -} - -static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, - struct fw_sec *sec) -{ - void *data; - - desc->data = NULL; - - if (!sec || !sec->size) - return -EINVAL; - - data = vmalloc(sec->size); - if (!data) - return -ENOMEM; - - desc->len = sec->size; - desc->offset = sec->offset; - memcpy(data, sec->data, desc->len); - desc->data = data; - - return 0; -} - -static void iwl_req_fw_callback(const struct firmware *ucode_raw, - void *context); - -#define UCODE_EXPERIMENTAL_INDEX 100 -#define UCODE_EXPERIMENTAL_TAG "exp" - -static int iwl_request_firmware(struct iwl_drv *drv, bool first) -{ - const char *name_pre = drv->cfg->fw_name_pre; - char tag[8]; - - if (first) { -#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE - drv->fw_index = UCODE_EXPERIMENTAL_INDEX; - strcpy(tag, UCODE_EXPERIMENTAL_TAG); - } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) { -#endif - drv->fw_index = drv->cfg->ucode_api_max; - sprintf(tag, "%d", drv->fw_index); - } else { - drv->fw_index--; - sprintf(tag, "%d", drv->fw_index); - } - - if (drv->fw_index < drv->cfg->ucode_api_min) { - IWL_ERR(drv, "no suitable firmware found!\n"); - return -ENOENT; - } - - snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", - name_pre, tag); - - /* - * Starting 8000B - FW name format has changed. This overwrites the - * previous name and uses the new format. - */ - if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); - - snprintf(drv->firmware_name, sizeof(drv->firmware_name), - "%s%c-%s.ucode", name_pre, rev_step, tag); - } - - IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", - (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) - ? "EXPERIMENTAL " : "", - drv->firmware_name); - - return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, - drv->trans->dev, - GFP_KERNEL, drv, iwl_req_fw_callback); -} - -struct fw_img_parsing { - struct fw_sec sec[IWL_UCODE_SECTION_MAX]; - int sec_counter; -}; - -/* - * struct fw_sec_parsing: to extract fw section and it's offset from tlv - */ -struct fw_sec_parsing { - __le32 offset; - const u8 data[]; -} __packed; - -/** - * struct iwl_tlv_calib_data - parse the default calib data from TLV - * - * @ucode_type: the uCode to which the following default calib relates. - * @calib: default calibrations. 
- */ -struct iwl_tlv_calib_data { - __le32 ucode_type; - struct iwl_tlv_calib_ctrl calib; -} __packed; - -struct iwl_firmware_pieces { - struct fw_img_parsing img[IWL_UCODE_TYPE_MAX]; - - u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; - u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; - - /* FW debug data parsed for driver usage */ - struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; - struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; - size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; - struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; - size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; -}; - -/* - * These functions are just to extract uCode section data from the pieces - * structure. - */ -static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces, - enum iwl_ucode_type type, - int sec) -{ - return &pieces->img[type].sec[sec]; -} - -static void set_sec_data(struct iwl_firmware_pieces *pieces, - enum iwl_ucode_type type, - int sec, - const void *data) -{ - pieces->img[type].sec[sec].data = data; -} - -static void set_sec_size(struct iwl_firmware_pieces *pieces, - enum iwl_ucode_type type, - int sec, - size_t size) -{ - pieces->img[type].sec[sec].size = size; -} - -static size_t get_sec_size(struct iwl_firmware_pieces *pieces, - enum iwl_ucode_type type, - int sec) -{ - return pieces->img[type].sec[sec].size; -} - -static void set_sec_offset(struct iwl_firmware_pieces *pieces, - enum iwl_ucode_type type, - int sec, - u32 offset) -{ - pieces->img[type].sec[sec].offset = offset; -} - -static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) -{ - int i, j; - struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data; - struct iwl_fw_cipher_scheme *fwcs; - struct ieee80211_cipher_scheme *cs; - u32 cipher; - - if (len < sizeof(*l) || - len < sizeof(l->size) + l->size * sizeof(l->cs[0])) - return -EINVAL; - - for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) { - fwcs = &l->cs[j]; - cipher = le32_to_cpu(fwcs->cipher); - - /* we skip schemes with zero cipher suite selector */ - if (!cipher) - continue; - - cs = &fw->cs[j++]; - cs->cipher = cipher; - cs->iftype = BIT(NL80211_IFTYPE_STATION); - cs->hdr_len = fwcs->hdr_len; - cs->pn_len = fwcs->pn_len; - cs->pn_off = fwcs->pn_off; - cs->key_idx_off = fwcs->key_idx_off; - cs->key_idx_mask = fwcs->key_idx_mask; - cs->key_idx_shift = fwcs->key_idx_shift; - cs->mic_len = fwcs->mic_len; - } - - return 0; -} - -static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, - const u32 len) -{ - struct iwl_fw_gscan_capabilities *fw_capa = (void *)data; - struct iwl_gscan_capabilities *capa = &fw->gscan_capa; - - if (len < sizeof(*fw_capa)) - return -EINVAL; - - capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size); - capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets); - capa->max_ap_cache_per_scan = - le32_to_cpu(fw_capa->max_ap_cache_per_scan); - capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size); - capa->max_scan_reporting_threshold = - le32_to_cpu(fw_capa->max_scan_reporting_threshold); - capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps); - capa->max_significant_change_aps = - le32_to_cpu(fw_capa->max_significant_change_aps); - capa->max_bssid_history_entries = - le32_to_cpu(fw_capa->max_bssid_history_entries); - return 0; -} - -/* - * Gets uCode section from tlv. 
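The parser that follows, iwl_parse_tlv_firmware(), walks the .ucode file as a flat sequence of TLV records: a 32-bit type, a 32-bit payload length, then the payload padded out to a 4-byte boundary. A minimal userspace sketch of that walk, assuming only the layout implied by the loop below (the struct mirrors iwl_ucode_tlv; walk_tlvs() and its callback are illustrative names, and the in-kernel le32_to_cpu() swaps are reduced to comments):

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch of the TLV walk performed below; not part of the patch. */
struct tlv_hdr {                                 /* mirrors struct iwl_ucode_tlv */
        uint32_t type;                           /* little-endian in the file */
        uint32_t length;                         /* payload bytes, excluding padding */
        uint8_t  data[];
};

static int walk_tlvs(const uint8_t *data, size_t len,
                     void (*cb)(uint32_t type, const uint8_t *p, uint32_t n))
{
        while (len >= sizeof(struct tlv_hdr)) {
                const struct tlv_hdr *tlv = (const void *)data;
                uint32_t tlv_len = tlv->length;          /* le32_to_cpu() in-kernel */
                uint32_t padded  = (tlv_len + 3) & ~3u;  /* ALIGN(tlv_len, 4) */

                len -= sizeof(*tlv);
                if (len < tlv_len)
                        return -1;                       /* record overruns the file */
                cb(tlv->type, tlv->data, tlv_len);       /* the driver switches on type here */
                data += sizeof(*tlv) + padded;
                len   = len >= padded ? len - padded : 0;
        }
        return len ? -1 : 0;                             /* trailing bytes are an error */
}

In the driver the large switch over tlv_type plays the role of the callback; for the section TLVs, iwl_store_ucode_sec() below additionally peels a 32-bit device load offset off the front of the payload (struct fw_sec_parsing) before recording the section data and size.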
- */ -static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, - const void *data, enum iwl_ucode_type type, - int size) -{ - struct fw_img_parsing *img; - struct fw_sec *sec; - struct fw_sec_parsing *sec_parse; - - if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX)) - return -1; - - sec_parse = (struct fw_sec_parsing *)data; - - img = &pieces->img[type]; - sec = &img->sec[img->sec_counter]; - - sec->offset = le32_to_cpu(sec_parse->offset); - sec->data = sec_parse->data; - sec->size = size - sizeof(sec_parse->offset); - - ++img->sec_counter; - - return 0; -} - -static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) -{ - struct iwl_tlv_calib_data *def_calib = - (struct iwl_tlv_calib_data *)data; - u32 ucode_type = le32_to_cpu(def_calib->ucode_type); - if (ucode_type >= IWL_UCODE_TYPE_MAX) { - IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n", - ucode_type); - return -EINVAL; - } - drv->fw.default_calib[ucode_type].flow_trigger = - def_calib->calib.flow_trigger; - drv->fw.default_calib[ucode_type].event_trigger = - def_calib->calib.event_trigger; - - return 0; -} - -static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, - struct iwl_ucode_capabilities *capa) -{ - const struct iwl_ucode_api *ucode_api = (void *)data; - u32 api_index = le32_to_cpu(ucode_api->api_index); - u32 api_flags = le32_to_cpu(ucode_api->api_flags); - int i; - - if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) { - IWL_ERR(drv, "api_index larger than supported by driver\n"); - /* don't return an error so we can load FW that has more bits */ - return 0; - } - - for (i = 0; i < 32; i++) { - if (api_flags & BIT(i)) - __set_bit(i + 32 * api_index, capa->_api); - } - - return 0; -} - -static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, - struct iwl_ucode_capabilities *capa) -{ - const struct iwl_ucode_capa *ucode_capa = (void *)data; - u32 api_index = le32_to_cpu(ucode_capa->api_index); - u32 api_flags = le32_to_cpu(ucode_capa->api_capa); - int i; - - if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) { - IWL_ERR(drv, "api_index larger than supported by driver\n"); - /* don't return an error so we can load FW that has more bits */ - return 0; - } - - for (i = 0; i < 32; i++) { - if (api_flags & BIT(i)) - __set_bit(i + 32 * api_index, capa->_capa); - } - - return 0; -} - -static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, - const struct firmware *ucode_raw, - struct iwl_firmware_pieces *pieces) -{ - struct iwl_ucode_header *ucode = (void *)ucode_raw->data; - u32 api_ver, hdr_size, build; - char buildstr[25]; - const u8 *src; - - drv->fw.ucode_ver = le32_to_cpu(ucode->ver); - api_ver = IWL_UCODE_API(drv->fw.ucode_ver); - - switch (api_ver) { - default: - hdr_size = 28; - if (ucode_raw->size < hdr_size) { - IWL_ERR(drv, "File size too small!\n"); - return -EINVAL; - } - build = le32_to_cpu(ucode->u.v2.build); - set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, - le32_to_cpu(ucode->u.v2.inst_size)); - set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, - le32_to_cpu(ucode->u.v2.data_size)); - set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, - le32_to_cpu(ucode->u.v2.init_size)); - set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, - le32_to_cpu(ucode->u.v2.init_data_size)); - src = ucode->u.v2.data; - break; - case 0: - case 1: - case 2: - hdr_size = 24; - if (ucode_raw->size < hdr_size) { - IWL_ERR(drv, "File size too small!\n"); - return -EINVAL; - } - build = 0; - 
set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, - le32_to_cpu(ucode->u.v1.inst_size)); - set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, - le32_to_cpu(ucode->u.v1.data_size)); - set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, - le32_to_cpu(ucode->u.v1.init_size)); - set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, - le32_to_cpu(ucode->u.v1.init_data_size)); - src = ucode->u.v1.data; - break; - } - - if (build) - sprintf(buildstr, " build %u%s", build, - (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) - ? " (EXP)" : ""); - else - buildstr[0] = '\0'; - - snprintf(drv->fw.fw_version, - sizeof(drv->fw.fw_version), - "%u.%u.%u.%u%s", - IWL_UCODE_MAJOR(drv->fw.ucode_ver), - IWL_UCODE_MINOR(drv->fw.ucode_ver), - IWL_UCODE_API(drv->fw.ucode_ver), - IWL_UCODE_SERIAL(drv->fw.ucode_ver), - buildstr); - - /* Verify size of file vs. image size info in file's header */ - - if (ucode_raw->size != hdr_size + - get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) + - get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) + - get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) + - get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) { - - IWL_ERR(drv, - "uCode file size %d does not match expected size\n", - (int)ucode_raw->size); - return -EINVAL; - } - - - set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src); - src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST); - set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, - IWLAGN_RTC_INST_LOWER_BOUND); - set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src); - src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA); - set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, - IWLAGN_RTC_DATA_LOWER_BOUND); - set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src); - src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST); - set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, - IWLAGN_RTC_INST_LOWER_BOUND); - set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src); - src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA); - set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, - IWLAGN_RTC_DATA_LOWER_BOUND); - return 0; -} - -static int iwl_parse_tlv_firmware(struct iwl_drv *drv, - const struct firmware *ucode_raw, - struct iwl_firmware_pieces *pieces, - struct iwl_ucode_capabilities *capa) -{ - struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data; - struct iwl_ucode_tlv *tlv; - size_t len = ucode_raw->size; - const u8 *data; - u32 tlv_len; - u32 usniffer_img; - enum iwl_ucode_tlv_type tlv_type; - const u8 *tlv_data; - char buildstr[25]; - u32 build, paging_mem_size; - int num_of_cpus; - bool usniffer_images = false; - bool usniffer_req = false; - bool gscan_capa = false; - - if (len < sizeof(*ucode)) { - IWL_ERR(drv, "uCode has invalid length: %zd\n", len); - return -EINVAL; - } - - if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) { - IWL_ERR(drv, "invalid uCode magic: 0X%x\n", - le32_to_cpu(ucode->magic)); - return -EINVAL; - } - - drv->fw.ucode_ver = le32_to_cpu(ucode->ver); - memcpy(drv->fw.human_readable, ucode->human_readable, - sizeof(drv->fw.human_readable)); - build = le32_to_cpu(ucode->build); - - if (build) - sprintf(buildstr, " build %u%s", build, - (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) - ? 
" (EXP)" : ""); - else - buildstr[0] = '\0'; - - snprintf(drv->fw.fw_version, - sizeof(drv->fw.fw_version), - "%u.%u.%u.%u%s", - IWL_UCODE_MAJOR(drv->fw.ucode_ver), - IWL_UCODE_MINOR(drv->fw.ucode_ver), - IWL_UCODE_API(drv->fw.ucode_ver), - IWL_UCODE_SERIAL(drv->fw.ucode_ver), - buildstr); - - data = ucode->data; - - len -= sizeof(*ucode); - - while (len >= sizeof(*tlv)) { - len -= sizeof(*tlv); - tlv = (void *)data; - - tlv_len = le32_to_cpu(tlv->length); - tlv_type = le32_to_cpu(tlv->type); - tlv_data = tlv->data; - - if (len < tlv_len) { - IWL_ERR(drv, "invalid TLV len: %zd/%u\n", - len, tlv_len); - return -EINVAL; - } - len -= ALIGN(tlv_len, 4); - data += sizeof(*tlv) + ALIGN(tlv_len, 4); - - switch (tlv_type) { - case IWL_UCODE_TLV_INST: - set_sec_data(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_INST, tlv_data); - set_sec_size(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_INST, tlv_len); - set_sec_offset(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_INST, - IWLAGN_RTC_INST_LOWER_BOUND); - break; - case IWL_UCODE_TLV_DATA: - set_sec_data(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_DATA, tlv_data); - set_sec_size(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_DATA, tlv_len); - set_sec_offset(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_DATA, - IWLAGN_RTC_DATA_LOWER_BOUND); - break; - case IWL_UCODE_TLV_INIT: - set_sec_data(pieces, IWL_UCODE_INIT, - IWL_UCODE_SECTION_INST, tlv_data); - set_sec_size(pieces, IWL_UCODE_INIT, - IWL_UCODE_SECTION_INST, tlv_len); - set_sec_offset(pieces, IWL_UCODE_INIT, - IWL_UCODE_SECTION_INST, - IWLAGN_RTC_INST_LOWER_BOUND); - break; - case IWL_UCODE_TLV_INIT_DATA: - set_sec_data(pieces, IWL_UCODE_INIT, - IWL_UCODE_SECTION_DATA, tlv_data); - set_sec_size(pieces, IWL_UCODE_INIT, - IWL_UCODE_SECTION_DATA, tlv_len); - set_sec_offset(pieces, IWL_UCODE_INIT, - IWL_UCODE_SECTION_DATA, - IWLAGN_RTC_DATA_LOWER_BOUND); - break; - case IWL_UCODE_TLV_BOOT: - IWL_ERR(drv, "Found unexpected BOOT ucode\n"); - break; - case IWL_UCODE_TLV_PROBE_MAX_LEN: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - capa->max_probe_length = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_PAN: - if (tlv_len) - goto invalid_tlv_len; - capa->flags |= IWL_UCODE_TLV_FLAGS_PAN; - break; - case IWL_UCODE_TLV_FLAGS: - /* must be at least one u32 */ - if (tlv_len < sizeof(u32)) - goto invalid_tlv_len; - /* and a proper number of u32s */ - if (tlv_len % sizeof(u32)) - goto invalid_tlv_len; - /* - * This driver only reads the first u32 as - * right now no more features are defined, - * if that changes then either the driver - * will not work with the new firmware, or - * it'll not take advantage of new features. 
- */ - capa->flags = le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_API_CHANGES_SET: - if (tlv_len != sizeof(struct iwl_ucode_api)) - goto invalid_tlv_len; - if (iwl_set_ucode_api_flags(drv, tlv_data, capa)) - goto tlv_error; - break; - case IWL_UCODE_TLV_ENABLED_CAPABILITIES: - if (tlv_len != sizeof(struct iwl_ucode_capa)) - goto invalid_tlv_len; - if (iwl_set_ucode_capabilities(drv, tlv_data, capa)) - goto tlv_error; - break; - case IWL_UCODE_TLV_INIT_EVTLOG_PTR: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - pieces->init_evtlog_ptr = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_INIT_EVTLOG_SIZE: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - pieces->init_evtlog_size = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_INIT_ERRLOG_PTR: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - pieces->init_errlog_ptr = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_RUNT_EVTLOG_PTR: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - pieces->inst_evtlog_ptr = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - pieces->inst_evtlog_size = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_RUNT_ERRLOG_PTR: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - pieces->inst_errlog_ptr = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_ENHANCE_SENS_TBL: - if (tlv_len) - goto invalid_tlv_len; - drv->fw.enhance_sensitivity_table = true; - break; - case IWL_UCODE_TLV_WOWLAN_INST: - set_sec_data(pieces, IWL_UCODE_WOWLAN, - IWL_UCODE_SECTION_INST, tlv_data); - set_sec_size(pieces, IWL_UCODE_WOWLAN, - IWL_UCODE_SECTION_INST, tlv_len); - set_sec_offset(pieces, IWL_UCODE_WOWLAN, - IWL_UCODE_SECTION_INST, - IWLAGN_RTC_INST_LOWER_BOUND); - break; - case IWL_UCODE_TLV_WOWLAN_DATA: - set_sec_data(pieces, IWL_UCODE_WOWLAN, - IWL_UCODE_SECTION_DATA, tlv_data); - set_sec_size(pieces, IWL_UCODE_WOWLAN, - IWL_UCODE_SECTION_DATA, tlv_len); - set_sec_offset(pieces, IWL_UCODE_WOWLAN, - IWL_UCODE_SECTION_DATA, - IWLAGN_RTC_DATA_LOWER_BOUND); - break; - case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - capa->standard_phy_calibration_size = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_SEC_RT: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, - tlv_len); - drv->fw.mvm_fw = true; - break; - case IWL_UCODE_TLV_SEC_INIT: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, - tlv_len); - drv->fw.mvm_fw = true; - break; - case IWL_UCODE_TLV_SEC_WOWLAN: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, - tlv_len); - drv->fw.mvm_fw = true; - break; - case IWL_UCODE_TLV_DEF_CALIB: - if (tlv_len != sizeof(struct iwl_tlv_calib_data)) - goto invalid_tlv_len; - if (iwl_set_default_calib(drv, tlv_data)) - goto tlv_error; - break; - case IWL_UCODE_TLV_PHY_SKU: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data); - drv->fw.valid_tx_ant = (drv->fw.phy_config & - FW_PHY_CFG_TX_CHAIN) >> - FW_PHY_CFG_TX_CHAIN_POS; - drv->fw.valid_rx_ant = (drv->fw.phy_config & - FW_PHY_CFG_RX_CHAIN) >> - FW_PHY_CFG_RX_CHAIN_POS; - break; - case IWL_UCODE_TLV_SECURE_SEC_RT: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, - tlv_len); - drv->fw.mvm_fw = true; - break; - case IWL_UCODE_TLV_SECURE_SEC_INIT: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, - tlv_len); - drv->fw.mvm_fw = 
true; - break; - case IWL_UCODE_TLV_SECURE_SEC_WOWLAN: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, - tlv_len); - drv->fw.mvm_fw = true; - break; - case IWL_UCODE_TLV_NUM_OF_CPU: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - num_of_cpus = - le32_to_cpup((__le32 *)tlv_data); - - if (num_of_cpus == 2) { - drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus = - true; - drv->fw.img[IWL_UCODE_INIT].is_dual_cpus = - true; - drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus = - true; - } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) { - IWL_ERR(drv, "Driver support upto 2 CPUs\n"); - return -EINVAL; - } - break; - case IWL_UCODE_TLV_CSCHEME: - if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len)) - goto invalid_tlv_len; - break; - case IWL_UCODE_TLV_N_SCAN_CHANNELS: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - capa->n_scan_channels = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_FW_VERSION: { - __le32 *ptr = (void *)tlv_data; - u32 major, minor; - u8 local_comp; - - if (tlv_len != sizeof(u32) * 3) - goto invalid_tlv_len; - - major = le32_to_cpup(ptr++); - minor = le32_to_cpup(ptr++); - local_comp = le32_to_cpup(ptr); - - snprintf(drv->fw.fw_version, - sizeof(drv->fw.fw_version), "%u.%u.%u", - major, minor, local_comp); - break; - } - case IWL_UCODE_TLV_FW_DBG_DEST: { - struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data; - - if (pieces->dbg_dest_tlv) { - IWL_ERR(drv, - "dbg destination ignored, already exists\n"); - break; - } - - pieces->dbg_dest_tlv = dest; - IWL_INFO(drv, "Found debug destination: %s\n", - get_fw_dbg_mode_string(dest->monitor_mode)); - - drv->fw.dbg_dest_reg_num = - tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, - reg_ops); - drv->fw.dbg_dest_reg_num /= - sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]); - - break; - } - case IWL_UCODE_TLV_FW_DBG_CONF: { - struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data; - - if (!pieces->dbg_dest_tlv) { - IWL_ERR(drv, - "Ignore dbg config %d - no destination configured\n", - conf->id); - break; - } - - if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) { - IWL_ERR(drv, - "Skip unknown configuration: %d\n", - conf->id); - break; - } - - if (pieces->dbg_conf_tlv[conf->id]) { - IWL_ERR(drv, - "Ignore duplicate dbg config %d\n", - conf->id); - break; - } - - if (conf->usniffer) - usniffer_req = true; - - IWL_INFO(drv, "Found debug configuration: %d\n", - conf->id); - - pieces->dbg_conf_tlv[conf->id] = conf; - pieces->dbg_conf_tlv_len[conf->id] = tlv_len; - break; - } - case IWL_UCODE_TLV_FW_DBG_TRIGGER: { - struct iwl_fw_dbg_trigger_tlv *trigger = - (void *)tlv_data; - u32 trigger_id = le32_to_cpu(trigger->id); - - if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) { - IWL_ERR(drv, - "Skip unknown trigger: %u\n", - trigger->id); - break; - } - - if (pieces->dbg_trigger_tlv[trigger_id]) { - IWL_ERR(drv, - "Ignore duplicate dbg trigger %u\n", - trigger->id); - break; - } - - IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id); - - pieces->dbg_trigger_tlv[trigger_id] = trigger; - pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; - break; - } - case IWL_UCODE_TLV_SEC_RT_USNIFFER: - usniffer_images = true; - iwl_store_ucode_sec(pieces, tlv_data, - IWL_UCODE_REGULAR_USNIFFER, - tlv_len); - break; - case IWL_UCODE_TLV_PAGING: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - paging_mem_size = le32_to_cpup((__le32 *)tlv_data); - - IWL_DEBUG_FW(drv, - "Paging: paging enabled (size = %u bytes)\n", - paging_mem_size); - - if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) { - IWL_ERR(drv, - "Paging: driver supports 
up to %lu bytes for paging image\n", - MAX_PAGING_IMAGE_SIZE); - return -EINVAL; - } - - if (paging_mem_size & (FW_PAGING_SIZE - 1)) { - IWL_ERR(drv, - "Paging: image isn't multiple %lu\n", - FW_PAGING_SIZE); - return -EINVAL; - } - - drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size = - paging_mem_size; - usniffer_img = IWL_UCODE_REGULAR_USNIFFER; - drv->fw.img[usniffer_img].paging_mem_size = - paging_mem_size; - break; - case IWL_UCODE_TLV_SDIO_ADMA_ADDR: - if (tlv_len != sizeof(u32)) - goto invalid_tlv_len; - drv->fw.sdio_adma_addr = - le32_to_cpup((__le32 *)tlv_data); - break; - case IWL_UCODE_TLV_FW_GSCAN_CAPA: - if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len)) - goto invalid_tlv_len; - gscan_capa = true; - break; - default: - IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); - break; - } - } - - if (usniffer_req && !usniffer_images) { - IWL_ERR(drv, - "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); - return -EINVAL; - } - - if (len) { - IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len); - iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len); - return -EINVAL; - } - - /* - * If ucode advertises that it supports GSCAN but GSCAN - * capabilities TLV is not present, warn and continue without GSCAN. - */ - if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && - WARN(!gscan_capa, - "GSCAN is supported but capabilities TLV is unavailable\n")) - __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, - capa->_capa); - - return 0; - - invalid_tlv_len: - IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len); - tlv_error: - iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len); - - return -EINVAL; -} - -static int iwl_alloc_ucode(struct iwl_drv *drv, - struct iwl_firmware_pieces *pieces, - enum iwl_ucode_type type) -{ - int i; - for (i = 0; - i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i); - i++) - if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]), - get_sec(pieces, type, i))) - return -ENOMEM; - return 0; -} - -static int validate_sec_sizes(struct iwl_drv *drv, - struct iwl_firmware_pieces *pieces, - const struct iwl_cfg *cfg) -{ - IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n", - get_sec_size(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_INST)); - IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n", - get_sec_size(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_DATA)); - IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n", - get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); - IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n", - get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)); - - /* Verify that uCode images will fit in card's SRAM. 
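One detail of the paging handling near the top of this hunk is worth spelling out: the test paging_mem_size & (FW_PAGING_SIZE - 1) is the usual power-of-two remainder idiom, rejecting any image size that is not a whole multiple of the paging block, and it is only equivalent to a modulo check because FW_PAGING_SIZE is, as the idiom requires, a power of two. A tiny standalone illustration, using 4096 purely as an example block size and a made-up helper name:

#include <assert.h>

/* Same as (size % block) != 0, valid only when block is a power of two. */
static int not_multiple_of(unsigned long size, unsigned long block)
{
        return (size & (block - 1)) != 0;
}

int main(void)
{
        assert(!not_multiple_of(32 * 4096, 4096));        /* whole number of blocks */
        assert( not_multiple_of(32 * 4096 + 100, 4096));  /* stray tail bytes */
        return 0;
}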
*/ - if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > - cfg->max_inst_size) { - IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n", - get_sec_size(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_INST)); - return -1; - } - - if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > - cfg->max_data_size) { - IWL_ERR(drv, "uCode data len %Zd too large to fit in\n", - get_sec_size(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_DATA)); - return -1; - } - - if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > - cfg->max_inst_size) { - IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n", - get_sec_size(pieces, IWL_UCODE_INIT, - IWL_UCODE_SECTION_INST)); - return -1; - } - - if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > - cfg->max_data_size) { - IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n", - get_sec_size(pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_DATA)); - return -1; - } - return 0; -} - -static struct iwl_op_mode * -_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) -{ - const struct iwl_op_mode_ops *ops = op->ops; - struct dentry *dbgfs_dir = NULL; - struct iwl_op_mode *op_mode = NULL; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - drv->dbgfs_op_mode = debugfs_create_dir(op->name, - drv->dbgfs_drv); - if (!drv->dbgfs_op_mode) { - IWL_ERR(drv, - "failed to create opmode debugfs directory\n"); - return op_mode; - } - dbgfs_dir = drv->dbgfs_op_mode; -#endif - - op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (!op_mode) { - debugfs_remove_recursive(drv->dbgfs_op_mode); - drv->dbgfs_op_mode = NULL; - } -#endif - - return op_mode; -} - -static void _iwl_op_mode_stop(struct iwl_drv *drv) -{ - /* op_mode can be NULL if its start failed */ - if (drv->op_mode) { - iwl_op_mode_stop(drv->op_mode); - drv->op_mode = NULL; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - debugfs_remove_recursive(drv->dbgfs_op_mode); - drv->dbgfs_op_mode = NULL; -#endif - } -} - -/** - * iwl_req_fw_callback - callback when firmware was loaded - * - * If loaded successfully, copies the firmware into buffers - * for the card to fetch (via DMA). 
- */ -static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) -{ - struct iwl_drv *drv = context; - struct iwl_fw *fw = &drv->fw; - struct iwl_ucode_header *ucode; - struct iwlwifi_opmode_table *op; - int err; - struct iwl_firmware_pieces *pieces; - const unsigned int api_max = drv->cfg->ucode_api_max; - unsigned int api_ok = drv->cfg->ucode_api_ok; - const unsigned int api_min = drv->cfg->ucode_api_min; - size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; - u32 api_ver; - int i; - bool load_module = false; - - fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; - fw->ucode_capa.standard_phy_calibration_size = - IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; - fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; - - if (!api_ok) - api_ok = api_max; - - pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); - if (!pieces) - return; - - if (!ucode_raw) { - if (drv->fw_index <= api_ok) - IWL_ERR(drv, - "request for firmware file '%s' failed.\n", - drv->firmware_name); - goto try_again; - } - - IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", - drv->firmware_name, ucode_raw->size); - - /* Make sure that we got at least the API version number */ - if (ucode_raw->size < 4) { - IWL_ERR(drv, "File size way too small!\n"); - goto try_again; - } - - /* Data from ucode file: header followed by uCode images */ - ucode = (struct iwl_ucode_header *)ucode_raw->data; - - if (ucode->ver) - err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces); - else - err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, - &fw->ucode_capa); - - if (err) - goto try_again; - - if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) - api_ver = drv->fw.ucode_ver; - else - api_ver = IWL_UCODE_API(drv->fw.ucode_ver); - - /* - * api_ver should match the api version forming part of the - * firmware filename ... but we don't check for that and only rely - * on the API version read from firmware header from here on forward - */ - /* no api version check required for experimental uCode */ - if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) { - if (api_ver < api_min || api_ver > api_max) { - IWL_ERR(drv, - "Driver unable to support your firmware API. " - "Driver supports v%u, firmware is v%u.\n", - api_max, api_ver); - goto try_again; - } - - if (api_ver < api_ok) { - if (api_ok != api_max) - IWL_ERR(drv, "Firmware has old API version, " - "expected v%u through v%u, got v%u.\n", - api_ok, api_max, api_ver); - else - IWL_ERR(drv, "Firmware has old API version, " - "expected v%u, got v%u.\n", - api_max, api_ver); - IWL_ERR(drv, "New firmware can be obtained from " - "http://www.intellinuxwireless.org/.\n"); - } - } - - /* - * In mvm uCode there is no difference between data and instructions - * sections. - */ - if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg)) - goto try_again; - - /* Allocate ucode buffers for card's bus-master loading ... 
*/ - - /* Runtime instructions and 2 copies of data: - * 1) unmodified from disk - * 2) backup cache for save/restore during power-downs */ - for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) - if (iwl_alloc_ucode(drv, pieces, i)) - goto out_free_fw; - - if (pieces->dbg_dest_tlv) { - drv->fw.dbg_dest_tlv = - kmemdup(pieces->dbg_dest_tlv, - sizeof(*pieces->dbg_dest_tlv) + - sizeof(pieces->dbg_dest_tlv->reg_ops[0]) * - drv->fw.dbg_dest_reg_num, GFP_KERNEL); - - if (!drv->fw.dbg_dest_tlv) - goto out_free_fw; - } - - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) { - if (pieces->dbg_conf_tlv[i]) { - drv->fw.dbg_conf_tlv_len[i] = - pieces->dbg_conf_tlv_len[i]; - drv->fw.dbg_conf_tlv[i] = - kmemdup(pieces->dbg_conf_tlv[i], - drv->fw.dbg_conf_tlv_len[i], - GFP_KERNEL); - if (!drv->fw.dbg_conf_tlv[i]) - goto out_free_fw; - } - } - - memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz)); - - trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] = - sizeof(struct iwl_fw_dbg_trigger_missed_bcon); - trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0; - trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] = - sizeof(struct iwl_fw_dbg_trigger_cmd); - trigger_tlv_sz[FW_DBG_TRIGGER_MLME] = - sizeof(struct iwl_fw_dbg_trigger_mlme); - trigger_tlv_sz[FW_DBG_TRIGGER_STATS] = - sizeof(struct iwl_fw_dbg_trigger_stats); - trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] = - sizeof(struct iwl_fw_dbg_trigger_low_rssi); - trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] = - sizeof(struct iwl_fw_dbg_trigger_txq_timer); - trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] = - sizeof(struct iwl_fw_dbg_trigger_time_event); - trigger_tlv_sz[FW_DBG_TRIGGER_BA] = - sizeof(struct iwl_fw_dbg_trigger_ba); - - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) { - if (pieces->dbg_trigger_tlv[i]) { - /* - * If the trigger isn't long enough, WARN and exit. - * Someone is trying to debug something and he won't - * be able to catch the bug he is trying to chase. - * We'd better be noisy to be sure he knows what's - * going on. - */ - if (WARN_ON(pieces->dbg_trigger_tlv_len[i] < - (trigger_tlv_sz[i] + - sizeof(struct iwl_fw_dbg_trigger_tlv)))) - goto out_free_fw; - drv->fw.dbg_trigger_tlv_len[i] = - pieces->dbg_trigger_tlv_len[i]; - drv->fw.dbg_trigger_tlv[i] = - kmemdup(pieces->dbg_trigger_tlv[i], - drv->fw.dbg_trigger_tlv_len[i], - GFP_KERNEL); - if (!drv->fw.dbg_trigger_tlv[i]) - goto out_free_fw; - } - } - - /* Now that we can no longer fail, copy information */ - - /* - * The (size - 16) / 12 formula is based on the information recorded - * for each event, which is of mode 1 (including timestamp) for all - * new microcodes that include this information. 
- */ - fw->init_evtlog_ptr = pieces->init_evtlog_ptr; - if (pieces->init_evtlog_size) - fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12; - else - fw->init_evtlog_size = - drv->cfg->base_params->max_event_log_size; - fw->init_errlog_ptr = pieces->init_errlog_ptr; - fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr; - if (pieces->inst_evtlog_size) - fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12; - else - fw->inst_evtlog_size = - drv->cfg->base_params->max_event_log_size; - fw->inst_errlog_ptr = pieces->inst_errlog_ptr; - - /* - * figure out the offset of chain noise reset and gain commands - * base on the size of standard phy calibration commands table size - */ - if (fw->ucode_capa.standard_phy_calibration_size > - IWL_MAX_PHY_CALIBRATE_TBL_SIZE) - fw->ucode_capa.standard_phy_calibration_size = - IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE; - - /* We have our copies now, allow OS release its copies */ - release_firmware(ucode_raw); - - mutex_lock(&iwlwifi_opmode_table_mtx); - if (fw->mvm_fw) - op = &iwlwifi_opmode_table[MVM_OP_MODE]; - else - op = &iwlwifi_opmode_table[DVM_OP_MODE]; - - IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", - drv->fw.fw_version, op->name); - - /* add this device to the list of devices using this op_mode */ - list_add_tail(&drv->list, &op->drv); - - if (op->ops) { - drv->op_mode = _iwl_op_mode_start(drv, op); - - if (!drv->op_mode) { - mutex_unlock(&iwlwifi_opmode_table_mtx); - goto out_unbind; - } - } else { - load_module = true; - } - mutex_unlock(&iwlwifi_opmode_table_mtx); - - /* - * Complete the firmware request last so that - * a driver unbind (stop) doesn't run while we - * are doing the start() above. - */ - complete(&drv->request_firmware_complete); - - /* - * Load the module last so we don't block anything - * else from proceeding if the module fails to load - * or hangs loading. - */ - if (load_module) { - err = request_module("%s", op->name); -#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR - if (err) - IWL_ERR(drv, - "failed to load module %s (error %d), is dynamic loading enabled?\n", - op->name, err); -#endif - } - kfree(pieces); - return; - - try_again: - /* try next, if any */ - release_firmware(ucode_raw); - if (iwl_request_firmware(drv, false)) - goto out_unbind; - kfree(pieces); - return; - - out_free_fw: - IWL_ERR(drv, "failed to allocate pci memory\n"); - iwl_dealloc_ucode(drv); - release_firmware(ucode_raw); - out_unbind: - kfree(pieces); - complete(&drv->request_firmware_complete); - device_release_driver(drv->trans->dev); -} - -struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, - const struct iwl_cfg *cfg) -{ - struct iwl_drv *drv; - int ret; - - drv = kzalloc(sizeof(*drv), GFP_KERNEL); - if (!drv) { - ret = -ENOMEM; - goto err; - } - - drv->trans = trans; - drv->dev = trans->dev; - drv->cfg = cfg; - - init_completion(&drv->request_firmware_complete); - INIT_LIST_HEAD(&drv->list); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* Create the device debugfs entries. 
*/ - drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), - iwl_dbgfs_root); - - if (!drv->dbgfs_drv) { - IWL_ERR(drv, "failed to create debugfs directory\n"); - ret = -ENOMEM; - goto err_free_drv; - } - - /* Create transport layer debugfs dir */ - drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv); - - if (!drv->trans->dbgfs_dir) { - IWL_ERR(drv, "failed to create transport debugfs directory\n"); - ret = -ENOMEM; - goto err_free_dbgfs; - } -#endif - - ret = iwl_request_firmware(drv, true); - if (ret) { - IWL_ERR(trans, "Couldn't request the fw\n"); - goto err_fw; - } - - return drv; - -err_fw: -#ifdef CONFIG_IWLWIFI_DEBUGFS -err_free_dbgfs: - debugfs_remove_recursive(drv->dbgfs_drv); -err_free_drv: -#endif - kfree(drv); -err: - return ERR_PTR(ret); -} - -void iwl_drv_stop(struct iwl_drv *drv) -{ - wait_for_completion(&drv->request_firmware_complete); - - _iwl_op_mode_stop(drv); - - iwl_dealloc_ucode(drv); - - mutex_lock(&iwlwifi_opmode_table_mtx); - /* - * List is empty (this item wasn't added) - * when firmware loading failed -- in that - * case we can't remove it from any list. - */ - if (!list_empty(&drv->list)) - list_del(&drv->list); - mutex_unlock(&iwlwifi_opmode_table_mtx); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - debugfs_remove_recursive(drv->dbgfs_drv); -#endif - - kfree(drv); -} - - -/* shared module parameters */ -struct iwl_mod_params iwlwifi_mod_params = { - .restart_fw = true, - .bt_coex_active = true, - .power_level = IWL_POWER_INDEX_1, - .d0i3_disable = true, -#ifndef CONFIG_IWLWIFI_UAPSD - .uapsd_disable = true, -#endif /* CONFIG_IWLWIFI_UAPSD */ - /* the rest are 0 by default */ -}; -IWL_EXPORT_SYMBOL(iwlwifi_mod_params); - -int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) -{ - int i; - struct iwl_drv *drv; - struct iwlwifi_opmode_table *op; - - mutex_lock(&iwlwifi_opmode_table_mtx); - for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { - op = &iwlwifi_opmode_table[i]; - if (strcmp(op->name, name)) - continue; - op->ops = ops; - /* TODO: need to handle exceptional case */ - list_for_each_entry(drv, &op->drv, list) - drv->op_mode = _iwl_op_mode_start(drv, op); - - mutex_unlock(&iwlwifi_opmode_table_mtx); - return 0; - } - mutex_unlock(&iwlwifi_opmode_table_mtx); - return -EIO; -} -IWL_EXPORT_SYMBOL(iwl_opmode_register); - -void iwl_opmode_deregister(const char *name) -{ - int i; - struct iwl_drv *drv; - - mutex_lock(&iwlwifi_opmode_table_mtx); - for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { - if (strcmp(iwlwifi_opmode_table[i].name, name)) - continue; - iwlwifi_opmode_table[i].ops = NULL; - - /* call the stop routine for all devices */ - list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) - _iwl_op_mode_stop(drv); - - mutex_unlock(&iwlwifi_opmode_table_mtx); - return; - } - mutex_unlock(&iwlwifi_opmode_table_mtx); -} -IWL_EXPORT_SYMBOL(iwl_opmode_deregister); - -static int __init iwl_drv_init(void) -{ - int i; - - mutex_init(&iwlwifi_opmode_table_mtx); - - for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) - INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); - - pr_info(DRV_DESCRIPTION "\n"); - pr_info(DRV_COPYRIGHT "\n"); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* Create the root of iwlwifi debugfs subsystem. 
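The op-mode plumbing just above (iwlwifi_opmode_table, iwl_opmode_register() and iwl_opmode_deregister()) is a small deferred-registration pattern: the core keeps a table keyed by op-mode name, each parsed device queues on the matching entry, and whichever side arrives second, the device probe or the iwldvm/iwlmvm sub-module, triggers the start. A stripped-down model of that registry, with hypothetical names (registry_entry, device_ctx, registry_register) and the iwlwifi_opmode_table_mtx locking omitted:

#include <stddef.h>
#include <string.h>

struct ops { void (*start)(void *dev); };

struct device_ctx {                       /* stands in for struct iwl_drv */
        void *dev;
        struct device_ctx *next;
};

struct registry_entry {
        const char *name;                 /* "iwldvm" / "iwlmvm" in the driver */
        const struct ops *ops;            /* NULL until the sub-module registers */
        struct device_ctx *pending;       /* devices waiting to be started */
};

static struct registry_entry table[] = {
        { .name = "iwldvm" },
        { .name = "iwlmvm" },
};

/* Sub-module load time, cf. iwl_opmode_register(): publish the ops, then
 * start every device whose firmware was parsed before the module loaded. */
static int registry_register(const char *name, const struct ops *ops)
{
        size_t i;
        struct device_ctx *d;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (strcmp(table[i].name, name))
                        continue;
                table[i].ops = ops;
                for (d = table[i].pending; d; d = d->next)
                        ops->start(d->dev);
                return 0;
        }
        return -1;                        /* unknown op-mode name */
}

The firmware callback above covers the other ordering: when the ops are not registered yet it queues the device and asks for the sub-module with request_module(op->name), while iwl_opmode_deregister() clears the ops pointer and then stops every device that was using it.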
*/ - iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL); - - if (!iwl_dbgfs_root) - return -EFAULT; -#endif - - return iwl_pci_register_driver(); -} -module_init(iwl_drv_init); - -static void __exit iwl_drv_exit(void) -{ - iwl_pci_unregister_driver(); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - debugfs_remove_recursive(iwl_dbgfs_root); -#endif -} -module_exit(iwl_drv_exit); - -#ifdef CONFIG_IWLWIFI_DEBUG -module_param_named(debug, iwlwifi_mod_params.debug_level, uint, - S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "debug output mask"); -#endif - -module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO); -MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); -module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); -MODULE_PARM_DESC(11n_disable, - "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); -module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, - int, S_IRUGO); -MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); -module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO); -MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); - -module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling, - int, S_IRUGO); -MODULE_PARM_DESC(antenna_coupling, - "specify antenna coupling in dB (default: 0 dB)"); - -module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); -MODULE_PARM_DESC(nvm_file, "NVM file name"); - -module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable, - bool, S_IRUGO); -MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)"); - -module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, - bool, S_IRUGO); -MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); - -module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, - bool, S_IRUGO | S_IWUSR); -#ifdef CONFIG_IWLWIFI_UAPSD -MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)"); -#else -MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)"); -#endif - -/* - * set bt_coex_active to true, uCode will do kill/defer - * every time the priority line is asserted (BT is sending signals on the - * priority line in the PCIx). - * set bt_coex_active to false, uCode will ignore the BT activity and - * perform the normal operation - * - * User might experience transmit issue on some platform due to WiFi/BT - * co-exist problem. 
The possible behaviors are: - * Able to scan and finding all the available AP - * Not able to associate with any AP - * On those platforms, WiFi communication can be restored by set - * "bt_coex_active" module parameter to "false" - * - * default: bt_coex_active = true (BT_COEX_ENABLE) - */ -module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active, - bool, S_IRUGO); -MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); - -module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO); -MODULE_PARM_DESC(led_mode, "0=system default, " - "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)"); - -module_param_named(power_save, iwlwifi_mod_params.power_save, - bool, S_IRUGO); -MODULE_PARM_DESC(power_save, - "enable WiFi power management (default: disable)"); - -module_param_named(power_level, iwlwifi_mod_params.power_level, - int, S_IRUGO); -MODULE_PARM_DESC(power_level, - "default power save level (range from 1 - 5, default: 1)"); - -module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, S_IRUGO); -MODULE_PARM_DESC(fw_monitor, - "firmware monitor - to debug FW (default: false - needs lots of memory)"); diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h deleted file mode 100644 index cda746b33db1..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ /dev/null @@ -1,155 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __iwl_drv_h__ -#define __iwl_drv_h__ -#include - -/* for all modules */ -#define DRV_NAME "iwlwifi" -#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation" -#define DRV_AUTHOR "" - -/* radio config bits (actual values from NVM definition) */ -#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ -#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ -#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ -#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ -#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ -#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ - -#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x) (x & 0xF) -#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x) ((x >> 4) & 0xF) -#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x) ((x >> 8) & 0xF) -#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x) ((x >> 12) & 0xFFF) -#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF) -#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF) - -/** - * DOC: Driver system flows - drv component - * - * This component implements the system flows such as bus enumeration, bus - * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in - * bus specific files (transport files). This is the code that is common among - * different buses. - * - * This component is also in charge of managing the several implementations of - * the wifi flows: it will allow to have several fw API implementation. These - * different implementations will differ in the way they implement mac80211's - * handlers too. - - * The init flow wrt to the drv component looks like this: - * 1) The bus specific component is called from module_init - * 2) The bus specific component registers the bus driver - * 3) The bus driver calls the probe function - * 4) The bus specific component configures the bus - * 5) The bus specific component calls to the drv bus agnostic part - * (iwl_drv_start) - * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback - * 7) iwl_req_fw_callback parses the fw file - * 8) iwl_req_fw_callback starts the wifi implementation to matches the fw - */ - -struct iwl_drv; -struct iwl_trans; -struct iwl_cfg; -/** - * iwl_drv_start - start the drv - * - * @trans_ops: the ops of the transport - * @cfg: device specific constants / virtual functions - * - * starts the driver: fetches the firmware. This should be called by bus - * specific system flows implementations. 
For example, the bus specific probe - * function should do bus related operations only, and then call to this - * function. It returns the driver object or %NULL if an error occurred. - */ -struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, - const struct iwl_cfg *cfg); - -/** - * iwl_drv_stop - stop the drv - * - * @drv: - * - * Stop the driver. This should be called by bus specific system flows - * implementations. For example, the bus specific remove function should first - * call this function and then do the bus related operations only. - */ -void iwl_drv_stop(struct iwl_drv *drv); - -/* - * exported symbol management - * - * The driver can be split into multiple modules, in which case some symbols - * must be exported for the sub-modules. However, if it's not split and - * everything is built-in, then we can avoid that. - */ -#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR -#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym) -#else -#define IWL_EXPORT_SYMBOL(sym) -#endif - -#endif /* __iwl_drv_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c deleted file mode 100644 index acc3d186c5c1..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ /dev/null @@ -1,947 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ -#include -#include -#include -#include "iwl-drv.h" -#include "iwl-modparams.h" -#include "iwl-eeprom-parse.h" - -/* EEPROM offset definitions */ - -/* indirect access definitions */ -#define ADDRESS_MSK 0x0000FFFF -#define INDIRECT_TYPE_MSK 0x000F0000 -#define INDIRECT_HOST 0x00010000 -#define INDIRECT_GENERAL 0x00020000 -#define INDIRECT_REGULATORY 0x00030000 -#define INDIRECT_CALIBRATION 0x00040000 -#define INDIRECT_PROCESS_ADJST 0x00050000 -#define INDIRECT_OTHERS 0x00060000 -#define INDIRECT_TXP_LIMIT 0x00070000 -#define INDIRECT_TXP_LIMIT_SIZE 0x00080000 -#define INDIRECT_ADDRESS 0x00100000 - -/* corresponding link offsets in EEPROM */ -#define EEPROM_LINK_HOST (2*0x64) -#define EEPROM_LINK_GENERAL (2*0x65) -#define EEPROM_LINK_REGULATORY (2*0x66) -#define EEPROM_LINK_CALIBRATION (2*0x67) -#define EEPROM_LINK_PROCESS_ADJST (2*0x68) -#define EEPROM_LINK_OTHERS (2*0x69) -#define EEPROM_LINK_TXP_LIMIT (2*0x6a) -#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b) - -/* General */ -#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ -#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */ -#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ -#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ -#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ -#define EEPROM_VERSION (2*0x44) /* 2 bytes */ -#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ -#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ -#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ -#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ - -/* calibration */ -struct iwl_eeprom_calib_hdr { - u8 version; - u8 pa_type; - __le16 voltage; -} __packed; - -#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION) -#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL) - -/* temperature */ -#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL) -#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL) - -/* SKU Capabilities (actual values from EEPROM definition) */ -enum eeprom_sku_bits { - EEPROM_SKU_CAP_BAND_24GHZ = BIT(4), - EEPROM_SKU_CAP_BAND_52GHZ = BIT(5), - EEPROM_SKU_CAP_11N_ENABLE = BIT(6), - EEPROM_SKU_CAP_AMT_ENABLE = BIT(7), - EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8) -}; - -/* radio config bits (actual values from EEPROM definition) */ -#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ -#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ -#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ -#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ -#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ -#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ - - -/* - * EEPROM bands - * 
These are the channel numbers from each band in the order - * that they are stored in the EEPROM band information. Note - * that EEPROM bands aren't the same as mac80211 bands, and - * there are even special "ht40 bands" in the EEPROM. - */ -static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 -}; - -static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */ - 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 -}; - -static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */ - 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 -}; - -static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ - 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 -}; - -static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ - 145, 149, 153, 157, 161, 165 -}; - -static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */ - 1, 2, 3, 4, 5, 6, 7 -}; - -static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */ - 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 -}; - -#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \ - ARRAY_SIZE(iwl_eeprom_band_2) + \ - ARRAY_SIZE(iwl_eeprom_band_3) + \ - ARRAY_SIZE(iwl_eeprom_band_4) + \ - ARRAY_SIZE(iwl_eeprom_band_5)) - -/* rate data (static) */ -static struct ieee80211_rate iwl_cfg80211_rates[] = { - { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, - { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, - .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, - { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, - .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, - { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, - .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, - { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, - { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, - { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, - { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, - { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, - { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, - { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, - { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, -}; -#define RATES_24_OFFS 0 -#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) -#define RATES_52_OFFS 4 -#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) - -/* EEPROM reading functions */ - -static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset) -{ - if (WARN_ON(offset + sizeof(u16) > eeprom_size)) - return 0; - return le16_to_cpup((__le16 *)(eeprom + offset)); -} - -static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size, - u32 address) -{ - u16 offset = 0; - - if ((address & INDIRECT_ADDRESS) == 0) - return address; - - switch (address & INDIRECT_TYPE_MSK) { - case INDIRECT_HOST: - offset = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_LINK_HOST); - break; - case INDIRECT_GENERAL: - offset = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_LINK_GENERAL); - break; - case INDIRECT_REGULATORY: - offset = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_LINK_REGULATORY); - break; - case INDIRECT_TXP_LIMIT: - offset = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_LINK_TXP_LIMIT); - break; - case INDIRECT_TXP_LIMIT_SIZE: - offset = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_LINK_TXP_LIMIT_SIZE); - break; - case INDIRECT_CALIBRATION: - offset = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_LINK_CALIBRATION); - break; - case INDIRECT_PROCESS_ADJST: - offset = iwl_eeprom_query16(eeprom, 
eeprom_size, - EEPROM_LINK_PROCESS_ADJST); - break; - case INDIRECT_OTHERS: - offset = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_LINK_OTHERS); - break; - default: - WARN_ON(1); - break; - } - - /* translate the offset from words to byte */ - return (address & ADDRESS_MSK) + (offset << 1); -} - -static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size, - u32 offset) -{ - u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset); - - if (WARN_ON(address >= eeprom_size)) - return NULL; - - return &eeprom[address]; -} - -static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size, - struct iwl_nvm_data *data) -{ - struct iwl_eeprom_calib_hdr *hdr; - - hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, - EEPROM_CALIB_ALL); - if (!hdr) - return -ENODATA; - data->calib_version = hdr->version; - data->calib_voltage = hdr->voltage; - - return 0; -} - -/** - * enum iwl_eeprom_channel_flags - channel flags in EEPROM - * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo - * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel - * @EEPROM_CHANNEL_ACTIVE: active scanning allowed - * @EEPROM_CHANNEL_RADAR: radar detection required - * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?) - * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate - */ -enum iwl_eeprom_channel_flags { - EEPROM_CHANNEL_VALID = BIT(0), - EEPROM_CHANNEL_IBSS = BIT(1), - EEPROM_CHANNEL_ACTIVE = BIT(3), - EEPROM_CHANNEL_RADAR = BIT(4), - EEPROM_CHANNEL_WIDE = BIT(5), - EEPROM_CHANNEL_DFS = BIT(7), -}; - -/** - * struct iwl_eeprom_channel - EEPROM channel data - * @flags: %EEPROM_CHANNEL_* flags - * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm - */ -struct iwl_eeprom_channel { - u8 flags; - s8 max_power_avg; -} __packed; - - -enum iwl_eeprom_enhanced_txpwr_flags { - IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0), - IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1), - IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2), - IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3), - IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4), - IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5), - IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6), - IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7), -}; - -/** - * iwl_eeprom_enhanced_txpwr structure - * @flags: entry flags - * @channel: channel number - * @chain_a_max_pwr: chain a max power in 1/2 dBm - * @chain_b_max_pwr: chain b max power in 1/2 dBm - * @chain_c_max_pwr: chain c max power in 1/2 dBm - * @delta_20_in_40: 20-in-40 deltas (hi/lo) - * @mimo2_max_pwr: mimo2 max power in 1/2 dBm - * @mimo3_max_pwr: mimo3 max power in 1/2 dBm - * - * This structure presents the enhanced regulatory tx power limit layout - * in an EEPROM image. 
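 *
 * Editor's note: a small worked example of the half-dBm accounting used by
 * the parser below (values are illustrative, not taken from a real EEPROM).
 * With chain_a_max = 36, chain_b_max = 32 and mimo2_max = 30 (all in half
 * dBm, and assuming both chains are marked valid), iwl_get_max_txpwr_half_dbm()
 * returns the largest applicable value, 36, and iwl_eeprom_enhanced_txpower()
 * converts it to whole dBm with DIV_ROUND_UP(36, 2) = 18 before it is
 * compared against chan->max_power.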
- */ -struct iwl_eeprom_enhanced_txpwr { - u8 flags; - u8 channel; - s8 chain_a_max; - s8 chain_b_max; - s8 chain_c_max; - u8 delta_20_in_40; - s8 mimo2_max; - s8 mimo3_max; -} __packed; - -static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data, - struct iwl_eeprom_enhanced_txpwr *txp) -{ - s8 result = 0; /* (.5 dBm) */ - - /* Take the highest tx power from any valid chains */ - if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result) - result = txp->chain_a_max; - - if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result) - result = txp->chain_b_max; - - if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result) - result = txp->chain_c_max; - - if ((data->valid_tx_ant == ANT_AB || - data->valid_tx_ant == ANT_BC || - data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result) - result = txp->mimo2_max; - - if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result) - result = txp->mimo3_max; - - return result; -} - -#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT) -#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr) -#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE) - -#define TXP_CHECK_AND_PRINT(x) \ - ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "") - -static void -iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data, - struct iwl_eeprom_enhanced_txpwr *txp, - int n_channels, s8 max_txpower_avg) -{ - int ch_idx; - enum ieee80211_band band; - - band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? - IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; - - for (ch_idx = 0; ch_idx < n_channels; ch_idx++) { - struct ieee80211_channel *chan = &data->channels[ch_idx]; - - /* update matching channel or from common data only */ - if (txp->channel != 0 && chan->hw_value != txp->channel) - continue; - - /* update matching band only */ - if (band != chan->band) - continue; - - if (chan->max_power < max_txpower_avg && - !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ)) - chan->max_power = max_txpower_avg; - } -} - -static void iwl_eeprom_enhanced_txpower(struct device *dev, - struct iwl_nvm_data *data, - const u8 *eeprom, size_t eeprom_size, - int n_channels) -{ - struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; - int idx, entries; - __le16 *txp_len; - s8 max_txp_avg_halfdbm; - - BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); - - /* the length is in 16-bit words, but we want entries */ - txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size, - EEPROM_TXP_SZ_OFFS); - entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; - - txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, - EEPROM_TXP_OFFS); - - for (idx = 0; idx < entries; idx++) { - txp = &txp_array[idx]; - /* skip invalid entries */ - if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID)) - continue; - - IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n", - (txp->channel && (txp->flags & - IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ? - "Common " : (txp->channel) ? 
- "Channel" : "Common", - (txp->channel), - TXP_CHECK_AND_PRINT(VALID), - TXP_CHECK_AND_PRINT(BAND_52G), - TXP_CHECK_AND_PRINT(OFDM), - TXP_CHECK_AND_PRINT(40MHZ), - TXP_CHECK_AND_PRINT(HT_AP), - TXP_CHECK_AND_PRINT(RES1), - TXP_CHECK_AND_PRINT(RES2), - TXP_CHECK_AND_PRINT(COMMON_TYPE), - txp->flags); - IWL_DEBUG_EEPROM(dev, - "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n", - txp->chain_a_max, txp->chain_b_max, - txp->chain_c_max); - IWL_DEBUG_EEPROM(dev, - "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n", - txp->mimo2_max, txp->mimo3_max, - ((txp->delta_20_in_40 & 0xf0) >> 4), - (txp->delta_20_in_40 & 0x0f)); - - max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp); - - iwl_eeprom_enh_txp_read_element(data, txp, n_channels, - DIV_ROUND_UP(max_txp_avg_halfdbm, 2)); - - if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm) - data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm; - } -} - -static void iwl_init_band_reference(const struct iwl_cfg *cfg, - const u8 *eeprom, size_t eeprom_size, - int eeprom_band, int *eeprom_ch_count, - const struct iwl_eeprom_channel **ch_info, - const u8 **eeprom_ch_array) -{ - u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1]; - - offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY; - - *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset); - - switch (eeprom_band) { - case 1: /* 2.4GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); - *eeprom_ch_array = iwl_eeprom_band_1; - break; - case 2: /* 4.9GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); - *eeprom_ch_array = iwl_eeprom_band_2; - break; - case 3: /* 5.2GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); - *eeprom_ch_array = iwl_eeprom_band_3; - break; - case 4: /* 5.5GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); - *eeprom_ch_array = iwl_eeprom_band_4; - break; - case 5: /* 5.7GHz band */ - *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); - *eeprom_ch_array = iwl_eeprom_band_5; - break; - case 6: /* 2.4GHz ht40 channels */ - *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); - *eeprom_ch_array = iwl_eeprom_band_6; - break; - case 7: /* 5 GHz ht40 channels */ - *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); - *eeprom_ch_array = iwl_eeprom_band_7; - break; - default: - *eeprom_ch_count = 0; - *eeprom_ch_array = NULL; - WARN_ON(1); - } -} - -#define CHECK_AND_PRINT(x) \ - ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "") - -static void iwl_mod_ht40_chan_info(struct device *dev, - struct iwl_nvm_data *data, int n_channels, - enum ieee80211_band band, u16 channel, - const struct iwl_eeprom_channel *eeprom_ch, - u8 clear_ht40_extension_channel) -{ - struct ieee80211_channel *chan = NULL; - int i; - - for (i = 0; i < n_channels; i++) { - if (data->channels[i].band != band) - continue; - if (data->channels[i].hw_value != channel) - continue; - chan = &data->channels[i]; - break; - } - - if (!chan) - return; - - IWL_DEBUG_EEPROM(dev, - "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", - channel, - band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", - CHECK_AND_PRINT(IBSS), - CHECK_AND_PRINT(ACTIVE), - CHECK_AND_PRINT(RADAR), - CHECK_AND_PRINT(WIDE), - CHECK_AND_PRINT(DFS), - eeprom_ch->flags, - eeprom_ch->max_power_avg, - ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && - !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? 
"" - : "not "); - - if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) - chan->flags &= ~clear_ht40_extension_channel; -} - -#define CHECK_AND_PRINT_I(x) \ - ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "") - -static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const u8 *eeprom, size_t eeprom_size) -{ - int band, ch_idx; - const struct iwl_eeprom_channel *eeprom_ch_info; - const u8 *eeprom_ch_array; - int eeprom_ch_count; - int n_channels = 0; - - /* - * Loop through the 5 EEPROM bands and add them to the parse list - */ - for (band = 1; band <= 5; band++) { - struct ieee80211_channel *channel; - - iwl_init_band_reference(cfg, eeprom, eeprom_size, band, - &eeprom_ch_count, &eeprom_ch_info, - &eeprom_ch_array); - - /* Loop through each band adding each of the channels */ - for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { - const struct iwl_eeprom_channel *eeprom_ch; - - eeprom_ch = &eeprom_ch_info[ch_idx]; - - if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) { - IWL_DEBUG_EEPROM(dev, - "Ch. %d Flags %x [%sGHz] - No traffic\n", - eeprom_ch_array[ch_idx], - eeprom_ch_info[ch_idx].flags, - (band != 1) ? "5.2" : "2.4"); - continue; - } - - channel = &data->channels[n_channels]; - n_channels++; - - channel->hw_value = eeprom_ch_array[ch_idx]; - channel->band = (band == 1) ? IEEE80211_BAND_2GHZ - : IEEE80211_BAND_5GHZ; - channel->center_freq = - ieee80211_channel_to_frequency( - channel->hw_value, channel->band); - - /* set no-HT40, will enable as appropriate later */ - channel->flags = IEEE80211_CHAN_NO_HT40; - - if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS)) - channel->flags |= IEEE80211_CHAN_NO_IR; - - if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE)) - channel->flags |= IEEE80211_CHAN_NO_IR; - - if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR) - channel->flags |= IEEE80211_CHAN_RADAR; - - /* Initialize regulatory-based run-time data */ - channel->max_power = - eeprom_ch_info[ch_idx].max_power_avg; - IWL_DEBUG_EEPROM(dev, - "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", - channel->hw_value, - (band != 1) ? "5.2" : "2.4", - CHECK_AND_PRINT_I(VALID), - CHECK_AND_PRINT_I(IBSS), - CHECK_AND_PRINT_I(ACTIVE), - CHECK_AND_PRINT_I(RADAR), - CHECK_AND_PRINT_I(WIDE), - CHECK_AND_PRINT_I(DFS), - eeprom_ch_info[ch_idx].flags, - eeprom_ch_info[ch_idx].max_power_avg, - ((eeprom_ch_info[ch_idx].flags & - EEPROM_CHANNEL_IBSS) && - !(eeprom_ch_info[ch_idx].flags & - EEPROM_CHANNEL_RADAR)) - ? 
"" : "not "); - } - } - - if (cfg->eeprom_params->enhanced_txpower) { - /* - * for newer device (6000 series and up) - * EEPROM contain enhanced tx power information - * driver need to process addition information - * to determine the max channel tx power limits - */ - iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size, - n_channels); - } else { - /* All others use data from channel map */ - int i; - - data->max_tx_pwr_half_dbm = -128; - - for (i = 0; i < n_channels; i++) - data->max_tx_pwr_half_dbm = - max_t(s8, data->max_tx_pwr_half_dbm, - data->channels[i].max_power * 2); - } - - /* Check if we do have HT40 channels */ - if (cfg->eeprom_params->regulatory_bands[5] == - EEPROM_REGULATORY_BAND_NO_HT40 && - cfg->eeprom_params->regulatory_bands[6] == - EEPROM_REGULATORY_BAND_NO_HT40) - return n_channels; - - /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ - for (band = 6; band <= 7; band++) { - enum ieee80211_band ieeeband; - - iwl_init_band_reference(cfg, eeprom, eeprom_size, band, - &eeprom_ch_count, &eeprom_ch_info, - &eeprom_ch_array); - - /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ - ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ - : IEEE80211_BAND_5GHZ; - - /* Loop through each band adding each of the channels */ - for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { - /* Set up driver's info for lower half */ - iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, - eeprom_ch_array[ch_idx], - &eeprom_ch_info[ch_idx], - IEEE80211_CHAN_NO_HT40PLUS); - - /* Set up driver's info for upper half */ - iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, - eeprom_ch_array[ch_idx] + 4, - &eeprom_ch_info[ch_idx], - IEEE80211_CHAN_NO_HT40MINUS); - } - } - - return n_channels; -} - -int iwl_init_sband_channels(struct iwl_nvm_data *data, - struct ieee80211_supported_band *sband, - int n_channels, enum ieee80211_band band) -{ - struct ieee80211_channel *chan = &data->channels[0]; - int n = 0, idx = 0; - - while (idx < n_channels && chan->band != band) - chan = &data->channels[++idx]; - - sband->channels = &data->channels[idx]; - - while (idx < n_channels && chan->band == band) { - chan = &data->channels[++idx]; - n++; - } - - sband->n_channels = n; - - return n; -} - -#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ -#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ - -void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band, - u8 tx_chains, u8 rx_chains) -{ - int max_bit_rate = 0; - - tx_chains = hweight8(tx_chains); - if (cfg->rx_with_siso_diversity) - rx_chains = 1; - else - rx_chains = hweight8(rx_chains); - - if (!(data->sku_cap_11n_enable) || !cfg->ht_params) { - ht_info->ht_supported = false; - return; - } - - if (data->sku_cap_mimo_disabled) - rx_chains = 1; - - ht_info->ht_supported = true; - ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; - - if (cfg->ht_params->stbc) { - ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); - - if (tx_chains > 1) - ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; - } - - if (cfg->ht_params->ldpc) - ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; - - if (iwlwifi_mod_params.amsdu_size_8K) - ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; - - ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent; - ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; - - ht_info->mcs.rx_mask[0] = 0xFF; - if (rx_chains >= 2) - ht_info->mcs.rx_mask[1] = 0xFF; - if (rx_chains >= 3) - ht_info->mcs.rx_mask[2] = 0xFF; - - if (cfg->ht_params->ht_greenfield_support) - ht_info->cap |= 
IEEE80211_HT_CAP_GRN_FLD; - ht_info->cap |= IEEE80211_HT_CAP_SGI_20; - - max_bit_rate = MAX_BIT_RATE_20_MHZ; - - if (cfg->ht_params->ht40_bands & BIT(band)) { - ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - ht_info->cap |= IEEE80211_HT_CAP_SGI_40; - max_bit_rate = MAX_BIT_RATE_40_MHZ; - } - - /* Highest supported Rx data rate */ - max_bit_rate *= rx_chains; - WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); - ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); - - /* Tx MCS capabilities */ - ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (tx_chains != rx_chains) { - ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; - ht_info->mcs.tx_params |= ((tx_chains - 1) << - IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); - } -} - -static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const u8 *eeprom, size_t eeprom_size) -{ - int n_channels = iwl_init_channel_map(dev, cfg, data, - eeprom, eeprom_size); - int n_used = 0; - struct ieee80211_supported_band *sband; - - sband = &data->bands[IEEE80211_BAND_2GHZ]; - sband->band = IEEE80211_BAND_2GHZ; - sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; - sband->n_bitrates = N_RATES_24; - n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, - data->valid_tx_ant, data->valid_rx_ant); - - sband = &data->bands[IEEE80211_BAND_5GHZ]; - sband->band = IEEE80211_BAND_5GHZ; - sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; - sband->n_bitrates = N_RATES_52; - n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, - data->valid_tx_ant, data->valid_rx_ant); - - if (n_channels != n_used) - IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n", - n_used, n_channels); -} - -/* EEPROM data functions */ - -struct iwl_nvm_data * -iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, - const u8 *eeprom, size_t eeprom_size) -{ - struct iwl_nvm_data *data; - const void *tmp; - u16 radio_cfg, sku; - - if (WARN_ON(!cfg || !cfg->eeprom_params)) - return NULL; - - data = kzalloc(sizeof(*data) + - sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, - GFP_KERNEL); - if (!data) - return NULL; - - /* get MAC address(es) */ - tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS); - if (!tmp) - goto err_free; - memcpy(data->hw_addr, tmp, ETH_ALEN); - data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_NUM_MAC_ADDRESS); - - if (iwl_eeprom_read_calib(eeprom, eeprom_size, data)) - goto err_free; - - tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL); - if (!tmp) - goto err_free; - memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib)); - - tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, - EEPROM_RAW_TEMPERATURE); - if (!tmp) - goto err_free; - data->raw_temperature = *(__le16 *)tmp; - - tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, - EEPROM_KELVIN_TEMPERATURE); - if (!tmp) - goto err_free; - data->kelvin_temperature = *(__le16 *)tmp; - data->kelvin_voltage = *((__le16 *)tmp + 1); - - radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_RADIO_CONFIG); - data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg); - data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg); - data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg); - data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg); - data->valid_rx_ant = 
EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); - data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); - - sku = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_SKU_CAP); - data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE; - data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE; - data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ; - data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ; - data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE; - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) - data->sku_cap_11n_enable = false; - - data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_VERSION); - - /* check overrides (some devices have wrong EEPROM) */ - if (cfg->valid_tx_ant) - data->valid_tx_ant = cfg->valid_tx_ant; - if (cfg->valid_rx_ant) - data->valid_rx_ant = cfg->valid_rx_ant; - - if (!data->valid_tx_ant || !data->valid_rx_ant) { - IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n", - data->valid_tx_ant, data->valid_rx_ant); - goto err_free; - } - - iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size); - - return data; - err_free: - kfree(data); - return NULL; -} -IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data); - -/* helper functions */ -int iwl_nvm_check_version(struct iwl_nvm_data *data, - struct iwl_trans *trans) -{ - if (data->nvm_version >= trans->cfg->nvm_ver || - data->calib_version >= trans->cfg->nvm_calib_ver) { - IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n", - data->nvm_version, data->calib_version); - return 0; - } - - IWL_ERR(trans, - "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", - data->nvm_version, trans->cfg->nvm_ver, - data->calib_version, trans->cfg->nvm_calib_ver); - return -EINVAL; -} -IWL_EXPORT_SYMBOL(iwl_nvm_check_version); diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h deleted file mode 100644 index 750c8c9ee70d..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h +++ /dev/null @@ -1,144 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ -#ifndef __iwl_eeprom_parse_h__ -#define __iwl_eeprom_parse_h__ - -#include -#include -#include "iwl-trans.h" - -struct iwl_nvm_data { - int n_hw_addrs; - u8 hw_addr[ETH_ALEN]; - - u8 calib_version; - __le16 calib_voltage; - - __le16 raw_temperature; - __le16 kelvin_temperature; - __le16 kelvin_voltage; - __le16 xtal_calib[2]; - - bool sku_cap_band_24GHz_enable; - bool sku_cap_band_52GHz_enable; - bool sku_cap_11n_enable; - bool sku_cap_11ac_enable; - bool sku_cap_amt_enable; - bool sku_cap_ipan_enable; - bool sku_cap_mimo_disabled; - - u16 radio_cfg_type; - u8 radio_cfg_step; - u8 radio_cfg_dash; - u8 radio_cfg_pnum; - u8 valid_tx_ant, valid_rx_ant; - - u32 nvm_version; - s8 max_tx_pwr_half_dbm; - - bool lar_enabled; - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; - struct ieee80211_channel channels[]; -}; - -/** - * iwl_parse_eeprom_data - parse EEPROM data and return values - * - * @dev: device pointer we're parsing for, for debug only - * @cfg: device configuration for parsing and overrides - * @eeprom: the EEPROM data - * @eeprom_size: length of the EEPROM data - * - * This function parses all EEPROM values we need and then - * returns a (newly allocated) struct containing all the - * relevant values for driver use. The struct must be freed - * later with iwl_free_nvm_data(). 
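 *
 * Illustrative call sequence (editor's sketch; everything except
 * iwl_parse_eeprom_data() and iwl_free_nvm_data() is an assumed
 * caller-side name):
 *
 *	struct iwl_nvm_data *data;
 *
 *	data = iwl_parse_eeprom_data(trans->dev, trans->cfg,
 *				     eeprom, eeprom_size);
 *	if (!data)
 *		return -EINVAL;
 *	... use data->hw_addr, data->bands, data->channels ...
 *	iwl_free_nvm_data(data);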
- */ -struct iwl_nvm_data * -iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, - const u8 *eeprom, size_t eeprom_size); - -/** - * iwl_free_nvm_data - free NVM data - * @data: the data to free - */ -static inline void iwl_free_nvm_data(struct iwl_nvm_data *data) -{ - kfree(data); -} - -int iwl_nvm_check_version(struct iwl_nvm_data *data, - struct iwl_trans *trans); - -int iwl_init_sband_channels(struct iwl_nvm_data *data, - struct ieee80211_supported_band *sband, - int n_channels, enum ieee80211_band band); - -void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band, - u8 tx_chains, u8 rx_chains); - -#endif /* __iwl_eeprom_parse_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c deleted file mode 100644 index 219ca8acca62..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c +++ /dev/null @@ -1,464 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ -#include -#include -#include - -#include "iwl-drv.h" -#include "iwl-debug.h" -#include "iwl-eeprom-read.h" -#include "iwl-io.h" -#include "iwl-prph.h" -#include "iwl-csr.h" - -/* - * EEPROM access time values: - * - * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. - * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). - * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. - * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. - */ -#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ - -#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ -#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - - -/* - * The device's EEPROM semaphore prevents conflicts between driver and uCode - * when accessing the EEPROM; each access is a series of pulses to/from the - * EEPROM chip, not a single event, so even reads could conflict if they - * weren't arbitrated by the semaphore. - */ - -#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ -#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - -static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) -{ - u16 count; - int ret; - - for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { - /* Request semaphore */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); - - /* See if we got it */ - ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - EEPROM_SEM_TIMEOUT); - if (ret >= 0) { - IWL_DEBUG_EEPROM(trans->dev, - "Acquired semaphore after %d tries.\n", - count+1); - return ret; - } - } - - return ret; -} - -static void iwl_eeprom_release_semaphore(struct iwl_trans *trans) -{ - iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); -} - -static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp) -{ - u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; - - IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp); - - switch (gp) { - case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: - if (!nvm_is_otp) { - IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", - gp); - return -ENOENT; - } - return 0; - case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: - case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: - if (nvm_is_otp) { - IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); - return -ENOENT; - } - return 0; - case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: - default: - IWL_ERR(trans, - "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n", - nvm_is_otp ? 
"OTP" : "EEPROM", gp); - return -ENOENT; - } -} - -/****************************************************************************** - * - * OTP related functions - * -******************************************************************************/ - -static void iwl_set_otp_access_absolute(struct iwl_trans *trans) -{ - iwl_read32(trans, CSR_OTP_GP_REG); - - iwl_clear_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_OTP_ACCESS_MODE); -} - -static int iwl_nvm_is_otp(struct iwl_trans *trans) -{ - u32 otpgp; - - /* OTP only valid for CP/PP and after */ - switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) { - case CSR_HW_REV_TYPE_NONE: - IWL_ERR(trans, "Unknown hardware type\n"); - return -EIO; - case CSR_HW_REV_TYPE_5300: - case CSR_HW_REV_TYPE_5350: - case CSR_HW_REV_TYPE_5100: - case CSR_HW_REV_TYPE_5150: - return 0; - default: - otpgp = iwl_read32(trans, CSR_OTP_GP_REG); - if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) - return 1; - return 0; - } -} - -static int iwl_init_otp_access(struct iwl_trans *trans) -{ - int ret; - - /* Enable 40MHz radio clock */ - iwl_write32(trans, CSR_GP_CNTRL, - iwl_read32(trans, CSR_GP_CNTRL) | - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - /* wait for clock to be ready */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - 25000); - if (ret < 0) { - IWL_ERR(trans, "Time out access OTP\n"); - } else { - iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - udelay(5); - iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - - /* - * CSR auto clock gate disable bit - - * this is only applicable for HW with OTP shadow RAM - */ - if (trans->cfg->base_params->shadow_ram_support) - iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - } - return ret; -} - -static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr, - __le16 *eeprom_data) -{ - int ret = 0; - u32 r; - u32 otpgp; - - iwl_write32(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - ret = iwl_poll_bit(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_READ_VALID_MSK, - CSR_EEPROM_REG_READ_VALID_MSK, - IWL_EEPROM_ACCESS_TIMEOUT); - if (ret < 0) { - IWL_ERR(trans, "Time out reading OTP[%d]\n", addr); - return ret; - } - r = iwl_read32(trans, CSR_EEPROM_REG); - /* check for ECC errors: */ - otpgp = iwl_read32(trans, CSR_OTP_GP_REG); - if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { - /* stop in this case */ - /* set the uncorrectable OTP ECC bit for acknowledgment */ - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); - IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n"); - return -EINVAL; - } - if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { - /* continue in this case */ - /* set the correctable OTP ECC bit for acknowledgment */ - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); - IWL_ERR(trans, "Correctable OTP ECC error, continue read\n"); - } - *eeprom_data = cpu_to_le16(r >> 16); - return 0; -} - -/* - * iwl_is_otp_empty: check for empty OTP - */ -static bool iwl_is_otp_empty(struct iwl_trans *trans) -{ - u16 next_link_addr = 0; - __le16 link_value; - bool is_empty = false; - - /* locate the beginning of OTP link list */ - if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) { - if (!link_value) { - IWL_ERR(trans, "OTP is empty\n"); - is_empty = true; - } - } else { - IWL_ERR(trans, "Unable to read first block of OTP list.\n"); - is_empty = true; - } - - return is_empty; -} - - -/* - * 
iwl_find_otp_image: find EEPROM image in OTP - * finding the OTP block that contains the EEPROM image. - * the last valid block on the link list (the block _before_ the last block) - * is the block we should read and used to configure the device. - * If all the available OTP blocks are full, the last block will be the block - * we should read and used to configure the device. - * only perform this operation if shadow RAM is disabled - */ -static int iwl_find_otp_image(struct iwl_trans *trans, - u16 *validblockaddr) -{ - u16 next_link_addr = 0, valid_addr; - __le16 link_value = 0; - int usedblocks = 0; - - /* set addressing mode to absolute to traverse the link list */ - iwl_set_otp_access_absolute(trans); - - /* checking for empty OTP or error */ - if (iwl_is_otp_empty(trans)) - return -EINVAL; - - /* - * start traverse link list - * until reach the max number of OTP blocks - * different devices have different number of OTP blocks - */ - do { - /* save current valid block address - * check for more block on the link list - */ - valid_addr = next_link_addr; - next_link_addr = le16_to_cpu(link_value) * sizeof(u16); - IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", - usedblocks, next_link_addr); - if (iwl_read_otp_word(trans, next_link_addr, &link_value)) - return -EINVAL; - if (!link_value) { - /* - * reach the end of link list, return success and - * set address point to the starting address - * of the image - */ - *validblockaddr = valid_addr; - /* skip first 2 bytes (link list pointer) */ - *validblockaddr += 2; - return 0; - } - /* more in the link list, continue */ - usedblocks++; - } while (usedblocks <= trans->cfg->base_params->max_ll_items); - - /* OTP has no valid blocks */ - IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n"); - return -EINVAL; -} - -/** - * iwl_read_eeprom - read EEPROM contents - * - * Load the EEPROM contents from adapter and return it - * and its size. - * - * NOTE: This routine uses the non-debug IO access functions. 
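 *
 * Illustrative usage (editor's sketch, assuming the caller owns the
 * returned buffer and releases it with kfree() after parsing):
 *
 *	u8 *eeprom;
 *	size_t eeprom_size;
 *	struct iwl_nvm_data *data;
 *	int ret;
 *
 *	ret = iwl_read_eeprom(trans, &eeprom, &eeprom_size);
 *	if (ret)
 *		return ret;
 *	data = iwl_parse_eeprom_data(trans->dev, trans->cfg,
 *				     eeprom, eeprom_size);
 *	kfree(eeprom);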
- */ -int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) -{ - __le16 *e; - u32 gp = iwl_read32(trans, CSR_EEPROM_GP); - int sz; - int ret; - u16 addr; - u16 validblockaddr = 0; - u16 cache_addr = 0; - int nvm_is_otp; - - if (!eeprom || !eeprom_size) - return -EINVAL; - - nvm_is_otp = iwl_nvm_is_otp(trans); - if (nvm_is_otp < 0) - return nvm_is_otp; - - sz = trans->cfg->base_params->eeprom_size; - IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz); - - e = kmalloc(sz, GFP_KERNEL); - if (!e) - return -ENOMEM; - - ret = iwl_eeprom_verify_signature(trans, nvm_is_otp); - if (ret < 0) { - IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); - goto err_free; - } - - /* Make sure driver (instead of uCode) is allowed to read EEPROM */ - ret = iwl_eeprom_acquire_semaphore(trans); - if (ret < 0) { - IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); - goto err_free; - } - - if (nvm_is_otp) { - ret = iwl_init_otp_access(trans); - if (ret) { - IWL_ERR(trans, "Failed to initialize OTP access.\n"); - goto err_unlock; - } - - iwl_write32(trans, CSR_EEPROM_GP, - iwl_read32(trans, CSR_EEPROM_GP) & - ~CSR_EEPROM_GP_IF_OWNER_MSK); - - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | - CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); - /* traversing the linked list if no shadow ram supported */ - if (!trans->cfg->base_params->shadow_ram_support) { - ret = iwl_find_otp_image(trans, &validblockaddr); - if (ret) - goto err_unlock; - } - for (addr = validblockaddr; addr < validblockaddr + sz; - addr += sizeof(u16)) { - __le16 eeprom_data; - - ret = iwl_read_otp_word(trans, addr, &eeprom_data); - if (ret) - goto err_unlock; - e[cache_addr / 2] = eeprom_data; - cache_addr += sizeof(u16); - } - } else { - /* eeprom is an array of 16bit values */ - for (addr = 0; addr < sz; addr += sizeof(u16)) { - u32 r; - - iwl_write32(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - - ret = iwl_poll_bit(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_READ_VALID_MSK, - CSR_EEPROM_REG_READ_VALID_MSK, - IWL_EEPROM_ACCESS_TIMEOUT); - if (ret < 0) { - IWL_ERR(trans, - "Time out reading EEPROM[%d]\n", addr); - goto err_unlock; - } - r = iwl_read32(trans, CSR_EEPROM_REG); - e[addr / 2] = cpu_to_le16(r >> 16); - } - } - - IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", - nvm_is_otp ? "OTP" : "EEPROM"); - - iwl_eeprom_release_semaphore(trans); - - *eeprom_size = sz; - *eeprom = (u8 *)e; - return 0; - - err_unlock: - iwl_eeprom_release_semaphore(trans); - err_free: - kfree(e); - - return ret; -} -IWL_EXPORT_SYMBOL(iwl_read_eeprom); diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h deleted file mode 100644 index a6d3bdf82cdd..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h +++ /dev/null @@ -1,70 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __iwl_eeprom_h__ -#define __iwl_eeprom_h__ - -#include "iwl-trans.h" - -int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size); - -#endif /* __iwl_eeprom_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h deleted file mode 100644 index d56064861a9c..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ /dev/null @@ -1,535 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_fh_h__ -#define __iwl_fh_h__ - -#include - -/****************************/ -/* Flow Handler Definitions */ -/****************************/ - -/** - * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) - * Addresses are offsets from device's PCI hardware base address. - */ -#define FH_MEM_LOWER_BOUND (0x1000) -#define FH_MEM_UPPER_BOUND (0x2000) - -/** - * Keep-Warm (KW) buffer base address. - * - * Driver must allocate a 4KByte buffer that is for keeping the - * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency - * DRAM access when doing Txing or Rxing. The dummy accesses prevent host - * from going into a power-savings mode that would cause higher DRAM latency, - * and possible data over/under-runs, before all Tx/Rx is complete. - * - * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) - * of the buffer, which must be 4K aligned. Once this is set up, the device - * automatically invokes keep-warm accesses when normal accesses might not - * be sufficient to maintain fast DRAM response. 
- * - * Bit fields: - * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned - */ -#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) - - -/** - * TFD Circular Buffers Base (CBBC) addresses - * - * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident - * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) - * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 - * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte - * aligned (address bits 0-7 must be 0). - * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers - * for them are in different places. - * - * Bit fields in each pointer register: - * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned - */ -#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) -#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) -#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0) -#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) -#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) -#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80) - -/* Find TFD CB base pointer for given queue */ -static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) -{ - if (chnl < 16) - return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl; - if (chnl < 20) - return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16); - WARN_ON_ONCE(chnl >= 32); - return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20); -} - - -/** - * Rx SRAM Control and Status Registers (RSCSR) - * - * These registers provide handshake between driver and device for the Rx queue - * (this queue handles *all* command responses, notifications, Rx data, etc. - * sent from uCode to host driver). Unlike Tx, there is only one Rx - * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can - * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer - * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 - * mapping between RBDs and RBs. - * - * Driver must allocate host DRAM memory for the following, and set the - * physical address of each into device registers: - * - * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 - * entries (although any power of 2, up to 4096, is selectable by driver). - * Each entry (1 dword) points to a receive buffer (RB) of consistent size - * (typically 4K, although 8K or 16K are also selectable by driver). - * Driver sets up RB size and number of RBDs in the CB via Rx config - * register FH_MEM_RCSR_CHNL0_CONFIG_REG. - * - * Bit fields within one RBD: - * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned - * - * Driver sets physical address [35:8] of base of RBD circular buffer - * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. - * - * 2) Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers - * (RBs) have been filled, via a "write pointer", actually the index of - * the RB's corresponding RBD within the circular buffer. Driver sets - * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. 
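A note on the CBBC mapping above: the three register banks are not contiguous (and the 20-31 bank actually sits at a lower offset than the 16-19 bank), which is why FH_MEM_CBBC_QUEUE() is an inline function rather than simple base-plus-offset arithmetic. The standalone sketch below reproduces the same mapping so it can be sanity-checked outside the driver; it uses only the constants already defined above.

    #include <assert.h>
    #include <stdio.h>

    #define FH_MEM_LOWER_BOUND              0x1000
    #define FH_MEM_CBBC_0_15_LOWER_BOUND    (FH_MEM_LOWER_BOUND + 0x9D0)
    #define FH_MEM_CBBC_16_19_LOWER_BOUND   (FH_MEM_LOWER_BOUND + 0xBF0)
    #define FH_MEM_CBBC_20_31_LOWER_BOUND   (FH_MEM_LOWER_BOUND + 0xB20)

    /* Same logic as FH_MEM_CBBC_QUEUE(): 4 bytes per queue, three banks. */
    static unsigned int fh_mem_cbbc_queue(unsigned int chnl)
    {
        if (chnl < 16)
            return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
        if (chnl < 20)
            return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
        return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
    }

    int main(void)
    {
        assert(fh_mem_cbbc_queue(0)  == 0x19D0);
        assert(fh_mem_cbbc_queue(15) == 0x1A0C);   /* last slot of bank 0 */
        assert(fh_mem_cbbc_queue(16) == 0x1BF0);   /* bank 1 */
        assert(fh_mem_cbbc_queue(20) == 0x1B20);   /* bank 2, below bank 1 */
        printf("queue 31 -> 0x%x\n", fh_mem_cbbc_queue(31));   /* 0x1b4c */
        return 0;
    }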
- * - * Bit fields in lower dword of Rx status buffer (upper dword not used - * by driver: - * 31-12: Not used by driver - * 11- 0: Index of last filled Rx buffer descriptor - * (device writes, driver reads this value) - * - * As the driver prepares Receive Buffers (RBs) for device to fill, driver must - * enter pointers to these RBs into contiguous RBD circular buffer entries, - * and update the device's "write" index register, - * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. - * - * This "write" index corresponds to the *next* RBD that the driver will make - * available, i.e. one RBD past the tail of the ready-to-fill RBDs within - * the circular buffer. This value should initially be 0 (before preparing any - * RBs), should be 8 after preparing the first 8 RBs (for example), and must - * wrap back to 0 at the end of the circular buffer (but don't wrap before - * "read" index has advanced past 1! See below). - * NOTE: DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. - * - * As the device fills RBs (referenced from contiguous RBDs within the circular - * buffer), it updates the Rx status buffer in host DRAM, 2) described above, - * to tell the driver the index of the latest filled RBD. The driver must - * read this "read" index from DRAM after receiving an Rx interrupt from device - * - * The driver must also internally keep track of a third index, which is the - * next RBD to process. When receiving an Rx interrupt, driver should process - * all filled but unprocessed RBs up to, but not including, the RB - * corresponding to the "read" index. For example, if "read" index becomes "1", - * driver may process the RB pointed to by RBD 0. Depending on volume of - * traffic, there may be many RBs to process. - * - * If read index == write index, device thinks there is no room to put new data. - * Due to this, the maximum number of filled RBs is 255, instead of 256. To - * be safe, make sure that there is a gap of at least 2 RBDs between "write" - * and "read" indexes; that is, make sure that there are no more than 254 - * buffers waiting to be filled. - */ -#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) -#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) -#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) - -/** - * Physical base address of 8-byte Rx Status buffer. - * Bit fields: - * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. - */ -#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) - -/** - * Physical base address of Rx Buffer Descriptor Circular Buffer. - * Bit fields: - * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. - */ -#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) - -/** - * Rx write pointer (index, really!). - * Bit fields: - * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. - * NOTE: For 256-entry circular buffer, use only bits [7:0]. - */ -#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) -#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) - -#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c) -#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG - -/** - * Rx Config/Status Registers (RCSR) - * Rx Config Reg for channel 0 (only channel used) - * - * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for - * normal operation (see bit fields). - * - * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. 
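The write-index rules described above (the device reads the index in multiples of 8, the index wraps at 256, and no more than 254 buffers may be outstanding) reduce to a little modular arithmetic. Below is a loose sketch of a driver-side refill step under those rules; struct rxq, rxq_used() and rxq_refill() are illustrative names, not the driver's own, and real code additionally has to manage the RB allocations and DMA mappings.

    #include <stdio.h>

    #define RX_QUEUE_SIZE   256
    #define RX_QUEUE_MASK   255

    struct rxq {                    /* illustrative only */
        unsigned int write;         /* next RBD the driver will hand out */
        unsigned int read;          /* index reported back by the device */
    };

    /* RBDs currently outstanding to the device. */
    static unsigned int rxq_used(const struct rxq *q)
    {
        return (q->write - q->read) & RX_QUEUE_MASK;
    }

    /* Make up to 'n' freshly prepared RBs available, keeping at most 254
     * outstanding, and return the value to write to the register, rounded
     * down to a multiple of 8 as the device expects. */
    static unsigned int rxq_refill(struct rxq *q, unsigned int n)
    {
        while (n-- && rxq_used(q) < RX_QUEUE_SIZE - 2)
            q->write = (q->write + 1) & RX_QUEUE_MASK;

        return q->write & ~7u;      /* FH_RSCSR_CHNL0_RBDCB_WPTR_REG value */
    }

    int main(void)
    {
        struct rxq q = { 0, 0 };

        printf("register value after preparing 12 RBs: %u\n",
               rxq_refill(&q, 12));    /* prints 8 */
        return 0;
    }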
- * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for - * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. - * - * Bit fields: - * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, - * '10' operate normally - * 29-24: reserved - * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), - * min "5" for 32 RBDs, max "12" for 4096 RBDs. - * 19-18: reserved - * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, - * '10' 12K, '11' 16K. - * 15-14: reserved - * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) - * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) - * typical value 0x10 (about 1/2 msec) - * 3- 0: reserved - */ -#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) -#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) -#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) - -#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) -#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR (FH_MEM_RCSR_CHNL0 + 0x8) -#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ (FH_MEM_RCSR_CHNL0 + 0x10) - -#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ -#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ -#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ -#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ -#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ -#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ - -#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) -#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) -#define RX_RB_TIMEOUT (0x11) - -#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) -#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) -#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) - -#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) -#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) -#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) -#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) - -#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) -#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) -#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) - -/** - * Rx Shared Status Registers (RSSR) - * - * After stopping Rx DMA channel (writing 0 to - * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll - * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. - * - * Bit fields: - * 24: 1 = Channel 0 is idle - * - * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV - * contain default values that should not be altered by the driver. 
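To make the bit-field table above concrete, the "normal operation" configuration it describes (DMA enabled, 256 RBDs, 4 KB buffers, interrupts routed to the host, the usual close timeout) is a single OR of the constants defined just below that comment. The sketch combines them and prints the result; the exact value programmed by the driver can differ per device generation, so treat this as an illustration of the field layout rather than the canonical setting.

    #include <stdio.h>

    #define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            0x80000000u
    #define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            0x00000000u
    #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   0x00001000u
    #define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS                20
    #define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS              4
    #define RX_RB_TIMEOUT                                   0x11

    int main(void)
    {
        unsigned int rbdcb_size_log = 8;    /* 2^8 = 256 RBDs (bits 23:20) */

        unsigned int cfg = FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           (rbdcb_size_log << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
                           (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS);

        printf("FH_MEM_RCSR_CHNL0_CONFIG_REG = 0x%08x\n", cfg);   /* 0x80801110 */
        return 0;
    }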
- */ -#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) -#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) - -#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) -#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) -#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ - (FH_MEM_RSSR_LOWER_BOUND + 0x008) - -#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) - -#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 -#define FH_MEM_TB_MAX_LENGTH (0x00020000) - -/* TFDB Area - TFDs buffer table */ -#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) -#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) -#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) -#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) -#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) - -/** - * Transmit DMA Channel Control/Status Registers (TCSR) - * - * Device has one configuration register for each of 8 Tx DMA/FIFO channels - * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, - * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. - * - * To use a Tx DMA channel, driver must initialize its - * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: - * - * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL - * - * All other bits should be 0. - * - * Bit fields: - * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, - * '10' operate normally - * 29- 4: Reserved, set to "0" - * 3: Enable internal DMA requests (1, normal operation), disable (0) - * 2- 0: Reserved, set to "0" - */ -#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) -#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) - -/* Find Control/Status reg for given Tx DMA/FIFO channel */ -#define FH_TCSR_CHNL_NUM (8) - -/* TCSR: tx_config register values */ -#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ - (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) -#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ - (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) -#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ - (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) - -#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) - -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) - -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) - -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) -#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) - -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) -#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) - -#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) -#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) -#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) - -#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) -#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) - -/** - * Tx Shared Status Registers (TSSR) - * - * After stopping Tx DMA channel (writing 0 to - * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll - * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle - * 
(channel's buffers empty | no pending requests). - * - * Bit fields: - * 31-24: 1 = Channel buffers empty (channel 7:0) - * 23-16: 1 = No pending requests (channel 7:0) - */ -#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) -#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) - -#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) - -/** - * Bit fields for TSSR(Tx Shared Status & Control) error status register: - * 31: Indicates an address error when accessed to internal memory - * uCode/driver must write "1" in order to clear this flag - * 30: Indicates that Host did not send the expected number of dwords to FH - * uCode/driver must write "1" in order to clear this flag - * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA - * command was received from the scheduler while the TRB was already full - * with previous command - * uCode/driver must write "1" in order to clear this flag - * 7-0: Each status bit indicates a channel's TxCredit error. When an error - * bit is set, it indicates that the FH has received a full indication - * from the RTC TxFIFO and the current value of the TxCredit counter was - * not equal to zero. This mean that the credit mechanism was not - * synchronized to the TxFIFO status - * uCode/driver must write "1" in order to clear this flag - */ -#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) -#define FH_TSSR_TX_MSG_CONFIG_REG (FH_TSSR_LOWER_BOUND + 0x008) - -#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) - -/* Tx service channels */ -#define FH_SRVC_CHNL (9) -#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8) -#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) -#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ - (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) - -#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) -#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4) - -/* Instruct FH to increment the retry count of a packet when - * it is brought from the memory to TX-FIFO - */ -#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) - -#define RX_QUEUE_SIZE 256 -#define RX_QUEUE_MASK 255 -#define RX_QUEUE_SIZE_LOG 8 - -/** - * struct iwl_rb_status - reserve buffer status - * host memory mapped FH registers - * @closed_rb_num [0:11] - Indicates the index of the RB which was closed - * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed - * @finished_rb_num [0:11] - Indicates the index of the current RB - * in which the last frame was written to - * @finished_fr_num [0:11] - Indicates the index of the RX Frame - * which was transferred - */ -struct iwl_rb_status { - __le16 closed_rb_num; - __le16 closed_fr_num; - __le16 finished_rb_num; - __le16 finished_fr_nam; - __le32 __unused; -} __packed; - - -#define TFD_QUEUE_SIZE_MAX (256) -#define TFD_QUEUE_SIZE_BC_DUP (64) -#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) -#define IWL_TX_DMA_MASK DMA_BIT_MASK(36) -#define IWL_NUM_OF_TBS 20 - -static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr) -{ - return (sizeof(addr) > sizeof(u32) ? 
(addr >> 16) >> 16 : 0) & 0xF; -} -/** - * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor - * - * This structure contains dma address and length of transmission address - * - * @lo: low [31:0] portion of the dma address of TX buffer - * every even is unaligned on 16 bit boundary - * @hi_n_len 0-3 [35:32] portion of dma - * 4-15 length of the tx buffer - */ -struct iwl_tfd_tb { - __le32 lo; - __le16 hi_n_len; -} __packed; - -/** - * struct iwl_tfd - * - * Transmit Frame Descriptor (TFD) - * - * @ __reserved1[3] reserved - * @ num_tbs 0-4 number of active tbs - * 5 reserved - * 6-7 padding (not used) - * @ tbs[20] transmit frame buffer descriptors - * @ __pad padding - * - * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. - * Both driver and device share these circular buffers, each of which must be - * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes - * - * Driver must indicate the physical address of the base of each - * circular buffer via the FH_MEM_CBBC_QUEUE registers. - * - * Each TFD contains pointer/size information for up to 20 data buffers - * in host DRAM. These buffers collectively contain the (one) frame described - * by the TFD. Each buffer must be a single contiguous block of memory within - * itself, but buffers may be scattered in host DRAM. Each buffer has max size - * of (4K - 4). The concatenates all of a TFD's buffers into a single - * Tx frame, up to 8 KBytes in size. - * - * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. - */ -struct iwl_tfd { - u8 __reserved1[3]; - u8 num_tbs; - struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; - __le32 __pad; -} __packed; - -/* Keep Warm Size */ -#define IWL_KW_SIZE 0x1000 /* 4k */ - -/* Fixed (non-configurable) rx data from phy */ - -/** - * struct iwlagn_schedq_bc_tbl scheduler byte count table - * base physical address provided by SCD_DRAM_BASE_ADDR - * @tfd_offset 0-12 - tx command byte count - * 12-16 - station index - */ -struct iwlagn_scd_bc_tbl { - __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; -} __packed; - -#endif /* !__iwl_fh_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h deleted file mode 100644 index 9dbe19cbb4dd..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ /dev/null @@ -1,320 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
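Going back to the TFD buffer descriptor documented above: lo and hi_n_len together pack a 36-bit DMA address and a 12-bit length into six bytes, with iwl_get_dma_hi_addr() supplying the top nibble. A host-endian sketch of the pack/unpack math follows (the real fields are little-endian, and tb_pack()/tb_addr()/tb_len() are illustrative helpers, not driver API).

    #include <assert.h>
    #include <stdint.h>

    struct tfd_tb {                 /* host-endian sketch of struct iwl_tfd_tb */
        uint32_t lo;                /* address bits 31:0 */
        uint16_t hi_n_len;          /* bits 3:0 = address 35:32, bits 15:4 = length */
    };

    static struct tfd_tb tb_pack(uint64_t addr, uint16_t len)
    {
        struct tfd_tb tb = {
            .lo = (uint32_t)addr,
            .hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | (len << 4)),
        };
        return tb;
    }

    static uint64_t tb_addr(struct tfd_tb tb)
    {
        return ((uint64_t)(tb.hi_n_len & 0xF) << 32) | tb.lo;
    }

    static uint16_t tb_len(struct tfd_tb tb)
    {
        return tb.hi_n_len >> 4;
    }

    int main(void)
    {
        uint64_t addr = 0xD12345678ull;     /* any 36-bit DMA address */
        uint16_t len = 4092;                /* max per-buffer size, 4K - 4 */
        struct tfd_tb tb = tb_pack(addr, len);

        assert(tb_addr(tb) == addr);
        assert(tb_len(tb) == len);
        return 0;
    }

The 4-bit ceiling on the high part is also why IWL_TX_DMA_MASK is DMA_BIT_MASK(36): anything above bit 35 has nowhere to go in the descriptor.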
- * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __fw_error_dump_h__ -#define __fw_error_dump_h__ - -#include - -#define IWL_FW_ERROR_DUMP_BARKER 0x14789632 - -/** - * enum iwl_fw_error_dump_type - types of data in the dump file - * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0 - * @IWL_FW_ERROR_DUMP_RXF: - * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as - * &struct iwl_fw_error_dump_txcmd packets - * @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info - * info on the device / firmware. - * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor - * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several - * sections like this in a single file. - * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers - * @IWL_FW_ERROR_DUMP_MEM: chunk of memory - * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump. - * Structured as &struct iwl_fw_error_dump_trigger_desc. - * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as - * &struct iwl_fw_error_dump_rb - * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were - * paged to the DRAM. 
- */ -enum iwl_fw_error_dump_type { - /* 0 is deprecated */ - IWL_FW_ERROR_DUMP_CSR = 1, - IWL_FW_ERROR_DUMP_RXF = 2, - IWL_FW_ERROR_DUMP_TXCMD = 3, - IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4, - IWL_FW_ERROR_DUMP_FW_MONITOR = 5, - IWL_FW_ERROR_DUMP_PRPH = 6, - IWL_FW_ERROR_DUMP_TXF = 7, - IWL_FW_ERROR_DUMP_FH_REGS = 8, - IWL_FW_ERROR_DUMP_MEM = 9, - IWL_FW_ERROR_DUMP_ERROR_INFO = 10, - IWL_FW_ERROR_DUMP_RB = 11, - IWL_FW_ERROR_DUMP_PAGING = 12, - - IWL_FW_ERROR_DUMP_MAX, -}; - -/** - * struct iwl_fw_error_dump_data - data for one type - * @type: %enum iwl_fw_error_dump_type - * @len: the length starting from %data - * @data: the data itself - */ -struct iwl_fw_error_dump_data { - __le32 type; - __le32 len; - __u8 data[]; -} __packed; - -/** - * struct iwl_fw_error_dump_file - the layout of the header of the file - * @barker: must be %IWL_FW_ERROR_DUMP_BARKER - * @file_len: the length of all the file starting from %barker - * @data: array of %struct iwl_fw_error_dump_data - */ -struct iwl_fw_error_dump_file { - __le32 barker; - __le32 file_len; - u8 data[0]; -} __packed; - -/** - * struct iwl_fw_error_dump_txcmd - TX command data - * @cmdlen: original length of command - * @caplen: captured length of command (may be less) - * @data: captured command data, @caplen bytes - */ -struct iwl_fw_error_dump_txcmd { - __le32 cmdlen; - __le32 caplen; - u8 data[]; -} __packed; - -/** - * struct iwl_fw_error_dump_fifo - RX/TX FIFO data - * @fifo_num: number of FIFO (starting from 0) - * @available_bytes: num of bytes available in FIFO (may be less than FIFO size) - * @wr_ptr: position of write pointer - * @rd_ptr: position of read pointer - * @fence_ptr: position of fence pointer - * @fence_mode: the current mode of the fence (before locking) - - * 0=follow RD pointer ; 1 = freeze - * @data: all of the FIFO's data - */ -struct iwl_fw_error_dump_fifo { - __le32 fifo_num; - __le32 available_bytes; - __le32 wr_ptr; - __le32 rd_ptr; - __le32 fence_ptr; - __le32 fence_mode; - u8 data[]; -} __packed; - -enum iwl_fw_error_dump_family { - IWL_FW_ERROR_DUMP_FAMILY_7 = 7, - IWL_FW_ERROR_DUMP_FAMILY_8 = 8, -}; - -/** - * struct iwl_fw_error_dump_info - info on the device / firmware - * @device_family: the family of the device (7 / 8) - * @hw_step: the step of the device - * @fw_human_readable: human readable FW version - * @dev_human_readable: name of the device - * @bus_human_readable: name of the bus used - */ -struct iwl_fw_error_dump_info { - __le32 device_family; - __le32 hw_step; - u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ]; - u8 dev_human_readable[64]; - u8 bus_human_readable[8]; -} __packed; - -/** - * struct iwl_fw_error_dump_fw_mon - FW monitor data - * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer - * @fw_mon_base_ptr: base pointer of the data - * @fw_mon_cycle_cnt: number of wraparounds - * @reserved: for future use - * @data: captured data - */ -struct iwl_fw_error_dump_fw_mon { - __le32 fw_mon_wr_ptr; - __le32 fw_mon_base_ptr; - __le32 fw_mon_cycle_cnt; - __le32 reserved[3]; - u8 data[]; -} __packed; - -/** - * struct iwl_fw_error_dump_prph - periphery registers data - * @prph_start: address of the first register in this chunk - * @data: the content of the registers - */ -struct iwl_fw_error_dump_prph { - __le32 prph_start; - __le32 data[]; -}; - -enum iwl_fw_error_dump_mem_type { - IWL_FW_ERROR_DUMP_MEM_SRAM, - IWL_FW_ERROR_DUMP_MEM_SMEM, -}; - -/** - * struct iwl_fw_error_dump_mem - chunk of memory - * @type: %enum iwl_fw_error_dump_mem_type - * @offset: the offset from which 
the memory was read - * @data: the content of the memory - */ -struct iwl_fw_error_dump_mem { - __le32 type; - __le32 offset; - u8 data[]; -}; - -/** - * struct iwl_fw_error_dump_rb - content of an Receive Buffer - * @index: the index of the Receive Buffer in the Rx queue - * @rxq: the RB's Rx queue - * @reserved: - * @data: the content of the Receive Buffer - */ -struct iwl_fw_error_dump_rb { - __le32 index; - __le32 rxq; - __le32 reserved; - u8 data[]; -}; - -/** - * struct iwl_fw_error_dump_paging - content of the UMAC's image page - * block on DRAM - * @index: the index of the page block - * @reserved: - * @data: the content of the page block - */ -struct iwl_fw_error_dump_paging { - __le32 index; - __le32 reserved; - u8 data[]; -}; - -/** - * iwl_fw_error_next_data - advance fw error dump data pointer - * @data: previous data block - * Returns: next data block - */ -static inline struct iwl_fw_error_dump_data * -iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) -{ - return (void *)(data->data + le32_to_cpu(data->len)); -} - -/** - * enum iwl_fw_dbg_trigger - triggers available - * - * @FW_DBG_TRIGGER_USER: trigger log collection by user - * This should not be defined as a trigger to the driver, but a value the - * driver should set to indicate that the trigger was initiated by the - * user. - * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts - * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are - * missed. - * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch. - * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a - * command response or a notification. - * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event. - * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold. - * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon - * goes below a threshold. - * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang - * detection. - * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related - * events. - * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events. - */ -enum iwl_fw_dbg_trigger { - FW_DBG_TRIGGER_INVALID = 0, - FW_DBG_TRIGGER_USER, - FW_DBG_TRIGGER_FW_ASSERT, - FW_DBG_TRIGGER_MISSED_BEACONS, - FW_DBG_TRIGGER_CHANNEL_SWITCH, - FW_DBG_TRIGGER_FW_NOTIF, - FW_DBG_TRIGGER_MLME, - FW_DBG_TRIGGER_STATS, - FW_DBG_TRIGGER_RSSI, - FW_DBG_TRIGGER_TXQ_TIMERS, - FW_DBG_TRIGGER_TIME_EVENT, - FW_DBG_TRIGGER_BA, - - /* must be last */ - FW_DBG_TRIGGER_MAX, -}; - -/** - * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition - * @type: %enum iwl_fw_dbg_trigger - * @data: raw data about what happened - */ -struct iwl_fw_error_dump_trigger_desc { - __le32 type; - u8 data[]; -}; - -#endif /* __fw_error_dump_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h deleted file mode 100644 index 08303db0000f..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ /dev/null @@ -1,768 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
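Stepping back to the dump layout defined earlier in this header: after the barker, the file is a flat sequence of (type, len, data) blocks, and iwl_fw_error_next_data() is the whole iteration step. A userspace-style reader sketch is below; it assumes a little-endian host (a real tool would go through le32toh()), and walk_dump() is an illustrative name rather than anything shipped with the driver.

    #include <stdint.h>
    #include <stdio.h>

    #define IWL_FW_ERROR_DUMP_BARKER 0x14789632u

    struct dump_data {              /* mirrors struct iwl_fw_error_dump_data */
        uint32_t type;
        uint32_t len;
        uint8_t data[];
    };

    struct dump_file {              /* mirrors struct iwl_fw_error_dump_file */
        uint32_t barker;
        uint32_t file_len;
        uint8_t data[];
    };

    void walk_dump(const uint8_t *buf, size_t size)
    {
        const struct dump_file *file = (const void *)buf;
        const uint8_t *p, *end;

        if (size < sizeof(*file) || file->barker != IWL_FW_ERROR_DUMP_BARKER)
            return;

        end = buf + (file->file_len < size ? file->file_len : size);
        p = file->data;

        while (p + sizeof(struct dump_data) <= end) {
            const struct dump_data *d = (const void *)p;

            if (d->data + d->len > end)
                break;                        /* truncated block */
            printf("block type %u, %u bytes\n",
                   (unsigned)d->type, (unsigned)d->len);
            p = d->data + d->len;             /* iwl_fw_error_next_data() step */
        }
    }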
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ - -#ifndef __iwl_fw_file_h__ -#define __iwl_fw_file_h__ - -#include -#include - -/* v1/v2 uCode file layout */ -struct iwl_ucode_header { - __le32 ver; /* major/minor/API/serial */ - union { - struct { - __le32 inst_size; /* bytes of runtime code */ - __le32 data_size; /* bytes of runtime data */ - __le32 init_size; /* bytes of init code */ - __le32 init_data_size; /* bytes of init data */ - __le32 boot_size; /* bytes of bootstrap code */ - u8 data[0]; /* in same order as sizes */ - } v1; - struct { - __le32 build; /* build number */ - __le32 inst_size; /* bytes of runtime code */ - __le32 data_size; /* bytes of runtime data */ - __le32 init_size; /* bytes of init code */ - __le32 init_data_size; /* bytes of init data */ - __le32 boot_size; /* bytes of bootstrap code */ - u8 data[0]; /* in same order as sizes */ - } v2; - } u; -}; - -/* - * new TLV uCode file layout - * - * The new TLV file format contains TLVs, that each specify - * some piece of data. - */ - -enum iwl_ucode_tlv_type { - IWL_UCODE_TLV_INVALID = 0, /* unused */ - IWL_UCODE_TLV_INST = 1, - IWL_UCODE_TLV_DATA = 2, - IWL_UCODE_TLV_INIT = 3, - IWL_UCODE_TLV_INIT_DATA = 4, - IWL_UCODE_TLV_BOOT = 5, - IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ - IWL_UCODE_TLV_PAN = 7, - IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, - IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, - IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, - IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11, - IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12, - IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, - IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, - IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, - IWL_UCODE_TLV_WOWLAN_INST = 16, - IWL_UCODE_TLV_WOWLAN_DATA = 17, - IWL_UCODE_TLV_FLAGS = 18, - IWL_UCODE_TLV_SEC_RT = 19, - IWL_UCODE_TLV_SEC_INIT = 20, - IWL_UCODE_TLV_SEC_WOWLAN = 21, - IWL_UCODE_TLV_DEF_CALIB = 22, - IWL_UCODE_TLV_PHY_SKU = 23, - IWL_UCODE_TLV_SECURE_SEC_RT = 24, - IWL_UCODE_TLV_SECURE_SEC_INIT = 25, - IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26, - IWL_UCODE_TLV_NUM_OF_CPU = 27, - IWL_UCODE_TLV_CSCHEME = 28, - IWL_UCODE_TLV_API_CHANGES_SET = 29, - IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, - IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, - IWL_UCODE_TLV_PAGING = 32, - IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, - IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35, - IWL_UCODE_TLV_FW_VERSION = 36, - IWL_UCODE_TLV_FW_DBG_DEST = 38, - IWL_UCODE_TLV_FW_DBG_CONF = 39, - IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, - IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, -}; - -struct iwl_ucode_tlv { - __le32 type; /* see above */ - __le32 length; /* not including type/length fields */ - u8 data[0]; -}; - -#define IWL_TLV_UCODE_MAGIC 0x0a4c5749 -#define FW_VER_HUMAN_READABLE_SZ 64 - -struct iwl_tlv_ucode_header { - /* - * The TLV style ucode header is distinguished from - * the v1/v2 style header by first four bytes being - * zero, as such is an invalid combination of - * major/minor/API/serial versions. - */ - __le32 zero; - __le32 magic; - u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; - /* major/minor/API/serial or major in new format */ - __le32 ver; - __le32 build; - __le64 ignore; - /* - * The data contained herein has a TLV layout, - * see above for the TLV header and types. - * Note that each TLV is padded to a length - * that is a multiple of 4 for alignment. 
- */ - u8 data[0]; -}; - -/* - * ucode TLVs - * - * ability to get extension for: flags & capabilities from ucode binaries files - */ -struct iwl_ucode_api { - __le32 api_index; - __le32 api_flags; -} __packed; - -struct iwl_ucode_capa { - __le32 api_index; - __le32 api_capa; -} __packed; - -/** - * enum iwl_ucode_tlv_flag - ucode API flags - * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously - * was a separate TLV but moved here to save space. - * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID, - * treats good CRC threshold as a boolean - * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). - * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. - * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS - * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD - * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan - * offload profile config command. - * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six - * (rather than two) IPv6 addresses - * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element - * from the probe request template. - * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) - * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) - * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC - * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and - * P2P client interfaces simultaneously if they are in different bindings. - * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and - * P2P client interfaces simultaneously if they are in same bindings. - * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD - * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save - * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. - * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients - * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. - */ -enum iwl_ucode_tlv_flag { - IWL_UCODE_TLV_FLAGS_PAN = BIT(0), - IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), - IWL_UCODE_TLV_FLAGS_MFP = BIT(2), - IWL_UCODE_TLV_FLAGS_P2P = BIT(3), - IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), - IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7), - IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), - IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12), - IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15), - IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16), - IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21), - IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22), - IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23), - IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), - IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), - IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), - IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29), - IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30), -}; - -typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; - -/** - * enum iwl_ucode_tlv_api - ucode api - * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex - * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time - * longer than the passive one, which is essential for fragmented scan. - * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. 
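One practical point about the TLV container described a little further up (struct iwl_ucode_tlv and struct iwl_tlv_ucode_header): each TLV's payload is padded to a multiple of 4 bytes, so a parser must advance by the padded length, not the raw one. A minimal walk is sketched below; walk_ucode_tlvs() is an illustrative name, the input pointer is assumed to sit just past the fixed iwl_tlv_ucode_header, and a little-endian host is assumed.

    #include <stdint.h>
    #include <stdio.h>

    struct ucode_tlv {              /* mirrors struct iwl_ucode_tlv */
        uint32_t type;
        uint32_t length;            /* not including this header */
        uint8_t data[];
    };

    #define ALIGN4(x)   (((x) + 3u) & ~3u)

    void walk_ucode_tlvs(const uint8_t *p, const uint8_t *end)
    {
        while (p + sizeof(struct ucode_tlv) <= end) {
            const struct ucode_tlv *tlv = (const void *)p;
            uint32_t len = tlv->length;

            if (tlv->data + len > end)
                break;                      /* truncated file */

            printf("TLV type %u, %u byte(s)\n",
                   (unsigned)tlv->type, (unsigned)len);

            /* step over the payload and its pad bytes */
            p = tlv->data + ALIGN4(len);
        }
    }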
- * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header - * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params - * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format - * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority - * instead of 3. - * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size - * (command version 3) that supports per-chain limits - * - * @NUM_IWL_UCODE_TLV_API: number of bits used - */ -enum iwl_ucode_tlv_api { - IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3, - IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, - IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, - IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, - IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, - IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, - IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, - IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27, - - NUM_IWL_UCODE_TLV_API -#ifdef __CHECKER__ - /* sparse says it cannot increment the previous enum member */ - = 128 -#endif -}; - -typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; - -/** - * enum iwl_ucode_tlv_capa - ucode capabilities - * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 - * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory - * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. - * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer - * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM) - * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality - * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current - * tx power value into TPC Report action frame and Link Measurement Report - * action frame - * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current - * channel in DS parameter set element in probe requests. - * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in - * probe requests. - * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests - * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), - * which also implies support for the scheduler configuration command - * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching - * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command - * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command - * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload - * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics - * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running - * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different - * sources for the MCC. This TLV bit is a future replacement to - * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR - * is supported. 
- * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC - * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan - * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement - * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts - * - * @NUM_IWL_UCODE_TLV_CAPA: number of bits used - */ -enum iwl_ucode_tlv_capa { - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, - IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, - IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, - IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5, - IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, - IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10, - IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, - IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, - IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, - IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, - IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, - IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, - IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, - IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, - IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, - IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, - IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, - - NUM_IWL_UCODE_TLV_CAPA -#ifdef __CHECKER__ - /* sparse says it cannot increment the previous enum member */ - = 128 -#endif -}; - -/* The default calibrate table size if not specified by firmware file */ -#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 -#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 -#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 - -/* The default max probe length if not specified by the firmware file */ -#define IWL_DEFAULT_MAX_PROBE_LENGTH 200 - -/* - * For 16.0 uCode and above, there is no differentiation between sections, - * just an offset to the HW address. - */ -#define IWL_UCODE_SECTION_MAX 16 -#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC -#define PAGING_SEPARATOR_SECTION 0xAAAABBBB - -/* uCode version contains 4 values: Major/Minor/API/Serial */ -#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) -#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) -#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) -#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) - -/* - * Calibration control struct. - * Sent as part of the phy configuration command. - * @flow_trigger: bitmap for which calibrations to perform according to - * flow triggers. - * @event_trigger: bitmap for which calibrations to perform according to - * event triggers. 
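As a quick worked example for the packed version word used by the IWL_UCODE_MAJOR/MINOR/API/SERIAL macros above (the numbers here are made up, purely to show the field positions):

    #include <assert.h>
    #include <stdint.h>

    #define IWL_UCODE_MAJOR(ver)    (((ver) & 0xFF000000) >> 24)
    #define IWL_UCODE_MINOR(ver)    (((ver) & 0x00FF0000) >> 16)
    #define IWL_UCODE_API(ver)      (((ver) & 0x0000FF00) >> 8)
    #define IWL_UCODE_SERIAL(ver)   ((ver) & 0x000000FF)

    int main(void)
    {
        uint32_t ver = (16u << 24) | (242u << 16) | (19u << 8) | 4u;

        assert(IWL_UCODE_MAJOR(ver) == 16);
        assert(IWL_UCODE_MINOR(ver) == 242);
        assert(IWL_UCODE_API(ver) == 19);
        assert(IWL_UCODE_SERIAL(ver) == 4);
        return 0;
    }

As the iwl_tlv_ucode_header comment earlier in this file notes, the new versioning format carries only the major number in this word; the full four-field split applies to the older v1/v2 headers.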
- */ -struct iwl_tlv_calib_ctrl { - __le32 flow_trigger; - __le32 event_trigger; -} __packed; - -enum iwl_fw_phy_cfg { - FW_PHY_CFG_RADIO_TYPE_POS = 0, - FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS, - FW_PHY_CFG_RADIO_STEP_POS = 2, - FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS, - FW_PHY_CFG_RADIO_DASH_POS = 4, - FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS, - FW_PHY_CFG_TX_CHAIN_POS = 16, - FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, - FW_PHY_CFG_RX_CHAIN_POS = 20, - FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, -}; - -#define IWL_UCODE_MAX_CS 1 - -/** - * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW. - * @cipher: a cipher suite selector - * @flags: cipher scheme flags (currently reserved for a future use) - * @hdr_len: a size of MPDU security header - * @pn_len: a size of PN - * @pn_off: an offset of pn from the beginning of the security header - * @key_idx_off: an offset of key index byte in the security header - * @key_idx_mask: a bit mask of key_idx bits - * @key_idx_shift: bit shift needed to get key_idx - * @mic_len: mic length in bytes - * @hw_cipher: a HW cipher index used in host commands - */ -struct iwl_fw_cipher_scheme { - __le32 cipher; - u8 flags; - u8 hdr_len; - u8 pn_len; - u8 pn_off; - u8 key_idx_off; - u8 key_idx_mask; - u8 key_idx_shift; - u8 mic_len; - u8 hw_cipher; -} __packed; - -enum iwl_fw_dbg_reg_operator { - CSR_ASSIGN, - CSR_SETBIT, - CSR_CLEARBIT, - - PRPH_ASSIGN, - PRPH_SETBIT, - PRPH_CLEARBIT, - - INDIRECT_ASSIGN, - INDIRECT_SETBIT, - INDIRECT_CLEARBIT, - - PRPH_BLOCKBIT, -}; - -/** - * struct iwl_fw_dbg_reg_op - an operation on a register - * - * @op: %enum iwl_fw_dbg_reg_operator - * @addr: offset of the register - * @val: value - */ -struct iwl_fw_dbg_reg_op { - u8 op; - u8 reserved[3]; - __le32 addr; - __le32 val; -} __packed; - -/** - * enum iwl_fw_dbg_monitor_mode - available monitor recording modes - * - * @SMEM_MODE: monitor stores the data in SMEM - * @EXTERNAL_MODE: monitor stores the data in allocated DRAM - * @MARBH_MODE: monitor stores the data in MARBH buffer - * @MIPI_MODE: monitor outputs the data through the MIPI interface - */ -enum iwl_fw_dbg_monitor_mode { - SMEM_MODE = 0, - EXTERNAL_MODE = 1, - MARBH_MODE = 2, - MIPI_MODE = 3, -}; - -/** - * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data - * - * @version: version of the TLV - currently 0 - * @monitor_mode: %enum iwl_fw_dbg_monitor_mode - * @size_power: buffer size will be 2^(size_power + 11) - * @base_reg: addr of the base addr register (PRPH) - * @end_reg: addr of the end addr register (PRPH) - * @write_ptr_reg: the addr of the reg of the write pointer - * @wrap_count: the addr of the reg of the wrap_count - * @base_shift: shift right of the base addr reg - * @end_shift: shift right of the end addr reg - * @reg_ops: array of registers operations - * - * This parses IWL_UCODE_TLV_FW_DBG_DEST - */ -struct iwl_fw_dbg_dest_tlv { - u8 version; - u8 monitor_mode; - u8 size_power; - u8 reserved; - __le32 base_reg; - __le32 end_reg; - __le32 write_ptr_reg; - __le32 wrap_count; - u8 base_shift; - u8 end_shift; - struct iwl_fw_dbg_reg_op reg_ops[0]; -} __packed; - -struct iwl_fw_dbg_conf_hcmd { - u8 id; - u8 reserved; - __le16 len; - u8 data[0]; -} __packed; - -/** - * enum iwl_fw_dbg_trigger_mode - triggers functionalities - * - * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism - * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data - * 
@IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to - * collect only monitor data - */ -enum iwl_fw_dbg_trigger_mode { - IWL_FW_DBG_TRIGGER_START = BIT(0), - IWL_FW_DBG_TRIGGER_STOP = BIT(1), - IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2), -}; - -/** - * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger - * @IWL_FW_DBG_CONF_VIF_ANY: any vif type - * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode - * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode - * @IWL_FW_DBG_CONF_VIF_AP: AP mode - * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode - * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode - * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device - */ -enum iwl_fw_dbg_trigger_vif_type { - IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED, - IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC, - IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION, - IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP, - IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT, - IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO, - IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE, -}; - -/** - * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger - * @id: %enum iwl_fw_dbg_trigger - * @vif_type: %enum iwl_fw_dbg_trigger_vif_type - * @stop_conf_ids: bitmap of configurations this trigger relates to. - * if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding - * to the currently running configuration is set, the data should be - * collected. - * @stop_delay: how many milliseconds to wait before collecting the data - * after the STOP trigger fires. - * @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both - * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what - * configuration should be applied when the triggers kicks in. - * @occurrences: number of occurrences. 0 means the trigger will never fire. - */ -struct iwl_fw_dbg_trigger_tlv { - __le32 id; - __le32 vif_type; - __le32 stop_conf_ids; - __le32 stop_delay; - u8 mode; - u8 start_conf_id; - __le16 occurrences; - __le32 reserved[2]; - - u8 data[0]; -} __packed; - -#define FW_DBG_START_FROM_ALIVE 0 -#define FW_DBG_CONF_MAX 32 -#define FW_DBG_INVALID 0xff - -/** - * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons - * @stop_consec_missed_bcon: stop recording if threshold is crossed. - * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed. - * @start_consec_missed_bcon: start recording if threshold is crossed. - * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed. - * @reserved1: reserved - * @reserved2: reserved - */ -struct iwl_fw_dbg_trigger_missed_bcon { - __le32 stop_consec_missed_bcon; - __le32 stop_consec_missed_bcon_since_rx; - __le32 reserved2[2]; - __le32 start_consec_missed_bcon; - __le32 start_consec_missed_bcon_since_rx; - __le32 reserved1[2]; -} __packed; - -/** - * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW. 
- * cmds: the list of commands to trigger the collection on - */ -struct iwl_fw_dbg_trigger_cmd { - struct cmd { - u8 cmd_id; - u8 group_id; - } __packed cmds[16]; -} __packed; - -/** - * iwl_fw_dbg_trigger_stats - configures trigger for statistics - * @stop_offset: the offset of the value to be monitored - * @stop_threshold: the threshold above which to collect - * @start_offset: the offset of the value to be monitored - * @start_threshold: the threshold above which to start recording - */ -struct iwl_fw_dbg_trigger_stats { - __le32 stop_offset; - __le32 stop_threshold; - __le32 start_offset; - __le32 start_threshold; -} __packed; - -/** - * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI - * @rssi: RSSI value to trigger at - */ -struct iwl_fw_dbg_trigger_low_rssi { - __le32 rssi; -} __packed; - -/** - * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events - * @stop_auth_denied: number of denied authentication to collect - * @stop_auth_timeout: number of authentication timeout to collect - * @stop_rx_deauth: number of Rx deauth before to collect - * @stop_tx_deauth: number of Tx deauth before to collect - * @stop_assoc_denied: number of denied association to collect - * @stop_assoc_timeout: number of association timeout to collect - * @stop_connection_loss: number of connection loss to collect - * @start_auth_denied: number of denied authentication to start recording - * @start_auth_timeout: number of authentication timeout to start recording - * @start_rx_deauth: number of Rx deauth to start recording - * @start_tx_deauth: number of Tx deauth to start recording - * @start_assoc_denied: number of denied association to start recording - * @start_assoc_timeout: number of association timeout to start recording - * @start_connection_loss: number of connection loss to start recording - */ -struct iwl_fw_dbg_trigger_mlme { - u8 stop_auth_denied; - u8 stop_auth_timeout; - u8 stop_rx_deauth; - u8 stop_tx_deauth; - - u8 stop_assoc_denied; - u8 stop_assoc_timeout; - u8 stop_connection_loss; - u8 reserved; - - u8 start_auth_denied; - u8 start_auth_timeout; - u8 start_rx_deauth; - u8 start_tx_deauth; - - u8 start_assoc_denied; - u8 start_assoc_timeout; - u8 start_connection_loss; - u8 reserved2; -} __packed; - -/** - * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer - * @command_queue: timeout for the command queue in ms - * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms - * @softap: timeout for the queues of a softAP in ms - * @p2p_go: timeout for the queues of a P2P GO in ms - * @p2p_client: timeout for the queues of a P2P client in ms - * @p2p_device: timeout for the queues of a P2P device in ms - * @ibss: timeout for the queues of an IBSS in ms - * @tdls: timeout for the queues of a TDLS station in ms - */ -struct iwl_fw_dbg_trigger_txq_timer { - __le32 command_queue; - __le32 bss; - __le32 softap; - __le32 p2p_go; - __le32 p2p_client; - __le32 p2p_device; - __le32 ibss; - __le32 tdls; - __le32 reserved[4]; -} __packed; - -/** - * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger - * time_Events: a list of tuples . The driver will issue a - * trigger each time a time event notification that relates to time event - * id with one of the actions in the bitmap is received and - * BIT(notif->status) is set in status_bitmap. 
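The time-event trigger description above boils down to a three-way match: the notification's id must equal the tuple's id, the action must appear in action_bitmap, and BIT(status) must be set in status_bitmap. A rough sketch of that check follows; struct te_trig_entry and te_trigger_fires() are illustrative names, and the actual driver-side matching may differ in detail.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct te_trig_entry {          /* one <id, action_bitmap, status_bitmap> tuple */
        uint32_t id;
        uint32_t action_bitmap;
        uint32_t status_bitmap;
    };

    bool te_trigger_fires(const struct te_trig_entry *e, size_t n,
                          uint32_t id, uint32_t actions, uint32_t status)
    {
        for (size_t i = 0; i < n; i++) {
            if (e[i].id != id)
                continue;
            if ((e[i].action_bitmap & actions) &&
                (e[i].status_bitmap & (1u << status)))
                return true;
        }
        return false;
    }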
- * - */ -struct iwl_fw_dbg_trigger_time_event { - struct { - __le32 id; - __le32 action_bitmap; - __le32 status_bitmap; - } __packed time_events[16]; -} __packed; - -/** - * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger - * rx_ba_start: tid bitmap to configure on what tid the trigger should occur - * when an Rx BlockAck session is started. - * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur - * when an Rx BlockAck session is stopped. - * tx_ba_start: tid bitmap to configure on what tid the trigger should occur - * when a Tx BlockAck session is started. - * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur - * when a Tx BlockAck session is stopped. - * rx_bar: tid bitmap to configure on what tid the trigger should occur - * when a BAR is received (for a Tx BlockAck session). - * tx_bar: tid bitmap to configure on what tid the trigger should occur - * when a BAR is send (for an Rx BlocAck session). - * frame_timeout: tid bitmap to configure on what tid the trigger should occur - * when a frame times out in the reodering buffer. - */ -struct iwl_fw_dbg_trigger_ba { - __le16 rx_ba_start; - __le16 rx_ba_stop; - __le16 tx_ba_start; - __le16 tx_ba_stop; - __le16 rx_bar; - __le16 tx_bar; - __le16 frame_timeout; -} __packed; - -/** - * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. - * @id: conf id - * @usniffer: should the uSniffer image be used - * @num_of_hcmds: how many HCMDs to send are present here - * @hcmd: a variable length host command to be sent to apply the configuration. - * If there is more than one HCMD to send, they will appear one after the - * other and be sent in the order that they appear in. - * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up-to - * %FW_DBG_CONF_MAX configuration per run. - */ -struct iwl_fw_dbg_conf_tlv { - u8 id; - u8 usniffer; - u8 reserved; - u8 num_of_hcmds; - struct iwl_fw_dbg_conf_hcmd hcmd; -} __packed; - -/** - * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW - * @max_scan_cache_size: total space allocated for scan results (in bytes). - * @max_scan_buckets: maximum number of channel buckets. - * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. - * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. - * @max_scan_reporting_threshold: max possible report threshold. in percentage. - * @max_hotlist_aps: maximum number of entries for hotlist APs. - * @max_significant_change_aps: maximum number of entries for significant - * change APs. - * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can - * hold. - */ -struct iwl_fw_gscan_capabilities { - __le32 max_scan_cache_size; - __le32 max_scan_buckets; - __le32 max_ap_cache_per_scan; - __le32 max_rssi_sample_size; - __le32 max_scan_reporting_threshold; - __le32 max_hotlist_aps; - __le32 max_significant_change_aps; - __le32 max_bssid_history_entries; -} __packed; - -#endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h deleted file mode 100644 index 84ec0cefb62a..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ /dev/null @@ -1,322 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. 
All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __iwl_fw_h__ -#define __iwl_fw_h__ -#include -#include - -#include "iwl-fw-file.h" -#include "iwl-fw-error-dump.h" - -/** - * enum iwl_ucode_type - * - * The type of ucode. - * - * @IWL_UCODE_REGULAR: Normal runtime ucode - * @IWL_UCODE_INIT: Initial ucode - * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode - * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image - */ -enum iwl_ucode_type { - IWL_UCODE_REGULAR, - IWL_UCODE_INIT, - IWL_UCODE_WOWLAN, - IWL_UCODE_REGULAR_USNIFFER, - IWL_UCODE_TYPE_MAX, -}; - -/* - * enumeration of ucode section. - * This enumeration is used directly for older firmware (before 16.0). 
- * For new firmware, there can be up to 4 sections (see below) but the - * first one packaged into the firmware file is the DATA section and - * some debugging code accesses that. - */ -enum iwl_ucode_sec { - IWL_UCODE_SECTION_DATA, - IWL_UCODE_SECTION_INST, -}; - -struct iwl_ucode_capabilities { - u32 max_probe_length; - u32 n_scan_channels; - u32 standard_phy_calibration_size; - u32 flags; - unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)]; - unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)]; -}; - -static inline bool -fw_has_api(const struct iwl_ucode_capabilities *capabilities, - iwl_ucode_tlv_api_t api) -{ - return test_bit((__force long)api, capabilities->_api); -} - -static inline bool -fw_has_capa(const struct iwl_ucode_capabilities *capabilities, - iwl_ucode_tlv_capa_t capa) -{ - return test_bit((__force long)capa, capabilities->_capa); -} - -/* one for each uCode image (inst/data, init/runtime/wowlan) */ -struct fw_desc { - const void *data; /* vmalloc'ed data */ - u32 len; /* size in bytes */ - u32 offset; /* offset in the device */ -}; - -struct fw_img { - struct fw_desc sec[IWL_UCODE_SECTION_MAX]; - bool is_dual_cpus; - u32 paging_mem_size; -}; - -struct iwl_sf_region { - u32 addr; - u32 size; -}; - -/* - * Block paging calculations - */ -#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */ -#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */ -#define PAGE_PER_GROUP_2_EXP_SIZE 3 -/* 8 pages per group */ -#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE) -/* don't change, support only 32KB size */ -#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE) -/* 32K == 2^15 */ -#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE) - -/* - * Image paging calculations - */ -#define BLOCK_PER_IMAGE_2_EXP_SIZE 5 -/* 2^5 == 32 blocks per image */ -#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE) -/* maximum image size 1024KB */ -#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE) - -/* Virtual address signature */ -#define PAGING_ADDR_SIG 0xAA000000 - -#define PAGING_CMD_IS_SECURED BIT(9) -#define PAGING_CMD_IS_ENABLED BIT(8) -#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 -#define PAGING_TLV_SECURE_MASK 1 - -/** - * struct iwl_fw_paging - * @fw_paging_phys: page phy pointer - * @fw_paging_block: pointer to the allocated block - * @fw_paging_size: page size - */ -struct iwl_fw_paging { - dma_addr_t fw_paging_phys; - struct page *fw_paging_block; - u32 fw_paging_size; -}; - -/** - * struct iwl_fw_cscheme_list - a cipher scheme list - * @size: a number of entries - * @cs: cipher scheme entries - */ -struct iwl_fw_cscheme_list { - u8 size; - struct iwl_fw_cipher_scheme cs[]; -} __packed; - -/** - * struct iwl_gscan_capabilities - gscan capabilities supported by FW - * @max_scan_cache_size: total space allocated for scan results (in bytes). - * @max_scan_buckets: maximum number of channel buckets. - * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. - * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. - * @max_scan_reporting_threshold: max possible report threshold. in percentage. - * @max_hotlist_aps: maximum number of entries for hotlist APs. - * @max_significant_change_aps: maximum number of entries for significant - * change APs. - * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can - * hold. 
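/*
 * Worked example, not part of this patch: how the paging macros above
 * compose.  These compile-time checks only restate values implied by the
 * definitions: 4 KB pages, 8 pages per group, 32 KB blocks and 32 blocks
 * per image, hence a 1 MB maximum paged image.
 */
static inline void iwl_example_paging_math(void)
{
	BUILD_BUG_ON(FW_PAGING_SIZE != 4096);			/* 2^12 */
	BUILD_BUG_ON(NUM_OF_PAGE_PER_GROUP != 8);		/* 2^3 */
	BUILD_BUG_ON(PAGING_BLOCK_SIZE != 32 * 1024);		/* 8 * 4 KB */
	BUILD_BUG_ON(BLOCK_2_EXP_SIZE != 15);			/* 32 KB == 2^15 */
	BUILD_BUG_ON(NUM_OF_BLOCK_PER_IMAGE != 32);		/* 2^5 */
	BUILD_BUG_ON(MAX_PAGING_IMAGE_SIZE != 1024 * 1024);	/* 32 * 32 KB */
}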
- */ -struct iwl_gscan_capabilities { - u32 max_scan_cache_size; - u32 max_scan_buckets; - u32 max_ap_cache_per_scan; - u32 max_rssi_sample_size; - u32 max_scan_reporting_threshold; - u32 max_hotlist_aps; - u32 max_significant_change_aps; - u32 max_bssid_history_entries; -}; - -/** - * struct iwl_fw - variables associated with the firmware - * - * @ucode_ver: ucode version from the ucode file - * @fw_version: firmware version string - * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan. - * @ucode_capa: capabilities parsed from the ucode file. - * @enhance_sensitivity_table: device can do enhanced sensitivity. - * @init_evtlog_ptr: event log offset for init ucode. - * @init_evtlog_size: event log size for init ucode. - * @init_errlog_ptr: error log offfset for init ucode. - * @inst_evtlog_ptr: event log offset for runtime ucode. - * @inst_evtlog_size: event log size for runtime ucode. - * @inst_errlog_ptr: error log offfset for runtime ucode. - * @mvm_fw: indicates this is MVM firmware - * @cipher_scheme: optional external cipher scheme. - * @human_readable: human readable version - * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until - * we get the ALIVE from the uCode - * @dbg_dest_tlv: points to the destination TLV for debug - * @dbg_conf_tlv: array of pointers to configuration TLVs for debug - * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries - * @dbg_trigger_tlv: array of pointers to triggers TLVs - * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries - * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv - */ -struct iwl_fw { - u32 ucode_ver; - - char fw_version[ETHTOOL_FWVERS_LEN]; - - /* ucode images */ - struct fw_img img[IWL_UCODE_TYPE_MAX]; - - struct iwl_ucode_capabilities ucode_capa; - bool enhance_sensitivity_table; - - u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; - u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; - - struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX]; - u32 phy_config; - u8 valid_tx_ant; - u8 valid_rx_ant; - - bool mvm_fw; - - struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; - u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; - - u32 sdio_adma_addr; - - struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; - struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; - size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; - struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; - size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; - u8 dbg_dest_reg_num; - struct iwl_gscan_capabilities gscan_capa; -}; - -static inline const char *get_fw_dbg_mode_string(int mode) -{ - switch (mode) { - case SMEM_MODE: - return "SMEM"; - case EXTERNAL_MODE: - return "EXTERNAL_DRAM"; - case MARBH_MODE: - return "MARBH"; - case MIPI_MODE: - return "MIPI"; - default: - return "UNKNOWN"; - } -} - -static inline bool -iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) -{ - const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id]; - - if (!conf_tlv) - return false; - - return conf_tlv->usniffer; -} - -#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ - void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ - unlikely(__dbg_trigger); \ -}) - -static inline struct iwl_fw_dbg_trigger_tlv* -iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id) -{ - if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv))) - return NULL; - - return fw->dbg_trigger_tlv[id]; -} - -#endif /* __iwl_fw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c deleted file mode 100644 index 
0bd9d4aad0c0..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-io.c +++ /dev/null @@ -1,289 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include - -#include "iwl-drv.h" -#include "iwl-io.h" -#include "iwl-csr.h" -#include "iwl-debug.h" -#include "iwl-prph.h" -#include "iwl-fh.h" - -void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) -{ - trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val); - iwl_trans_write8(trans, ofs, val); -} -IWL_EXPORT_SYMBOL(iwl_write8); - -void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val) -{ - trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val); - iwl_trans_write32(trans, ofs, val); -} -IWL_EXPORT_SYMBOL(iwl_write32); - -u32 iwl_read32(struct iwl_trans *trans, u32 ofs) -{ - u32 val = iwl_trans_read32(trans, ofs); - - trace_iwlwifi_dev_ioread32(trans->dev, ofs, val); - return val; -} -IWL_EXPORT_SYMBOL(iwl_read32); - -#define IWL_POLL_INTERVAL 10 /* microseconds */ - -int iwl_poll_bit(struct iwl_trans *trans, u32 addr, - u32 bits, u32 mask, int timeout) -{ - int t = 0; - - do { - if ((iwl_read32(trans, addr) & mask) == (bits & mask)) - return t; - udelay(IWL_POLL_INTERVAL); - t += IWL_POLL_INTERVAL; - } while (t < timeout); - - return -ETIMEDOUT; -} -IWL_EXPORT_SYMBOL(iwl_poll_bit); - -u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) -{ - u32 value = 0x5a5a5a5a; - unsigned long flags; - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - value = iwl_read32(trans, reg); - iwl_trans_release_nic_access(trans, &flags); - } - - return value; -} -IWL_EXPORT_SYMBOL(iwl_read_direct32); - -void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) -{ - unsigned long flags; - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - iwl_write32(trans, reg, value); - iwl_trans_release_nic_access(trans, &flags); - } -} -IWL_EXPORT_SYMBOL(iwl_write_direct32); - -int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, - int timeout) -{ - int t = 0; - - do { - if ((iwl_read_direct32(trans, addr) & mask) == mask) - return t; - udelay(IWL_POLL_INTERVAL); - t += IWL_POLL_INTERVAL; - } while (t < timeout); - - return -ETIMEDOUT; -} -IWL_EXPORT_SYMBOL(iwl_poll_direct_bit); - -u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs) -{ - u32 val = iwl_trans_read_prph(trans, ofs); - trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val); - return val; -} - -void 
__iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) -{ - trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val); - iwl_trans_write_prph(trans, ofs, val); -} - -u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) -{ - unsigned long flags; - u32 val = 0x5a5a5a5a; - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - val = __iwl_read_prph(trans, ofs); - iwl_trans_release_nic_access(trans, &flags); - } - return val; -} -IWL_EXPORT_SYMBOL(iwl_read_prph); - -void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) -{ - unsigned long flags; - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - __iwl_write_prph(trans, ofs, val); - iwl_trans_release_nic_access(trans, &flags); - } -} -IWL_EXPORT_SYMBOL(iwl_write_prph); - -int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, - u32 bits, u32 mask, int timeout) -{ - int t = 0; - - do { - if ((iwl_read_prph(trans, addr) & mask) == (bits & mask)) - return t; - udelay(IWL_POLL_INTERVAL); - t += IWL_POLL_INTERVAL; - } while (t < timeout); - - return -ETIMEDOUT; -} - -void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) -{ - unsigned long flags; - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - __iwl_write_prph(trans, ofs, - __iwl_read_prph(trans, ofs) | mask); - iwl_trans_release_nic_access(trans, &flags); - } -} -IWL_EXPORT_SYMBOL(iwl_set_bits_prph); - -void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, - u32 bits, u32 mask) -{ - unsigned long flags; - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - __iwl_write_prph(trans, ofs, - (__iwl_read_prph(trans, ofs) & mask) | bits); - iwl_trans_release_nic_access(trans, &flags); - } -} -IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph); - -void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) -{ - unsigned long flags; - u32 val; - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - val = __iwl_read_prph(trans, ofs); - __iwl_write_prph(trans, ofs, (val & ~mask)); - iwl_trans_release_nic_access(trans, &flags); - } -} -IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); - -void iwl_force_nmi(struct iwl_trans *trans) -{ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { - iwl_write_prph(trans, DEVICE_SET_NMI_REG, - DEVICE_SET_NMI_VAL_DRV); - iwl_write_prph(trans, DEVICE_SET_NMI_REG, - DEVICE_SET_NMI_VAL_HW); - } else { - iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG, - DEVICE_SET_NMI_8000_VAL); - iwl_write_prph(trans, DEVICE_SET_NMI_REG, - DEVICE_SET_NMI_VAL_DRV); - } -} -IWL_EXPORT_SYMBOL(iwl_force_nmi); - -static const char *get_fh_string(int cmd) -{ -#define IWL_CMD(x) case x: return #x - switch (cmd) { - IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); - IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); - IWL_CMD(FH_RSCSR_CHNL0_WPTR); - IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); - IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); - IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); - IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); - IWL_CMD(FH_TSSR_TX_STATUS_REG); - IWL_CMD(FH_TSSR_TX_ERROR_REG); - default: - return "UNKNOWN"; - } -#undef IWL_CMD -} - -int iwl_dump_fh(struct iwl_trans *trans, char **buf) -{ - int i; - static const u32 fh_tbl[] = { - FH_RSCSR_CHNL0_STTS_WPTR_REG, - FH_RSCSR_CHNL0_RBDCB_BASE_REG, - FH_RSCSR_CHNL0_WPTR, - FH_MEM_RCSR_CHNL0_CONFIG_REG, - FH_MEM_RSSR_SHARED_CTRL_REG, - FH_MEM_RSSR_RX_STATUS_REG, - FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, - FH_TSSR_TX_STATUS_REG, - FH_TSSR_TX_ERROR_REG - }; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (buf) { - int pos = 0; - size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; - - *buf = kmalloc(bufsz, GFP_KERNEL); - if (!*buf) - return 
-ENOMEM; - - pos += scnprintf(*buf + pos, bufsz - pos, - "FH register values:\n"); - - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) - pos += scnprintf(*buf + pos, bufsz - pos, - " %34s: 0X%08x\n", - get_fh_string(fh_tbl[i]), - iwl_read_direct32(trans, fh_tbl[i])); - - return pos; - } -#endif - - IWL_ERR(trans, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) - IWL_ERR(trans, " %34s: 0X%08x\n", - get_fh_string(fh_tbl[i]), - iwl_read_direct32(trans, fh_tbl[i])); - - return 0; -} diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h deleted file mode 100644 index 501d0560c061..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ /dev/null @@ -1,73 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Portions of this file are derived from the ipw3945 project. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __iwl_io_h__ -#define __iwl_io_h__ - -#include "iwl-devtrace.h" -#include "iwl-trans.h" - -void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val); -void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val); -u32 iwl_read32(struct iwl_trans *trans, u32 ofs); - -static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) -{ - iwl_trans_set_bits_mask(trans, reg, mask, mask); -} - -static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) -{ - iwl_trans_set_bits_mask(trans, reg, mask, 0); -} - -int iwl_poll_bit(struct iwl_trans *trans, u32 addr, - u32 bits, u32 mask, int timeout); -int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, - int timeout); - -u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg); -void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); - - -u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs); -u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs); -void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); -void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); -int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, - u32 bits, u32 mask, int timeout); -void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); -void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, - u32 bits, u32 mask); -void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); -void iwl_force_nmi(struct iwl_trans *trans); - -/* Error handling */ -int iwl_dump_fh(struct iwl_trans *trans, char **buf); - -#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h deleted file mode 100644 index ac2b90df8413..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ /dev/null @@ -1,129 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. 
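/*
 * Usage sketch, not part of this patch: iwl_poll_bit() (declared above,
 * implemented in iwl-io.c) polls every 10 usec and returns the elapsed
 * time once (reg & mask) == (bits & mask), or -ETIMEDOUT once 'timeout'
 * usec have passed.  EXAMPLE_CSR_STATUS_REG/READY are placeholders, not
 * real CSR definitions.
 */
static int iwl_example_wait_ready(struct iwl_trans *trans)
{
	int ret = iwl_poll_bit(trans, EXAMPLE_CSR_STATUS_REG,
			       EXAMPLE_CSR_STATUS_READY,
			       EXAMPLE_CSR_STATUS_READY, 25000);

	if (ret < 0)
		return ret;		/* -ETIMEDOUT */

	return 0;			/* became ready after 'ret' usec */
}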
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_modparams_h__ -#define __iwl_modparams_h__ - -#include -#include -#include -#include - -extern struct iwl_mod_params iwlwifi_mod_params; - -enum iwl_power_level { - IWL_POWER_INDEX_1, - IWL_POWER_INDEX_2, - IWL_POWER_INDEX_3, - IWL_POWER_INDEX_4, - IWL_POWER_INDEX_5, - IWL_POWER_NUM -}; - -enum iwl_disable_11n { - IWL_DISABLE_HT_ALL = BIT(0), - IWL_DISABLE_HT_TXAGG = BIT(1), - IWL_DISABLE_HT_RXAGG = BIT(2), - IWL_ENABLE_HT_TXAGG = BIT(3), -}; - -/** - * struct iwl_mod_params - * - * Holds the module parameters - * - * @sw_crypto: using hardware encryption, default = 0 - * @disable_11n: disable 11n capabilities, default = 0, - * use IWL_[DIS,EN]ABLE_HT_* constants - * @amsdu_size_8K: enable 8K amsdu size, default = 0 - * @restart_fw: restart firmware, default = 1 - * @bt_coex_active: enable bt coex, default = true - * @led_mode: system default, default = 0 - * @power_save: enable power save, default = false - * @power_level: power level, default = 1 - * @debug_level: levels are IWL_DL_* - * @ant_coupling: antenna coupling in dB, default = 0 - * @d0i3_disable: disable d0i3, default = 1, - * @lar_disable: disable LAR (regulatory), default = 0 - * @fw_monitor: allow to use firmware monitor - */ -struct iwl_mod_params { - int sw_crypto; - unsigned int disable_11n; - int amsdu_size_8K; - bool restart_fw; - bool bt_coex_active; - int led_mode; - bool power_save; - int power_level; -#ifdef CONFIG_IWLWIFI_DEBUG - u32 debug_level; -#endif - int ant_coupling; - char *nvm_file; - bool uapsd_disable; - bool d0i3_disable; - bool lar_disable; - bool fw_monitor; -}; - -#endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c deleted file mode 100644 index 6caf2affbbb5..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c +++ /dev/null @@ -1,193 +0,0 @@ 
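/*
 * Illustrative sketch, not part of this patch: how the IWL_DISABLE_HT_*
 * bits above combine in iwl_mod_params.disable_11n.  The helper is
 * hypothetical; the bit meanings come from enum iwl_disable_11n.
 */
static inline bool iwl_example_ht_txagg_disabled(const struct iwl_mod_params *mod)
{
	/* IWL_DISABLE_HT_ALL turns off HT entirely; a value of
	 * IWL_DISABLE_HT_TXAGG | IWL_DISABLE_HT_RXAGG (0x6) disables both
	 * aggregation directions while leaving HT itself enabled.
	 */
	if (mod->disable_11n & IWL_DISABLE_HT_ALL)
		return true;

	return mod->disable_11n & IWL_DISABLE_HT_TXAGG;
}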
-/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#include -#include - -#include "iwl-drv.h" -#include "iwl-notif-wait.h" - - -void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait) -{ - spin_lock_init(¬if_wait->notif_wait_lock); - INIT_LIST_HEAD(¬if_wait->notif_waits); - init_waitqueue_head(¬if_wait->notif_waitq); -} -IWL_EXPORT_SYMBOL(iwl_notification_wait_init); - -void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt) -{ - bool triggered = false; - - if (!list_empty(¬if_wait->notif_waits)) { - struct iwl_notification_wait *w; - - spin_lock(¬if_wait->notif_wait_lock); - list_for_each_entry(w, ¬if_wait->notif_waits, list) { - int i; - bool found = false; - - /* - * If it already finished (triggered) or has been - * aborted then don't evaluate it again to avoid races, - * Otherwise the function could be called again even - * though it returned true before - */ - if (w->triggered || w->aborted) - continue; - - for (i = 0; i < w->n_cmds; i++) { - if (w->cmds[i] == - WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { - found = true; - break; - } - } - if (!found) - continue; - - if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) { - w->triggered = true; - triggered = true; - } - } - spin_unlock(¬if_wait->notif_wait_lock); - - } - - if (triggered) - wake_up_all(¬if_wait->notif_waitq); -} -IWL_EXPORT_SYMBOL(iwl_notification_wait_notify); - -void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) -{ - struct iwl_notification_wait *wait_entry; - - spin_lock(¬if_wait->notif_wait_lock); - list_for_each_entry(wait_entry, ¬if_wait->notif_waits, list) - wait_entry->aborted = true; - spin_unlock(¬if_wait->notif_wait_lock); - - wake_up_all(¬if_wait->notif_waitq); -} -IWL_EXPORT_SYMBOL(iwl_abort_notification_waits); - -void -iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, - struct iwl_notification_wait *wait_entry, - const u16 *cmds, int n_cmds, - bool (*fn)(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data), - void *fn_data) -{ - if (WARN_ON(n_cmds > MAX_NOTIF_CMDS)) - n_cmds = MAX_NOTIF_CMDS; - - wait_entry->fn = fn; - wait_entry->fn_data = fn_data; - wait_entry->n_cmds = n_cmds; - memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16)); - wait_entry->triggered = false; - wait_entry->aborted = false; - - spin_lock_bh(¬if_wait->notif_wait_lock); - list_add(&wait_entry->list, ¬if_wait->notif_waits); - spin_unlock_bh(¬if_wait->notif_wait_lock); -} -IWL_EXPORT_SYMBOL(iwl_init_notification_wait); - -int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, - struct iwl_notification_wait *wait_entry, - unsigned long timeout) -{ - int ret; - - ret = wait_event_timeout(notif_wait->notif_waitq, - wait_entry->triggered || wait_entry->aborted, - timeout); - - spin_lock_bh(¬if_wait->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(¬if_wait->notif_wait_lock); - - if (wait_entry->aborted) - return -EIO; - - /* return value is always >= 0 */ - if (ret <= 0) - return -ETIMEDOUT; - return 0; -} -IWL_EXPORT_SYMBOL(iwl_wait_notification); - -void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, - struct iwl_notification_wait *wait_entry) -{ - spin_lock_bh(¬if_wait->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(¬if_wait->notif_wait_lock); -} -IWL_EXPORT_SYMBOL(iwl_remove_notification); diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h deleted file mode 
100644 index dbe8234521de..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h +++ /dev/null @@ -1,139 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_notif_wait_h__ -#define __iwl_notif_wait_h__ - -#include - -#include "iwl-trans.h" - -struct iwl_notif_wait_data { - struct list_head notif_waits; - spinlock_t notif_wait_lock; - wait_queue_head_t notif_waitq; -}; - -#define MAX_NOTIF_CMDS 5 - -/** - * struct iwl_notification_wait - notification wait entry - * @list: list head for global list - * @fn: Function called with the notification. 
If the function - * returns true, the wait is over, if it returns false then - * the waiter stays blocked. If no function is given, any - * of the listed commands will unblock the waiter. - * @cmds: command IDs - * @n_cmds: number of command IDs - * @triggered: waiter should be woken up - * @aborted: wait was aborted - * - * This structure is not used directly, to wait for a - * notification declare it on the stack, and call - * iwlagn_init_notification_wait() with appropriate - * parameters. Then do whatever will cause the ucode - * to notify the driver, and to wait for that then - * call iwlagn_wait_notification(). - * - * Each notification is one-shot. If at some point we - * need to support multi-shot notifications (which - * can't be allocated on the stack) we need to modify - * the code for them. - */ -struct iwl_notification_wait { - struct list_head list; - - bool (*fn)(struct iwl_notif_wait_data *notif_data, - struct iwl_rx_packet *pkt, void *data); - void *fn_data; - - u16 cmds[MAX_NOTIF_CMDS]; - u8 n_cmds; - bool triggered, aborted; -}; - - -/* caller functions */ -void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data); -void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data, - struct iwl_rx_packet *pkt); -void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data); - -/* user functions */ -void __acquires(wait_entry) -iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, - struct iwl_notification_wait *wait_entry, - const u16 *cmds, int n_cmds, - bool (*fn)(struct iwl_notif_wait_data *notif_data, - struct iwl_rx_packet *pkt, void *data), - void *fn_data); - -int __must_check __releases(wait_entry) -iwl_wait_notification(struct iwl_notif_wait_data *notif_data, - struct iwl_notification_wait *wait_entry, - unsigned long timeout); - -void __releases(wait_entry) -iwl_remove_notification(struct iwl_notif_wait_data *notif_data, - struct iwl_notification_wait *wait_entry); - -#endif /* __iwl_notif_wait_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c deleted file mode 100644 index d82984912e04..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ /dev/null @@ -1,844 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
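/*
 * Usage sketch, not part of this patch: the flow described in the
 * struct iwl_notification_wait kernel-doc above, using the functions
 * from iwl-notif-wait.c.  EXAMPLE_NOTIF_CMD and iwl_example_send_command()
 * are placeholders; the call sequence is the documented one: init the
 * wait entry on the stack, cause the firmware to send the notification,
 * then wait (or remove the entry on the error path).
 */
static int iwl_example_wait_for_notif(struct iwl_notif_wait_data *notif_wait)
{
	struct iwl_notification_wait wait;	/* lives on the stack */
	static const u16 cmds[] = { EXAMPLE_NOTIF_CMD };
	int ret;

	iwl_init_notification_wait(notif_wait, &wait, cmds, ARRAY_SIZE(cmds),
				   NULL, NULL); /* no filter fn: any match wakes us */

	ret = iwl_example_send_command();	/* placeholder for the real trigger */
	if (ret) {
		iwl_remove_notification(notif_wait, &wait);
		return ret;
	}

	/* returns 0, -ETIMEDOUT, or -EIO if the wait was aborted */
	return iwl_wait_notification(notif_wait, &wait, HZ);
}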
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ -#include -#include -#include -#include -#include -#include "iwl-drv.h" -#include "iwl-modparams.h" -#include "iwl-nvm-parse.h" - -/* NVM offsets (in words) definitions */ -enum wkp_nvm_offsets { - /* NVM HW-Section offset (in words) definitions */ - HW_ADDR = 0x15, - - /* NVM SW-Section offset (in words) definitions */ - NVM_SW_SECTION = 0x1C0, - NVM_VERSION = 0, - RADIO_CFG = 1, - SKU = 2, - N_HW_ADDRS = 3, - NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, - - /* NVM calibration section offset (in words) definitions */ - NVM_CALIB_SECTION = 0x2B8, - XTAL_CALIB = 0x316 - NVM_CALIB_SECTION -}; - -enum family_8000_nvm_offsets { - /* NVM HW-Section offset (in words) definitions */ - HW_ADDR0_WFPM_FAMILY_8000 = 0x12, - HW_ADDR1_WFPM_FAMILY_8000 = 0x16, - HW_ADDR0_PCIE_FAMILY_8000 = 0x8A, - HW_ADDR1_PCIE_FAMILY_8000 = 0x8E, - MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1, - - /* NVM SW-Section offset (in words) definitions */ - NVM_SW_SECTION_FAMILY_8000 = 0x1C0, - NVM_VERSION_FAMILY_8000 = 0, - RADIO_CFG_FAMILY_8000 = 0, - SKU_FAMILY_8000 = 2, - N_HW_ADDRS_FAMILY_8000 = 3, - - /* NVM REGULATORY -Section offset (in words) definitions */ - NVM_CHANNELS_FAMILY_8000 = 0, - NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7, - NVM_LAR_OFFSET_FAMILY_8000 = 0x507, - NVM_LAR_ENABLED_FAMILY_8000 = 0x7, - - /* NVM calibration section offset (in words) definitions */ - NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8, - XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000 -}; - -/* SKU Capabilities (actual values from NVM definition) */ -enum nvm_sku_bits { - NVM_SKU_CAP_BAND_24GHZ = BIT(0), - NVM_SKU_CAP_BAND_52GHZ = BIT(1), - NVM_SKU_CAP_11N_ENABLE = BIT(2), - NVM_SKU_CAP_11AC_ENABLE = BIT(3), - NVM_SKU_CAP_MIMO_DISABLE = BIT(5), -}; - -/* - * 
These are the channel numbers in the order that they are stored in the NVM - */ -static const u8 iwl_nvm_channels[] = { - /* 2.4 GHz */ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - /* 5 GHz */ - 36, 40, 44 , 48, 52, 56, 60, 64, - 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, - 149, 153, 157, 161, 165 -}; - -static const u8 iwl_nvm_channels_family_8000[] = { - /* 2.4 GHz */ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - /* 5 GHz */ - 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, - 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, - 149, 153, 157, 161, 165, 169, 173, 177, 181 -}; - -#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) -#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000) -#define NUM_2GHZ_CHANNELS 14 -#define NUM_2GHZ_CHANNELS_FAMILY_8000 14 -#define FIRST_2GHZ_HT_MINUS 5 -#define LAST_2GHZ_HT_PLUS 9 -#define LAST_5GHZ_HT 165 -#define LAST_5GHZ_HT_FAMILY_8000 181 -#define N_HW_ADDR_MASK 0xF - -/* rate data (static) */ -static struct ieee80211_rate iwl_cfg80211_rates[] = { - { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, - { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, - .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, - { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, - .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, - { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, - .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, - { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, - { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, - { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, - { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, - { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, - { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, - { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, - { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, -}; -#define RATES_24_OFFS 0 -#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) -#define RATES_52_OFFS 4 -#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) - -/** - * enum iwl_nvm_channel_flags - channel flags in NVM - * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo - * @NVM_CHANNEL_IBSS: usable as an IBSS channel - * @NVM_CHANNEL_ACTIVE: active scanning allowed - * @NVM_CHANNEL_RADAR: radar detection required - * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed - * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS - * on same channel on 2.4 or same UNII band on 5.2 - * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?) - * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) - * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?) - * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?) - */ -enum iwl_nvm_channel_flags { - NVM_CHANNEL_VALID = BIT(0), - NVM_CHANNEL_IBSS = BIT(1), - NVM_CHANNEL_ACTIVE = BIT(3), - NVM_CHANNEL_RADAR = BIT(4), - NVM_CHANNEL_INDOOR_ONLY = BIT(5), - NVM_CHANNEL_GO_CONCURRENT = BIT(6), - NVM_CHANNEL_WIDE = BIT(8), - NVM_CHANNEL_40MHZ = BIT(9), - NVM_CHANNEL_80MHZ = BIT(10), - NVM_CHANNEL_160MHZ = BIT(11), -}; - -#define CHECK_AND_PRINT_I(x) \ - ((ch_flags & NVM_CHANNEL_##x) ? 
# x " " : "") - -static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, - u16 nvm_flags, const struct iwl_cfg *cfg) -{ - u32 flags = IEEE80211_CHAN_NO_HT40; - u32 last_5ghz_ht = LAST_5GHZ_HT; - - if (cfg->device_family == IWL_DEVICE_FAMILY_8000) - last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; - - if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { - if (ch_num <= LAST_2GHZ_HT_PLUS) - flags &= ~IEEE80211_CHAN_NO_HT40PLUS; - if (ch_num >= FIRST_2GHZ_HT_MINUS) - flags &= ~IEEE80211_CHAN_NO_HT40MINUS; - } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) { - if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) - flags &= ~IEEE80211_CHAN_NO_HT40PLUS; - else - flags &= ~IEEE80211_CHAN_NO_HT40MINUS; - } - if (!(nvm_flags & NVM_CHANNEL_80MHZ)) - flags |= IEEE80211_CHAN_NO_80MHZ; - if (!(nvm_flags & NVM_CHANNEL_160MHZ)) - flags |= IEEE80211_CHAN_NO_160MHZ; - - if (!(nvm_flags & NVM_CHANNEL_IBSS)) - flags |= IEEE80211_CHAN_NO_IR; - - if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) - flags |= IEEE80211_CHAN_NO_IR; - - if (nvm_flags & NVM_CHANNEL_RADAR) - flags |= IEEE80211_CHAN_RADAR; - - if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) - flags |= IEEE80211_CHAN_INDOOR_ONLY; - - /* Set the GO concurrent flag only in case that NO_IR is set. - * Otherwise it is meaningless - */ - if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && - (flags & IEEE80211_CHAN_NO_IR)) - flags |= IEEE80211_CHAN_IR_CONCURRENT; - - return flags; -} - -static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const __le16 * const nvm_ch_flags, - bool lar_supported) -{ - int ch_idx; - int n_channels = 0; - struct ieee80211_channel *channel; - u16 ch_flags; - bool is_5ghz; - int num_of_ch, num_2ghz_channels; - const u8 *nvm_chan; - - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { - num_of_ch = IWL_NUM_CHANNELS; - nvm_chan = &iwl_nvm_channels[0]; - num_2ghz_channels = NUM_2GHZ_CHANNELS; - } else { - num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000; - nvm_chan = &iwl_nvm_channels_family_8000[0]; - num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000; - } - - for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { - ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); - - if (ch_idx >= num_2ghz_channels && - !data->sku_cap_band_52GHz_enable) - continue; - - if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) { - /* - * Channels might become valid later if lar is - * supported, hence we still want to add them to - * the list of supported channels to cfg80211. - */ - IWL_DEBUG_EEPROM(dev, - "Ch. %d Flags %x [%sGHz] - No traffic\n", - nvm_chan[ch_idx], - ch_flags, - (ch_idx >= num_2ghz_channels) ? - "5.2" : "2.4"); - continue; - } - - channel = &data->channels[n_channels]; - n_channels++; - - channel->hw_value = nvm_chan[ch_idx]; - channel->band = (ch_idx < num_2ghz_channels) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - channel->center_freq = - ieee80211_channel_to_frequency( - channel->hw_value, channel->band); - - /* Initialize regulatory-based run-time data */ - - /* - * Default value - highest tx power value. max_power - * is not used in mvm, and is used for backwards compatibility - */ - channel->max_power = IWL_DEFAULT_MAX_TX_POWER; - is_5ghz = channel->band == IEEE80211_BAND_5GHZ; - - /* don't put limitations in case we're using LAR */ - if (!lar_supported) - channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], - ch_idx, is_5ghz, - ch_flags, cfg); - else - channel->flags = 0; - - IWL_DEBUG_EEPROM(dev, - "Ch. 
%d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", - channel->hw_value, - is_5ghz ? "5.2" : "2.4", - CHECK_AND_PRINT_I(VALID), - CHECK_AND_PRINT_I(IBSS), - CHECK_AND_PRINT_I(ACTIVE), - CHECK_AND_PRINT_I(RADAR), - CHECK_AND_PRINT_I(WIDE), - CHECK_AND_PRINT_I(INDOOR_ONLY), - CHECK_AND_PRINT_I(GO_CONCURRENT), - ch_flags, - channel->max_power, - ((ch_flags & NVM_CHANNEL_IBSS) && - !(ch_flags & NVM_CHANNEL_RADAR)) - ? "" : "not "); - } - - return n_channels; -} - -static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - struct ieee80211_sta_vht_cap *vht_cap, - u8 tx_chains, u8 rx_chains) -{ - int num_rx_ants = num_of_ant(rx_chains); - int num_tx_ants = num_of_ant(tx_chains); - unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?: - IEEE80211_VHT_MAX_AMPDU_1024K); - - vht_cap->vht_supported = true; - - vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | - IEEE80211_VHT_CAP_RXSTBC_1 | - IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | - 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | - max_ampdu_exponent << - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; - - if (cfg->ht_params->ldpc) - vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; - - if (data->sku_cap_mimo_disabled) { - num_rx_ants = 1; - num_tx_ants = 1; - } - - if (num_tx_ants > 1) - vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; - else - vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; - - if (iwlwifi_mod_params.amsdu_size_8K) - vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; - - vht_cap->vht_mcs.rx_mcs_map = - cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 14); - - if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) { - vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN; - /* this works because NOT_SUPPORTED == 3 */ - vht_cap->vht_mcs.rx_mcs_map |= - cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2); - } - - vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; -} - -static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const __le16 *ch_section, - u8 tx_chains, u8 rx_chains, bool lar_supported) -{ - int n_channels; - int n_used = 0; - struct ieee80211_supported_band *sband; - - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) - n_channels = iwl_init_channel_map( - dev, cfg, data, - &ch_section[NVM_CHANNELS], lar_supported); - else - n_channels = iwl_init_channel_map( - dev, cfg, data, - &ch_section[NVM_CHANNELS_FAMILY_8000], - lar_supported); - - sband = &data->bands[IEEE80211_BAND_2GHZ]; - sband->band = IEEE80211_BAND_2GHZ; - sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; - sband->n_bitrates = N_RATES_24; - n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, - tx_chains, rx_chains); - - sband = &data->bands[IEEE80211_BAND_5GHZ]; - sband->band = IEEE80211_BAND_5GHZ; - sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; - sband->n_bitrates = N_RATES_52; - n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, - tx_chains, rx_chains); - if (data->sku_cap_11ac_enable) - iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, - tx_chains, rx_chains); 
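/*
 * Worked example, not part of this patch: the VHT MCS map built in
 * iwl_init_vht_hw_capab() above packs 2 bits per spatial stream into a
 * 16-bit word.  With MCS 0-9 (IEEE80211_VHT_MCS_SUPPORT_0_9, numerically 2)
 * on streams 1-2 and NOT_SUPPORTED (3) on streams 3-8:
 *
 *   rx_mcs_map = 2 | 2 << 2 | 3 << 4 | ... | 3 << 14 = 0xfffa
 *
 * For the SISO / rx_with_siso_diversity case the code ORs NOT_SUPPORTED
 * into bits 2-3, giving 0xfffe; that is the "this works because
 * NOT_SUPPORTED == 3" shortcut, since binary 10 | 11 == 11.
 */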
- - if (n_channels != n_used) - IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", - n_used, n_channels); -} - -static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, - const __le16 *phy_sku) -{ - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) - return le16_to_cpup(nvm_sw + SKU); - - return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000)); -} - -static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) -{ - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) - return le16_to_cpup(nvm_sw + NVM_VERSION); - else - return le32_to_cpup((__le32 *)(nvm_sw + - NVM_VERSION_FAMILY_8000)); -} - -static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, - const __le16 *phy_sku) -{ - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) - return le16_to_cpup(nvm_sw + RADIO_CFG); - - return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000)); - -} - -static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) -{ - int n_hw_addr; - - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) - return le16_to_cpup(nvm_sw + N_HW_ADDRS); - - n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); - - return n_hw_addr & N_HW_ADDR_MASK; -} - -static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - u32 radio_cfg) -{ - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { - data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); - data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); - data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); - data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg); - return; - } - - /* set the radio configuration for family 8000 */ - data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg); - data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg); - data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg); - data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg); - data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(radio_cfg); - data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg); -} - -static void iwl_set_hw_address(const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const __le16 *nvm_sec) -{ - const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR); - - /* The byte order is little endian 16 bit, meaning 214365 */ - data->hw_addr[0] = hw_addr[1]; - data->hw_addr[1] = hw_addr[0]; - data->hw_addr[2] = hw_addr[3]; - data->hw_addr[3] = hw_addr[2]; - data->hw_addr[4] = hw_addr[5]; - data->hw_addr[5] = hw_addr[4]; -} - -static void iwl_set_hw_address_family_8000(struct device *dev, - const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const __le16 *mac_override, - const __le16 *nvm_hw, - u32 mac_addr0, u32 mac_addr1) -{ - const u8 *hw_addr; - - if (mac_override) { - static const u8 reserved_mac[] = { - 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 - }; - - hw_addr = (const u8 *)(mac_override + - MAC_ADDRESS_OVERRIDE_FAMILY_8000); - - /* - * Store the MAC address from MAO section. - * No byte swapping is required in MAO section - */ - memcpy(data->hw_addr, hw_addr, ETH_ALEN); - - /* - * Force the use of the OTP MAC address in case of reserved MAC - * address in the NVM, or if address is given but invalid. 
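/*
 * Worked example, not part of this patch: the "214365" byte order noted
 * in iwl_set_hw_address() above.  If the NVM HW section holds the bytes
 * 00 11 22 33 44 55, the 16-bit little-endian layout yields the MAC
 * address 11:00:33:22:55:44 -- each pair of bytes is swapped, nothing
 * else changes.
 */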
- */ - if (is_valid_ether_addr(data->hw_addr) && - memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) - return; - - IWL_ERR_DEV(dev, - "mac address from nvm override section is not valid\n"); - } - - if (nvm_hw) { - /* read the MAC address from HW resisters */ - hw_addr = (const u8 *)&mac_addr0; - data->hw_addr[0] = hw_addr[3]; - data->hw_addr[1] = hw_addr[2]; - data->hw_addr[2] = hw_addr[1]; - data->hw_addr[3] = hw_addr[0]; - - hw_addr = (const u8 *)&mac_addr1; - data->hw_addr[4] = hw_addr[1]; - data->hw_addr[5] = hw_addr[0]; - - if (!is_valid_ether_addr(data->hw_addr)) - IWL_ERR_DEV(dev, - "mac address from hw section is not valid\n"); - - return; - } - - IWL_ERR_DEV(dev, "mac address is not found\n"); -} - -#define IWL_4165_DEVICE_ID 0x5501 - -struct iwl_nvm_data * -iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, - const __le16 *nvm_hw, const __le16 *nvm_sw, - const __le16 *nvm_calib, const __le16 *regulatory, - const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1, u32 hw_id) -{ - struct iwl_nvm_data *data; - u32 sku; - u32 radio_cfg; - u16 lar_config; - - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) - data = kzalloc(sizeof(*data) + - sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS, - GFP_KERNEL); - else - data = kzalloc(sizeof(*data) + - sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS_FAMILY_8000, - GFP_KERNEL); - if (!data) - return NULL; - - data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw); - - radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku); - iwl_set_radio_cfg(cfg, data, radio_cfg); - if (data->valid_tx_ant) - tx_chains &= data->valid_tx_ant; - if (data->valid_rx_ant) - rx_chains &= data->valid_rx_ant; - - sku = iwl_get_sku(cfg, nvm_sw, phy_sku); - data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; - data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; - data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) - data->sku_cap_11n_enable = false; - data->sku_cap_11ac_enable = data->sku_cap_11n_enable && - (sku & NVM_SKU_CAP_11AC_ENABLE); - data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; - - /* - * OTP 0x52 bug work around - * define antenna 1x1 according to MIMO disabled - */ - if (hw_id == IWL_4165_DEVICE_ID && data->sku_cap_mimo_disabled) { - data->valid_tx_ant = ANT_B; - data->valid_rx_ant = ANT_B; - tx_chains = ANT_B; - rx_chains = ANT_B; - } - - data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); - - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { - /* Checking for required sections */ - if (!nvm_calib) { - IWL_ERR_DEV(dev, - "Can't parse empty Calib NVM sections\n"); - kfree(data); - return NULL; - } - /* in family 8000 Xtal calibration values moved to OTP */ - data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); - data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); - } - - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { - iwl_set_hw_address(cfg, data, nvm_hw); - - iwl_init_sbands(dev, cfg, data, nvm_sw, - tx_chains, rx_chains, lar_fw_supported); - } else { - u16 lar_offset = data->nvm_version < 0xE39 ? 
- NVM_LAR_OFFSET_FAMILY_8000_OLD : - NVM_LAR_OFFSET_FAMILY_8000; - - lar_config = le16_to_cpup(regulatory + lar_offset); - data->lar_enabled = !!(lar_config & - NVM_LAR_ENABLED_FAMILY_8000); - - /* MAC address in family 8000 */ - iwl_set_hw_address_family_8000(dev, cfg, data, mac_override, - nvm_hw, mac_addr0, mac_addr1); - - iwl_init_sbands(dev, cfg, data, regulatory, - tx_chains, rx_chains, - lar_fw_supported && data->lar_enabled); - } - - data->calib_version = 255; - - return data; -} -IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); - -static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, - int ch_idx, u16 nvm_flags, - const struct iwl_cfg *cfg) -{ - u32 flags = NL80211_RRF_NO_HT40; - u32 last_5ghz_ht = LAST_5GHZ_HT; - - if (cfg->device_family == IWL_DEVICE_FAMILY_8000) - last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; - - if (ch_idx < NUM_2GHZ_CHANNELS && - (nvm_flags & NVM_CHANNEL_40MHZ)) { - if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) - flags &= ~NL80211_RRF_NO_HT40PLUS; - if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) - flags &= ~NL80211_RRF_NO_HT40MINUS; - } else if (nvm_chan[ch_idx] <= last_5ghz_ht && - (nvm_flags & NVM_CHANNEL_40MHZ)) { - if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) - flags &= ~NL80211_RRF_NO_HT40PLUS; - else - flags &= ~NL80211_RRF_NO_HT40MINUS; - } - - if (!(nvm_flags & NVM_CHANNEL_80MHZ)) - flags |= NL80211_RRF_NO_80MHZ; - if (!(nvm_flags & NVM_CHANNEL_160MHZ)) - flags |= NL80211_RRF_NO_160MHZ; - - if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) - flags |= NL80211_RRF_NO_IR; - - if (nvm_flags & NVM_CHANNEL_RADAR) - flags |= NL80211_RRF_DFS; - - if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) - flags |= NL80211_RRF_NO_OUTDOOR; - - /* Set the GO concurrent flag only in case that NO_IR is set. - * Otherwise it is meaningless - */ - if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && - (flags & NL80211_RRF_NO_IR)) - flags |= NL80211_RRF_GO_CONCURRENT; - - return flags; -} - -struct ieee80211_regdomain * -iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, - int num_of_ch, __le32 *channels, u16 fw_mcc) -{ - int ch_idx; - u16 ch_flags, prev_ch_flags = 0; - const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ? - iwl_nvm_channels_family_8000 : iwl_nvm_channels; - struct ieee80211_regdomain *regd; - int size_of_regd; - struct ieee80211_reg_rule *rule; - enum ieee80211_band band; - int center_freq, prev_center_freq = 0; - int valid_rules = 0; - bool new_rule; - int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ? - IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS; - - if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) - return ERR_PTR(-EINVAL); - - if (WARN_ON(num_of_ch > max_num_ch)) - num_of_ch = max_num_ch; - - IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", - num_of_ch); - - /* build a regdomain rule for every valid channel */ - size_of_regd = - sizeof(struct ieee80211_regdomain) + - num_of_ch * sizeof(struct ieee80211_reg_rule); - - regd = kzalloc(size_of_regd, GFP_KERNEL); - if (!regd) - return ERR_PTR(-ENOMEM); - - for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { - ch_flags = (u16)__le32_to_cpup(channels + ch_idx); - band = (ch_idx < NUM_2GHZ_CHANNELS) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], - band); - new_rule = false; - - if (!(ch_flags & NVM_CHANNEL_VALID)) { - IWL_DEBUG_DEV(dev, IWL_DL_LAR, - "Ch. %d Flags %x [%sGHz] - No traffic\n", - nvm_chan[ch_idx], - ch_flags, - (ch_idx >= NUM_2GHZ_CHANNELS) ? 
- "5.2" : "2.4"); - continue; - } - - /* we can't continue the same rule */ - if (ch_idx == 0 || prev_ch_flags != ch_flags || - center_freq - prev_center_freq > 20) { - valid_rules++; - new_rule = true; - } - - rule = &regd->reg_rules[valid_rules - 1]; - - if (new_rule) - rule->freq_range.start_freq_khz = - MHZ_TO_KHZ(center_freq - 10); - - rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10); - - /* this doesn't matter - not used by FW */ - rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); - rule->power_rule.max_eirp = - DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); - - rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, - ch_flags, cfg); - - /* rely on auto-calculation to merge BW of contiguous chans */ - rule->flags |= NL80211_RRF_AUTO_BW; - rule->freq_range.max_bandwidth_khz = 0; - - prev_ch_flags = ch_flags; - prev_center_freq = center_freq; - - IWL_DEBUG_DEV(dev, IWL_DL_LAR, - "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", - center_freq, - band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", - CHECK_AND_PRINT_I(VALID), - CHECK_AND_PRINT_I(ACTIVE), - CHECK_AND_PRINT_I(RADAR), - CHECK_AND_PRINT_I(WIDE), - CHECK_AND_PRINT_I(40MHZ), - CHECK_AND_PRINT_I(80MHZ), - CHECK_AND_PRINT_I(160MHZ), - CHECK_AND_PRINT_I(INDOOR_ONLY), - CHECK_AND_PRINT_I(GO_CONCURRENT), - ch_flags, - ((ch_flags & NVM_CHANNEL_ACTIVE) && - !(ch_flags & NVM_CHANNEL_RADAR)) - ? "" : "not "); - } - - regd->n_reg_rules = valid_rules; - - /* set alpha2 from FW. */ - regd->alpha2[0] = fw_mcc >> 8; - regd->alpha2[1] = fw_mcc & 0xff; - - return regd; -} -IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h deleted file mode 100644 index 9f44d8188c5c..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h +++ /dev/null @@ -1,97 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ -#ifndef __iwl_nvm_parse_h__ -#define __iwl_nvm_parse_h__ - -#include -#include "iwl-eeprom-parse.h" - -/** - * iwl_parse_nvm_data - parse NVM data and return values - * - * This function parses all NVM values we need and then - * returns a (newly allocated) struct containing all the - * relevant values for driver use. The struct must be freed - * later with iwl_free_nvm_data(). - */ -struct iwl_nvm_data * -iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, - const __le16 *nvm_hw, const __le16 *nvm_sw, - const __le16 *nvm_calib, const __le16 *regulatory, - const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1, u32 hw_id); - -/** - * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW - * - * This function parses the regulatory channel data received as a - * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, - * to be fed into the regulatory core. An ERR_PTR is returned on error. - * If not given to the regulatory core, the user is responsible for freeing - * the regdomain returned here with kfree. - */ -struct ieee80211_regdomain * -iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, - int num_of_ch, __le32 *channels, u16 fw_mcc); - -#endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h deleted file mode 100644 index 2a58d6833224..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ /dev/null @@ -1,271 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_op_mode_h__ -#define __iwl_op_mode_h__ - -#include -#include - -struct iwl_op_mode; -struct iwl_trans; -struct sk_buff; -struct iwl_device_cmd; -struct iwl_rx_cmd_buffer; -struct iwl_fw; -struct iwl_cfg; - -/** - * DOC: Operational mode - what is it ? - * - * The operational mode (a.k.a. op_mode) is the layer that implements - * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses - * the transport API to access the HW. The op_mode doesn't need to know how the - * underlying HW works, since the transport layer takes care of that. - * - * There can be several op_mode: i.e. different fw APIs will require two - * different op_modes. This is why the op_mode is virtualized. - */ - -/** - * DOC: Life cycle of the Operational mode - * - * The operational mode has a very simple life cycle. - * - * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the - * capabilities advertised by the fw file (in TLV format). 
- * 2) The driver layer starts the op_mode (ops->start) - * 3) The op_mode registers mac80211 - * 4) The op_mode is governed by mac80211 - * 5) The driver layer stops the op_mode - */ - -/** - * struct iwl_op_mode_ops - op_mode specific operations - * - * The op_mode exports its ops so that external components can start it and - * interact with it. The driver layer typically calls the start and stop - * handlers, the transport layer calls the others. - * - * All the handlers MUST be implemented, except @rx_rss which can be left - * out *iff* the opmode will never run on hardware with multi-queue capability. - * - * @start: start the op_mode. The transport layer is already allocated. - * May sleep - * @stop: stop the op_mode. Must free all the memory allocated. - * May sleep - * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the - * HCMD this Rx responds to. Can't sleep. - * @rx_rss: data queue RX notification to the op_mode, for (data) notifications - * received on the RSS queue(s). The queue parameter indicates which of the - * RSS queues received this frame; it will always be non-zero. - * This method must not sleep. - * @queue_full: notifies that a HW queue is full. - * Must be atomic and called with BH disabled. - * @queue_not_full: notifies that a HW queue is not full any more. - * Must be atomic and called with BH disabled. - * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that - * the radio is killed. Return %true if the device should be stopped by - * the transport immediately after the call. May sleep. - * @free_skb: allows the transport layer to free skbs that haven't been - * reclaimed by the op_mode. This can happen when the driver is freed and - * there are Tx packets pending in the transport layer. - * Must be atomic - * @nic_error: error notification. Must be atomic and must be called with BH - * disabled. - * @cmd_queue_full: Called when the command queue gets full. Must be atomic and - * called with BH disabled. - * @nic_config: configure NIC, called before firmware is started. - * May sleep - * @wimax_active: invoked when WiMax becomes active. May sleep - * @enter_d0i3: configure the fw to enter d0i3. return 1 to indicate d0i3 - * entrance is aborted (e.g. due to held reference). May sleep. - * @exit_d0i3: configure the fw to exit d0i3. May sleep. 
- */ -struct iwl_op_mode_ops { - struct iwl_op_mode *(*start)(struct iwl_trans *trans, - const struct iwl_cfg *cfg, - const struct iwl_fw *fw, - struct dentry *dbgfs_dir); - void (*stop)(struct iwl_op_mode *op_mode); - void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb); - void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb, unsigned int queue); - void (*queue_full)(struct iwl_op_mode *op_mode, int queue); - void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); - bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); - void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); - void (*nic_error)(struct iwl_op_mode *op_mode); - void (*cmd_queue_full)(struct iwl_op_mode *op_mode); - void (*nic_config)(struct iwl_op_mode *op_mode); - void (*wimax_active)(struct iwl_op_mode *op_mode); - int (*enter_d0i3)(struct iwl_op_mode *op_mode); - int (*exit_d0i3)(struct iwl_op_mode *op_mode); -}; - -int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops); -void iwl_opmode_deregister(const char *name); - -/** - * struct iwl_op_mode - operational mode - * @ops: pointer to its own ops - * - * This holds an implementation of the mac80211 / fw API. - */ -struct iwl_op_mode { - const struct iwl_op_mode_ops *ops; - - char op_mode_specific[0] __aligned(sizeof(void *)); -}; - -static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode) -{ - might_sleep(); - op_mode->ops->stop(op_mode); -} - -static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb) -{ - return op_mode->ops->rx(op_mode, napi, rxb); -} - -static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb, - unsigned int queue) -{ - op_mode->ops->rx_rss(op_mode, napi, rxb, queue); -} - -static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, - int queue) -{ - op_mode->ops->queue_full(op_mode, queue); -} - -static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode, - int queue) -{ - op_mode->ops->queue_not_full(op_mode, queue); -} - -static inline bool __must_check -iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state) -{ - might_sleep(); - return op_mode->ops->hw_rf_kill(op_mode, state); -} - -static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, - struct sk_buff *skb) -{ - op_mode->ops->free_skb(op_mode, skb); -} - -static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode) -{ - op_mode->ops->nic_error(op_mode); -} - -static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode) -{ - op_mode->ops->cmd_queue_full(op_mode); -} - -static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode) -{ - might_sleep(); - op_mode->ops->nic_config(op_mode); -} - -static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode) -{ - might_sleep(); - op_mode->ops->wimax_active(op_mode); -} - -static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode) -{ - might_sleep(); - - if (!op_mode->ops->enter_d0i3) - return 0; - return op_mode->ops->enter_d0i3(op_mode); -} - -static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode) -{ - might_sleep(); - - if (!op_mode->ops->exit_d0i3) - return 0; - return op_mode->ops->exit_d0i3(op_mode); -} - -#endif /* __iwl_op_mode_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c deleted file 
mode 100644 index a105455b6a24..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c +++ /dev/null @@ -1,471 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include -#include - -#include "iwl-drv.h" -#include "iwl-phy-db.h" -#include "iwl-debug.h" -#include "iwl-op-mode.h" -#include "iwl-trans.h" - -#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ -#define IWL_NUM_PAPD_CH_GROUPS 9 -#define IWL_NUM_TXP_CH_GROUPS 9 - -struct iwl_phy_db_entry { - u16 size; - u8 *data; -}; - -/** - * struct iwl_phy_db - stores phy configuration and calibration data. - * - * @cfg: phy configuration. 
- * @calib_nch: non channel specific calibration data. - * @calib_ch: channel specific calibration data. - * @calib_ch_group_papd: calibration data related to papd channel group. - * @calib_ch_group_txp: calibration data related to tx power chanel group. - */ -struct iwl_phy_db { - struct iwl_phy_db_entry cfg; - struct iwl_phy_db_entry calib_nch; - struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; - struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; - - struct iwl_trans *trans; -}; - -enum iwl_phy_db_section_type { - IWL_PHY_DB_CFG = 1, - IWL_PHY_DB_CALIB_NCH, - IWL_PHY_DB_UNUSED, - IWL_PHY_DB_CALIB_CHG_PAPD, - IWL_PHY_DB_CALIB_CHG_TXP, - IWL_PHY_DB_MAX -}; - -#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */ - -/* - * phy db - configure operational ucode - */ -struct iwl_phy_db_cmd { - __le16 type; - __le16 length; - u8 data[]; -} __packed; - -/* for parsing of tx power channel group data that comes from the firmware*/ -struct iwl_phy_db_chg_txp { - __le32 space; - __le16 max_channel_idx; -} __packed; - -/* - * phy db - Receive phy db chunk after calibrations - */ -struct iwl_calib_res_notif_phy_db { - __le16 type; - __le16 length; - u8 data[]; -} __packed; - -struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans) -{ - struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db), - GFP_KERNEL); - - if (!phy_db) - return phy_db; - - phy_db->trans = trans; - - /* TODO: add default values of the phy db. */ - return phy_db; -} -IWL_EXPORT_SYMBOL(iwl_phy_db_init); - -/* - * get phy db section: returns a pointer to a phy db section specified by - * type and channel group id. - */ -static struct iwl_phy_db_entry * -iwl_phy_db_get_section(struct iwl_phy_db *phy_db, - enum iwl_phy_db_section_type type, - u16 chg_id) -{ - if (!phy_db || type >= IWL_PHY_DB_MAX) - return NULL; - - switch (type) { - case IWL_PHY_DB_CFG: - return &phy_db->cfg; - case IWL_PHY_DB_CALIB_NCH: - return &phy_db->calib_nch; - case IWL_PHY_DB_CALIB_CHG_PAPD: - if (chg_id >= IWL_NUM_PAPD_CH_GROUPS) - return NULL; - return &phy_db->calib_ch_group_papd[chg_id]; - case IWL_PHY_DB_CALIB_CHG_TXP: - if (chg_id >= IWL_NUM_TXP_CH_GROUPS) - return NULL; - return &phy_db->calib_ch_group_txp[chg_id]; - default: - return NULL; - } - return NULL; -} - -static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db, - enum iwl_phy_db_section_type type, - u16 chg_id) -{ - struct iwl_phy_db_entry *entry = - iwl_phy_db_get_section(phy_db, type, chg_id); - if (!entry) - return; - - kfree(entry->data); - entry->data = NULL; - entry->size = 0; -} - -void iwl_phy_db_free(struct iwl_phy_db *phy_db) -{ - int i; - - if (!phy_db) - return; - - iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); - iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); - for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) - iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); - for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) - iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i); - - kfree(phy_db); -} -IWL_EXPORT_SYMBOL(iwl_phy_db_free); - -int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, - gfp_t alloc_ctx) -{ - struct iwl_calib_res_notif_phy_db *phy_db_notif = - (struct iwl_calib_res_notif_phy_db *)pkt->data; - enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type); - u16 size = le16_to_cpu(phy_db_notif->length); - struct iwl_phy_db_entry *entry; - u16 chg_id = 0; - - if (!phy_db) - return -EINVAL; - - if (type == IWL_PHY_DB_CALIB_CHG_PAPD || - type == 
IWL_PHY_DB_CALIB_CHG_TXP) - chg_id = le16_to_cpup((__le16 *)phy_db_notif->data); - - entry = iwl_phy_db_get_section(phy_db, type, chg_id); - if (!entry) - return -EINVAL; - - kfree(entry->data); - entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx); - if (!entry->data) { - entry->size = 0; - return -ENOMEM; - } - - entry->size = size; - - IWL_DEBUG_INFO(phy_db->trans, - "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", - __func__, __LINE__, type, size); - - return 0; -} -IWL_EXPORT_SYMBOL(iwl_phy_db_set_section); - -static int is_valid_channel(u16 ch_id) -{ - if (ch_id <= 14 || - (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) || - (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) || - (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1)) - return 1; - return 0; -} - -static u8 ch_id_to_ch_index(u16 ch_id) -{ - if (WARN_ON(!is_valid_channel(ch_id))) - return 0xff; - - if (ch_id <= 14) - return ch_id - 1; - if (ch_id <= 64) - return (ch_id + 20) / 4; - if (ch_id <= 140) - return (ch_id - 12) / 4; - return (ch_id - 13) / 4; -} - - -static u16 channel_id_to_papd(u16 ch_id) -{ - if (WARN_ON(!is_valid_channel(ch_id))) - return 0xff; - - if (1 <= ch_id && ch_id <= 14) - return 0; - if (36 <= ch_id && ch_id <= 64) - return 1; - if (100 <= ch_id && ch_id <= 140) - return 2; - return 3; -} - -static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id) -{ - struct iwl_phy_db_chg_txp *txp_chg; - int i; - u8 ch_index = ch_id_to_ch_index(ch_id); - if (ch_index == 0xff) - return 0xff; - - for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) { - txp_chg = (void *)phy_db->calib_ch_group_txp[i].data; - if (!txp_chg) - return 0xff; - /* - * Looking for the first channel group that its max channel is - * higher then wanted channel. - */ - if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index) - return i; - } - return 0xff; -} -static -int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, - u32 type, u8 **data, u16 *size, u16 ch_id) -{ - struct iwl_phy_db_entry *entry; - u16 ch_group_id = 0; - - if (!phy_db) - return -EINVAL; - - /* find wanted channel group */ - if (type == IWL_PHY_DB_CALIB_CHG_PAPD) - ch_group_id = channel_id_to_papd(ch_id); - else if (type == IWL_PHY_DB_CALIB_CHG_TXP) - ch_group_id = channel_id_to_txp(phy_db, ch_id); - - entry = iwl_phy_db_get_section(phy_db, type, ch_group_id); - if (!entry) - return -EINVAL; - - *data = entry->data; - *size = entry->size; - - IWL_DEBUG_INFO(phy_db->trans, - "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", - __func__, __LINE__, type, *size); - - return 0; -} - -static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type, - u16 length, void *data) -{ - struct iwl_phy_db_cmd phy_db_cmd; - struct iwl_host_cmd cmd = { - .id = PHY_DB_CMD, - }; - - IWL_DEBUG_INFO(phy_db->trans, - "Sending PHY-DB hcmd of type %d, of length %d\n", - type, length); - - /* Set phy db cmd variables */ - phy_db_cmd.type = cpu_to_le16(type); - phy_db_cmd.length = cpu_to_le16(length); - - /* Set hcmd variables */ - cmd.data[0] = &phy_db_cmd; - cmd.len[0] = sizeof(struct iwl_phy_db_cmd); - cmd.data[1] = data; - cmd.len[1] = length; - cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; - - return iwl_trans_send_cmd(phy_db->trans, &cmd); -} - -static int iwl_phy_db_send_all_channel_groups( - struct iwl_phy_db *phy_db, - enum iwl_phy_db_section_type type, - u8 max_ch_groups) -{ - u16 i; - int err; - struct iwl_phy_db_entry *entry; - - /* Send all the channel specific groups to operational fw */ - for (i = 0; i < max_ch_groups; i++) { - entry = iwl_phy_db_get_section(phy_db, - type, - i); - if 
(!entry) - return -EINVAL; - - if (!entry->size) - continue; - - /* Send the requested PHY DB section */ - err = iwl_send_phy_db_cmd(phy_db, - type, - entry->size, - entry->data); - if (err) { - IWL_ERR(phy_db->trans, - "Can't SEND phy_db section %d (%d), err %d\n", - type, i, err); - return err; - } - - IWL_DEBUG_INFO(phy_db->trans, - "Sent PHY_DB HCMD, type = %d num = %d\n", - type, i); - } - - return 0; -} - -int iwl_send_phy_db_data(struct iwl_phy_db *phy_db) -{ - u8 *data = NULL; - u16 size = 0; - int err; - - IWL_DEBUG_INFO(phy_db->trans, - "Sending phy db data and configuration to runtime image\n"); - - /* Send PHY DB CFG section */ - err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG, - &data, &size, 0); - if (err) { - IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n"); - return err; - } - - err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data); - if (err) { - IWL_ERR(phy_db->trans, - "Cannot send HCMD of Phy DB cfg section\n"); - return err; - } - - err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH, - &data, &size, 0); - if (err) { - IWL_ERR(phy_db->trans, - "Cannot get Phy DB non specific channel section\n"); - return err; - } - - err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data); - if (err) { - IWL_ERR(phy_db->trans, - "Cannot send HCMD of Phy DB non specific channel section\n"); - return err; - } - - /* Send all the TXP channel specific data */ - err = iwl_phy_db_send_all_channel_groups(phy_db, - IWL_PHY_DB_CALIB_CHG_PAPD, - IWL_NUM_PAPD_CH_GROUPS); - if (err) { - IWL_ERR(phy_db->trans, - "Cannot send channel specific PAPD groups\n"); - return err; - } - - /* Send all the TXP channel specific data */ - err = iwl_phy_db_send_all_channel_groups(phy_db, - IWL_PHY_DB_CALIB_CHG_TXP, - IWL_NUM_TXP_CH_GROUPS); - if (err) { - IWL_ERR(phy_db->trans, - "Cannot send channel specific TX power groups\n"); - return err; - } - - IWL_DEBUG_INFO(phy_db->trans, - "Finished sending phy db non channel data\n"); - return 0; -} -IWL_EXPORT_SYMBOL(iwl_send_phy_db_data); diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h deleted file mode 100644 index 9ee18d0d2d01..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h +++ /dev/null @@ -1,82 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __IWL_PHYDB_H__ -#define __IWL_PHYDB_H__ - -#include - -#include "iwl-op-mode.h" -#include "iwl-trans.h" - -struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans); - -void iwl_phy_db_free(struct iwl_phy_db *phy_db); - -int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, - gfp_t alloc_ctx); - - -int iwl_send_phy_db_data(struct iwl_phy_db *phy_db); - -#endif /* __IWL_PHYDB_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h deleted file mode 100644 index 3ab777f79e4f..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ /dev/null @@ -1,401 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __iwl_prph_h__ -#define __iwl_prph_h__ - -/* - * Registers in this file are internal, not PCI bus memory mapped. - * Driver accesses these via HBUS_TARG_PRPH_* registers. 
- */ -#define PRPH_BASE (0x00000) -#define PRPH_END (0xFFFFF) - -/* APMG (power management) constants */ -#define APMG_BASE (PRPH_BASE + 0x3000) -#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) -#define APMG_CLK_EN_REG (APMG_BASE + 0x0004) -#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) -#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) -#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) -#define APMG_RFKILL_REG (APMG_BASE + 0x0014) -#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) -#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) -#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058) -#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C) - -#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001) -#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) -#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) - -#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000) -#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) -#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) -#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) -#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) -#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ -#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) - -#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200) -#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) -#define APMG_PCIDEV_STT_VAL_WAKE_ME (0x00004000) - -#define APMG_RTC_INT_STT_RFKILL (0x10000000) - -/* Device system time */ -#define DEVICE_SYSTEM_TIME_REG 0xA0206C - -/* Device NMI register */ -#define DEVICE_SET_NMI_REG 0x00a01c30 -#define DEVICE_SET_NMI_VAL_HW BIT(0) -#define DEVICE_SET_NMI_VAL_DRV BIT(7) -#define DEVICE_SET_NMI_8000_REG 0x00a01c24 -#define DEVICE_SET_NMI_8000_VAL 0x1000000 - -/* Shared registers (0x0..0x3ff, via target indirect or periphery */ -#define SHR_BASE 0x00a10000 - -/* Shared GP1 register */ -#define SHR_APMG_GP1_REG 0x01dc -#define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG) -#define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004 -#define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000 - -/* Shared DL_CFG register */ -#define SHR_APMG_DL_CFG_REG 0x01c4 -#define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG) -#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0 -#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080 -#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100 - -/* Shared APMG_XTAL_CFG register */ -#define SHR_APMG_XTAL_CFG_REG 0x1c0 -#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000 - -/* - * Device reset for family 8000 - * write to bit 24 in order to reset the CPU -*/ -#define RELEASE_CPU_RESET (0x300C) -#define RELEASE_CPU_RESET_BIT BIT(24) - -/***************************************************************************** - * 7000/3000 series SHR DTS addresses * - *****************************************************************************/ - -#define SHR_MISC_WFM_DTS_EN (0x00a10024) -#define DTSC_CFG_MODE (0x00a10604) -#define DTSC_VREF_AVG (0x00a10648) -#define DTSC_VREF5_AVG (0x00a1064c) -#define DTSC_CFG_MODE_PERIODIC (0x2) -#define DTSC_PTAT_AVG (0x00a10650) - - -/** - * Tx Scheduler - * - * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs - * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in - * host DRAM. It steers each frame's Tx command (which contains the frame - * data) into one of up to 7 prioritized Tx DMA FIFO channels within the - * device. A queue maps to only one (selectable by driver) Tx DMA channel, - * but one DMA channel may take input from several queues. - * - * Tx DMA FIFOs have dedicated purposes. 
- * - * For 5000 series and up, they are used differently - * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c): - * - * 0 -- EDCA BK (background) frames, lowest priority - * 1 -- EDCA BE (best effort) frames, normal priority - * 2 -- EDCA VI (video) frames, higher priority - * 3 -- EDCA VO (voice) and management frames, highest priority - * 4 -- unused - * 5 -- unused - * 6 -- unused - * 7 -- Commands - * - * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. - * In addition, driver can map the remaining queues to Tx DMA/FIFO - * channels 0-3 to support 11n aggregation via EDCA DMA channels. - * - * The driver sets up each queue to work in one of two modes: - * - * 1) Scheduler-Ack, in which the scheduler automatically supports a - * block-ack (BA) window of up to 64 TFDs. In this mode, each queue - * contains TFDs for a unique combination of Recipient Address (RA) - * and Traffic Identifier (TID), that is, traffic of a given - * Quality-Of-Service (QOS) priority, destined for a single station. - * - * In scheduler-ack mode, the scheduler keeps track of the Tx status of - * each frame within the BA window, including whether it's been transmitted, - * and whether it's been acknowledged by the receiving station. The device - * automatically processes block-acks received from the receiving STA, - * and reschedules un-acked frames to be retransmitted (successful - * Tx completion may end up being out-of-order). - * - * The driver must maintain the queue's Byte Count table in host DRAM - * for this mode. - * This mode does not support fragmentation. - * - * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. - * The device may automatically retry Tx, but will retry only one frame - * at a time, until receiving ACK from receiving station, or reaching - * retry limit and giving up. - * - * The command queue (#4/#9) must use this mode! - * This mode does not require use of the Byte Count table in host DRAM. - * - * Driver controls scheduler operation via 3 means: - * 1) Scheduler registers - * 2) Shared scheduler data base in internal SRAM - * 3) Shared data in host DRAM - * - * Initialization: - * - * When loading, driver should allocate memory for: - * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs. - * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory - * (1024 bytes for each queue). - * - * After receiving "Alive" response from uCode, driver must initialize - * the scheduler (especially for queue #4/#9, the command queue, otherwise - * the driver can't issue commands!): - */ -#define SCD_MEM_LOWER_BOUND (0x0000) - -/** - * Max Tx window size is the max number of contiguous TFDs that the scheduler - * can keep track of at one time when creating block-ack chains of frames. - * Note that "64" matches the number of ack bits in a block-ack packet. 
- */ -#define SCD_WIN_SIZE 64 -#define SCD_FRAME_LIMIT 64 - -#define SCD_TXFIFO_POS_TID (0) -#define SCD_TXFIFO_POS_RA (4) -#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) - -/* agn SCD */ -#define SCD_QUEUE_STTS_REG_POS_TXF (0) -#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3) -#define SCD_QUEUE_STTS_REG_POS_WSL (4) -#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) -#define SCD_QUEUE_STTS_REG_MSK (0x017F0000) - -#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8) -#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) -#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) -#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) -#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0) -#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F) -#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) -#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) -#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0) -#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18) - -/* Context Data */ -#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) -#define SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) - -/* Tx status */ -#define SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) -#define SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) - -/* Translation Data */ -#define SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) -#define SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808) - -#define SCD_CONTEXT_QUEUE_OFFSET(x)\ - (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8)) - -#define SCD_TX_STTS_QUEUE_OFFSET(x)\ - (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16)) - -#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \ - ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) - -#define SCD_BASE (PRPH_BASE + 0xa02c00) - -#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0) -#define SCD_DRAM_BASE_ADDR (SCD_BASE + 0x8) -#define SCD_AIT (SCD_BASE + 0x0c) -#define SCD_TXFACT (SCD_BASE + 0x10) -#define SCD_ACTIVE (SCD_BASE + 0x14) -#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) -#define SCD_CHAINEXT_EN (SCD_BASE + 0x244) -#define SCD_AGGR_SEL (SCD_BASE + 0x248) -#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) -#define SCD_GP_CTRL (SCD_BASE + 0x1a8) -#define SCD_EN_CTRL (SCD_BASE + 0x254) - -/*********************** END TX SCHEDULER *************************************/ - -/* tcp checksum offload */ -#define RX_EN_CSUM (0x00a00d88) - -/* Oscillator clock */ -#define OSC_CLK (0xa04068) -#define OSC_CLK_FORCE_CONTROL (0x8) - -#define FH_UCODE_LOAD_STATUS (0x1AF0) -#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70) -enum secure_load_status_reg { - LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001, - LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003, - LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007, - LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8, - LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00, -}; - -#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38) -#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C) -#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78) -#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C) - -#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000) -#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000) -#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000) -#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400) - -/* Rx FIFO */ -#define RXF_SIZE_ADDR (0xa00c88) -#define RXF_RD_D_SPACE (0xa00c40) -#define RXF_RD_WR_PTR (0xa00c50) -#define RXF_RD_RD_PTR (0xa00c54) -#define RXF_RD_FENCE_PTR (0xa00c4c) -#define RXF_SET_FENCE_MODE (0xa00c14) -#define RXF_LD_WR2FENCE (0xa00c1c) -#define RXF_FIFO_RD_FENCE_INC (0xa00c68) -#define RXF_SIZE_BYTE_CND_POS (7) -#define 
RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS) -#define RXF_DIFF_FROM_PREV (0x200) - -#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10) -#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c) - -/* Tx FIFO */ -#define TXF_FIFO_ITEM_CNT (0xa00438) -#define TXF_WR_PTR (0xa00414) -#define TXF_RD_PTR (0xa00410) -#define TXF_FENCE_PTR (0xa00418) -#define TXF_LOCK_FENCE (0xa00424) -#define TXF_LARC_NUM (0xa0043c) -#define TXF_READ_MODIFY_DATA (0xa00448) -#define TXF_READ_MODIFY_ADDR (0xa0044c) - -/* FW monitor */ -#define MON_BUFF_SAMPLE_CTL (0xa03c00) -#define MON_BUFF_BASE_ADDR (0xa03c3c) -#define MON_BUFF_END_ADDR (0xa03c40) -#define MON_BUFF_WRPTR (0xa03c44) -#define MON_BUFF_CYCLE_CNT (0xa03c48) - -#define MON_DMARB_RD_CTL_ADDR (0xa03c60) -#define MON_DMARB_RD_DATA_ADDR (0xa03c5c) - -#define DBGC_IN_SAMPLE (0xa03c00) - -/* enable the ID buf for read */ -#define WFPM_PS_CTL_CLR 0xA0300C -#define WFMP_MAC_ADDR_0 0xA03080 -#define WFMP_MAC_ADDR_1 0xA03084 -#define LMPM_PMG_EN 0xA01CEC -#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 -#define RFIC_REG_RD 0xAD0470 -#define WFPM_CTRL_REG 0xA03030 -enum { - ENABLE_WFPM = BIT(31), - WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, -}; - -#define AUX_MISC_REG 0xA200B0 -enum { - HW_STEP_LOCATION_BITS = 24, -}; - -#define AUX_MISC_MASTER1_EN 0xA20818 -enum aux_misc_master1_en { - AUX_MISC_MASTER1_EN_SBE_MSK = 0x1, -}; - -#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 -#define RSA_ENABLE 0xA24B08 -#define PREG_AUX_BUS_WPROT_0 0xA04CC0 -#define SB_CPU_1_STATUS 0xA01E30 -#define SB_CPU_2_STATUS 0xA01E34 - -/* FW chicken bits */ -#define LMPM_CHICK 0xA01FF8 -enum { - LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0), -}; - -/* FW chicken bits */ -#define LMPM_PAGE_PASS_NOTIF 0xA03824 -enum { - LMPM_PAGE_PASS_NOTIF_POS = BIT(20), -}; - -#endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-scd.h b/drivers/net/wireless/iwlwifi/iwl-scd.h deleted file mode 100644 index f2353ebf2666..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-scd.h +++ /dev/null @@ -1,143 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Mobile Communications GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __iwl_scd_h__ -#define __iwl_scd_h__ - -#include "iwl-trans.h" -#include "iwl-io.h" -#include "iwl-prph.h" - - -static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans, - u16 txq_id) -{ - iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); -} - -static inline void iwl_scd_txq_enable_agg(struct iwl_trans *trans, - u16 txq_id) -{ - iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); -} - -static inline void iwl_scd_txq_disable_agg(struct iwl_trans *trans, - u16 txq_id) -{ - iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); -} - -static inline void iwl_scd_disable_agg(struct iwl_trans *trans) -{ - iwl_set_bits_prph(trans, SCD_AGGR_SEL, 0); -} - -static inline void iwl_scd_activate_fifos(struct iwl_trans *trans) -{ - iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7)); -} - -static inline void iwl_scd_deactivate_fifos(struct iwl_trans *trans) -{ - iwl_write_prph(trans, SCD_TXFACT, 0); -} - -static inline void iwl_scd_enable_set_active(struct iwl_trans *trans, - u32 value) -{ - iwl_write_prph(trans, SCD_EN_CTRL, value); -} - -static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl) -{ - if (chnl < 20) - return SCD_BASE + 0x18 + chnl * 4; - WARN_ON_ONCE(chnl >= 32); - return SCD_BASE + 0x284 + (chnl - 20) * 4; -} - -static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl) -{ - if (chnl < 20) - return SCD_BASE + 0x68 + chnl * 4; - WARN_ON_ONCE(chnl >= 32); - return SCD_BASE + 0x2B4 + chnl * 4; -} - -static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl) -{ - if (chnl < 20) - return SCD_BASE + 0x10c + chnl * 4; - WARN_ON_ONCE(chnl >= 32); - return SCD_BASE + 0x334 + chnl * 4; -} - -static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans, - u16 txq_id) -{ - iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), - (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| - (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); -} - -#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c 
b/drivers/net/wireless/iwlwifi/iwl-trans.c deleted file mode 100644 index 71610968c365..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-trans.c +++ /dev/null @@ -1,114 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
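
For reference, the per-queue scheduler (SCD) helpers removed above (SCD_QUEUE_WRPTR and friends in iwl-scd.h) split the queues into two register banks: queues 0-19 sit at one offset from SCD_BASE, queues 20-31 at a second block. A minimal stand-alone sketch of that address arithmetic, not part of the patch; DEMO_SCD_BASE is an illustrative placeholder, the real value comes from iwl-prph.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative base only; the driver takes SCD_BASE from iwl-prph.h. */
#define DEMO_SCD_BASE 0x00a02c00u

/* Mirrors SCD_QUEUE_WRPTR(): queues 0..19 in the first bank,
 * queues 20..31 in the second bank (the driver additionally
 * warns once if chnl >= 32). */
static uint32_t demo_scd_queue_wrptr(unsigned int chnl)
{
	if (chnl < 20)
		return DEMO_SCD_BASE + 0x18 + chnl * 4;
	return DEMO_SCD_BASE + 0x284 + (chnl - 20) * 4;
}

int main(void)
{
	printf("queue 5  WRPTR at 0x%08x\n", (unsigned)demo_scd_queue_wrptr(5));
	printf("queue 25 WRPTR at 0x%08x\n", (unsigned)demo_scd_queue_wrptr(25));
	return 0;
}
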
- * - *****************************************************************************/ -#include -#include "iwl-trans.h" - -struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, - struct device *dev, - const struct iwl_cfg *cfg, - const struct iwl_trans_ops *ops, - size_t dev_cmd_headroom) -{ - struct iwl_trans *trans; -#ifdef CONFIG_LOCKDEP - static struct lock_class_key __key; -#endif - - trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL); - if (!trans) - return NULL; - -#ifdef CONFIG_LOCKDEP - lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", - &__key, 0); -#endif - - trans->dev = dev; - trans->cfg = cfg; - trans->ops = ops; - trans->dev_cmd_headroom = dev_cmd_headroom; - trans->num_rx_queues = 1; - - snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), - "iwl_cmd_pool:%s", dev_name(trans->dev)); - trans->dev_cmd_pool = - kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd) - + trans->dev_cmd_headroom, - sizeof(void *), - SLAB_HWCACHE_ALIGN, - NULL); - if (!trans->dev_cmd_pool) - goto free; - - return trans; - free: - kfree(trans); - return NULL; -} - -void iwl_trans_free(struct iwl_trans *trans) -{ - kmem_cache_destroy(trans->dev_cmd_pool); - kfree(trans); -} diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h deleted file mode 100644 index 6f76525088f0..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ /dev/null @@ -1,1125 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
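
A hedged sketch of how a bus back-end might use the iwl_trans_alloc()/iwl_trans_free() helpers shown above during probe and remove; my_bus_trans_init, my_bus_trans_exit and struct my_bus_priv are hypothetical stand-ins for the real PCIe/SDIO specifics:

/* Hypothetical bus back-end; only the iwl_trans_alloc()/iwl_trans_free()
 * calls follow the helpers above, everything else is illustrative. */
#include "iwl-trans.h"

struct my_bus_priv {
	void __iomem *hw_base;	/* bus state lives in trans->trans_specific */
};

static struct iwl_trans *my_bus_trans_init(struct device *dev,
					   const struct iwl_cfg *cfg,
					   const struct iwl_trans_ops *ops)
{
	struct iwl_trans *trans;

	/* priv_size is carved out of the same allocation and reached via
	 * trans->trans_specific; no dev_cmd headroom is requested here. */
	trans = iwl_trans_alloc(sizeof(struct my_bus_priv), dev, cfg, ops, 0);
	if (!trans)
		return NULL;

	return trans;
}

static void my_bus_trans_exit(struct iwl_trans *trans)
{
	iwl_trans_free(trans);	/* also destroys the per-device command cache */
}
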
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_trans_h__ -#define __iwl_trans_h__ - -#include -#include /* for page_address */ -#include - -#include "iwl-debug.h" -#include "iwl-config.h" -#include "iwl-fw.h" -#include "iwl-op-mode.h" - -/** - * DOC: Transport layer - what is it ? - * - * The transport layer is the layer that deals with the HW directly. It provides - * an abstraction of the underlying HW to the upper layer. The transport layer - * doesn't provide any policy, algorithm or anything of this kind, but only - * mechanisms to make the HW do something. It is not completely stateless but - * close to it. - * We will have an implementation for each different supported bus. - */ - -/** - * DOC: Life cycle of the transport layer - * - * The transport layer has a very precise life cycle. - * - * 1) A helper function is called during the module initialization and - * registers the bus driver's ops with the transport's alloc function. - * 2) Bus's probe calls to the transport layer's allocation functions. - * Of course this function is bus specific. - * 3) This allocation functions will spawn the upper layer which will - * register mac80211. - * - * 4) At some point (i.e. mac80211's start call), the op_mode will call - * the following sequence: - * start_hw - * start_fw - * - * 5) Then when finished (or reset): - * stop_device - * - * 6) Eventually, the free function will be called. - */ - -/** - * DOC: Host command section - * - * A host command is a command issued by the upper layer to the fw. There are - * several versions of fw that have several APIs. The transport layer is - * completely agnostic to these differences. - * The transport does provide helper functionality (i.e. 
SYNC / ASYNC mode), - */ -#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) -#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) -#define SEQ_TO_INDEX(s) ((s) & 0xff) -#define INDEX_TO_SEQ(i) ((i) & 0xff) -#define SEQ_RX_FRAME cpu_to_le16(0x8000) - -/* - * those functions retrieve specific information from - * the id field in the iwl_host_cmd struct which contains - * the command id, the group id and the version of the command - * and vice versa -*/ -static inline u8 iwl_cmd_opcode(u32 cmdid) -{ - return cmdid & 0xFF; -} - -static inline u8 iwl_cmd_groupid(u32 cmdid) -{ - return ((cmdid & 0xFF00) >> 8); -} - -static inline u8 iwl_cmd_version(u32 cmdid) -{ - return ((cmdid & 0xFF0000) >> 16); -} - -static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) -{ - return opcode + (groupid << 8) + (version << 16); -} - -/* make u16 wide id out of u8 group and opcode */ -#define WIDE_ID(grp, opcode) ((grp << 8) | opcode) - -/* due to the conversion, this group is special; new groups - * should be defined in the appropriate fw-api header files - */ -#define IWL_ALWAYS_LONG_GROUP 1 - -/** - * struct iwl_cmd_header - * - * This header format appears in the beginning of each command sent from the - * driver, and each response/notification received from uCode. - */ -struct iwl_cmd_header { - u8 cmd; /* Command ID: REPLY_RXON, etc. */ - u8 group_id; - /* - * The driver sets up the sequence number to values of its choosing. - * uCode does not use this value, but passes it back to the driver - * when sending the response to each driver-originated command, so - * the driver can match the response to the command. Since the values - * don't get used by uCode, the driver may set up an arbitrary format. - * - * There is one exception: uCode sets bit 15 when it originates - * the response/notification, i.e. when the response/notification - * is not a direct response to a command sent by the driver. For - * example, uCode issues REPLY_RX when it sends a received frame - * to the driver; it is not a direct response to any driver command. - * - * The Linux driver uses the following format: - * - * 0:7 tfd index - position within TX queue - * 8:12 TX queue id - * 13:14 reserved - * 15 unsolicited RX or uCode-originated notification - */ - __le16 sequence; -} __packed; - -/** - * struct iwl_cmd_header_wide - * - * This header format appears in the beginning of each command sent from the - * driver, and each response/notification received from uCode. - * this is the wide version that contains more information about the command - * like length, version and command type - */ -struct iwl_cmd_header_wide { - u8 cmd; - u8 group_id; - __le16 sequence; - __le16 length; - u8 reserved; - u8 version; -} __packed; - -#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ -#define FH_RSCSR_FRAME_INVALID 0x55550000 -#define FH_RSCSR_FRAME_ALIGN 0x40 - -struct iwl_rx_packet { - /* - * The first 4 bytes of the RX frame header contain both the RX frame - * size and some flags. 
- * Bit fields: - * 31: flag flush RB request - * 30: flag ignore TC (terminal counter) request - * 29: flag fast IRQ request - * 28-14: Reserved - * 13-00: RX frame size - */ - __le32 len_n_flags; - struct iwl_cmd_header hdr; - u8 data[]; -} __packed; - -static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt) -{ - return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; -} - -static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) -{ - return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr); -} - -/** - * enum CMD_MODE - how to send the host commands ? - * - * @CMD_ASYNC: Return right away and don't wait for the response - * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of - * the response. The caller needs to call iwl_free_resp when done. - * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the - * command queue, but after other high priority commands. Valid only - * with CMD_ASYNC. - * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle. - * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle. - * @CMD_WAKE_UP_TRANS: The command response should wake up the trans - * (i.e. mark it as non-idle). - * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to - * check that we leave enough room for the TBs bitmap which needs 20 bits. - */ -enum CMD_MODE { - CMD_ASYNC = BIT(0), - CMD_WANT_SKB = BIT(1), - CMD_SEND_IN_RFKILL = BIT(2), - CMD_HIGH_PRIO = BIT(3), - CMD_SEND_IN_IDLE = BIT(4), - CMD_MAKE_TRANS_IDLE = BIT(5), - CMD_WAKE_UP_TRANS = BIT(6), - - CMD_TB_BITMAP_POS = 11, -}; - -#define DEF_CMD_PAYLOAD_SIZE 320 - -/** - * struct iwl_device_cmd - * - * For allocation of the command and tx queues, this establishes the overall - * size of the largest command we send to uCode, except for commands that - * aren't fully copied and use other TFD space. - */ -struct iwl_device_cmd { - union { - struct { - struct iwl_cmd_header hdr; /* uCode API */ - u8 payload[DEF_CMD_PAYLOAD_SIZE]; - }; - struct { - struct iwl_cmd_header_wide hdr_wide; - u8 payload_wide[DEF_CMD_PAYLOAD_SIZE - - sizeof(struct iwl_cmd_header_wide) + - sizeof(struct iwl_cmd_header)]; - }; - }; -} __packed; - -#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) - -/* - * number of transfer buffers (fragments) per transmit frame descriptor; - * this is just the driver's idea, the hardware supports 20 - */ -#define IWL_MAX_CMD_TBS_PER_TFD 2 - -/** - * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command - * - * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's - * ring. The transport layer doesn't map the command's buffer to DMA, but - * rather copies it to a previously allocated DMA buffer. This flag tells - * the transport layer not to copy the command, but to map the existing - * buffer (that is passed in) instead. This saves the memcpy and allows - * commands that are bigger than the fixed buffer to be submitted. - * Note that a TFD entry after a NOCOPY one cannot be a normal copied one. - * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this - * chunk internally and free it again after the command completes. This - * can (currently) be used only once per command. - * Note that a TFD entry after a DUP one cannot be a normal copied one. 
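
To make the bit layouts above concrete, a small stand-alone demo of the command-id packing helpers and of the RX length mask; it simply re-implements the arithmetic shown in this header with arbitrary example values and is not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Same packing as iwl_cmd_id(): opcode in bits 0-7, group id in 8-15,
 * version in 16-23. */
static uint32_t demo_cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
{
	return opcode + (groupid << 8) + (version << 16);
}

static uint8_t demo_cmd_opcode(uint32_t cmdid)  { return cmdid & 0xff; }
static uint8_t demo_cmd_groupid(uint32_t cmdid) { return (cmdid >> 8) & 0xff; }
static uint8_t demo_cmd_version(uint32_t cmdid) { return (cmdid >> 16) & 0xff; }

/* Same mask as FH_RSCSR_FRAME_SIZE_MSK: bits 0-13 of len_n_flags hold
 * the RX frame size, the upper bits carry flags. */
#define DEMO_FRAME_SIZE_MSK 0x00003fffu

int main(void)
{
	uint32_t id = demo_cmd_id(0x1c, 0x01, 0x02); /* arbitrary example */
	uint32_t len_n_flags = 0xe0000042;           /* flags set, size 0x42 */

	printf("id=0x%06x opcode=0x%02x group=0x%02x ver=%u\n",
	       id, demo_cmd_opcode(id), demo_cmd_groupid(id),
	       demo_cmd_version(id));
	printf("rx frame size = %u bytes\n", len_n_flags & DEMO_FRAME_SIZE_MSK);
	return 0;
}
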
- */ -enum iwl_hcmd_dataflag { - IWL_HCMD_DFL_NOCOPY = BIT(0), - IWL_HCMD_DFL_DUP = BIT(1), -}; - -/** - * struct iwl_host_cmd - Host command to the uCode - * - * @data: array of chunks that composes the data of the host command - * @resp_pkt: response packet, if %CMD_WANT_SKB was set - * @_rx_page_order: (internally used to free response packet) - * @_rx_page_addr: (internally used to free response packet) - * @flags: can be CMD_* - * @len: array of the lengths of the chunks in data - * @dataflags: IWL_HCMD_DFL_* - * @id: command id of the host command, for wide commands encoding the - * version and group as well - */ -struct iwl_host_cmd { - const void *data[IWL_MAX_CMD_TBS_PER_TFD]; - struct iwl_rx_packet *resp_pkt; - unsigned long _rx_page_addr; - u32 _rx_page_order; - - u32 flags; - u32 id; - u16 len[IWL_MAX_CMD_TBS_PER_TFD]; - u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; -}; - -static inline void iwl_free_resp(struct iwl_host_cmd *cmd) -{ - free_pages(cmd->_rx_page_addr, cmd->_rx_page_order); -} - -struct iwl_rx_cmd_buffer { - struct page *_page; - int _offset; - bool _page_stolen; - u32 _rx_page_order; - unsigned int truesize; -}; - -static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) -{ - return (void *)((unsigned long)page_address(r->_page) + r->_offset); -} - -static inline int rxb_offset(struct iwl_rx_cmd_buffer *r) -{ - return r->_offset; -} - -static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) -{ - r->_page_stolen = true; - get_page(r->_page); - return r->_page; -} - -static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) -{ - __free_pages(r->_page, r->_rx_page_order); -} - -#define MAX_NO_RECLAIM_CMDS 6 - -#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) - -/* - * Maximum number of HW queues the transport layer - * currently supports - */ -#define IWL_MAX_HW_QUEUES 32 -#define IWL_MAX_TID_COUNT 8 -#define IWL_FRAME_LIMIT 64 -#define IWL_MAX_RX_HW_QUEUES 16 - -/** - * enum iwl_wowlan_status - WoWLAN image/device status - * @IWL_D3_STATUS_ALIVE: firmware is still running after resume - * @IWL_D3_STATUS_RESET: device was reset while suspended - */ -enum iwl_d3_status { - IWL_D3_STATUS_ALIVE, - IWL_D3_STATUS_RESET, -}; - -/** - * enum iwl_trans_status: transport status flags - * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed - * @STATUS_DEVICE_ENABLED: APM is enabled - * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up) - * @STATUS_INT_ENABLED: interrupts are enabled - * @STATUS_RFKILL: the HW RFkill switch is in KILL position - * @STATUS_FW_ERROR: the fw is in error state - * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands - * are sent - * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent - * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation - */ -enum iwl_trans_status { - STATUS_SYNC_HCMD_ACTIVE, - STATUS_DEVICE_ENABLED, - STATUS_TPOWER_PMI, - STATUS_INT_ENABLED, - STATUS_RFKILL, - STATUS_FW_ERROR, - STATUS_TRANS_GOING_IDLE, - STATUS_TRANS_IDLE, - STATUS_TRANS_DEAD, -}; - -/** - * struct iwl_trans_config - transport configuration - * - * @op_mode: pointer to the upper layer. - * @cmd_queue: the index of the command queue. - * Must be set before start_fw. - * @cmd_fifo: the fifo for host commands - * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue. - * @no_reclaim_cmds: Some devices erroneously don't set the - * SEQ_RX_FRAME bit on some notifications, this is the - * list of such notifications to filter. 
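
As a usage illustration of struct iwl_host_cmd, a hedged sketch of a synchronous command that asks to keep the response buffer; MY_EXAMPLE_CMD and struct my_example_req are hypothetical, while the flow (fill the command, send it, read resp_pkt, release it with iwl_free_resp()) follows the helpers in this header:

/* Hypothetical command id and payload, for illustration only. */
#define MY_EXAMPLE_CMD 0x88

struct my_example_req {
	__le32 value;
};

static int my_send_example_cmd(struct iwl_trans *trans, u32 value)
{
	struct my_example_req req = {
		.value = cpu_to_le32(value),
	};
	struct iwl_host_cmd hcmd = {
		.id = MY_EXAMPLE_CMD,
		.flags = CMD_WANT_SKB,		/* synchronous, keep the response */
		.data = { &req, },
		.len = { sizeof(req), },
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		return ret;

	/* resp_pkt is valid because CMD_WANT_SKB was set; it must be
	 * released with iwl_free_resp() once we are done with it. */
	pr_debug("response payload: %u bytes\n",
		 iwl_rx_packet_payload_len(hcmd.resp_pkt));

	iwl_free_resp(&hcmd);
	return 0;
}
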
Max length is - * %MAX_NO_RECLAIM_CMDS. - * @n_no_reclaim_cmds: # of commands in list - * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs, - * if unset 4k will be the RX buffer size - * @bc_table_dword: set to true if the BC table expects the byte count to be - * in DWORD (as opposed to bytes) - * @scd_set_active: should the transport configure the SCD for HCMD queue - * @wide_cmd_header: firmware supports wide host command header - * @command_names: array of command names, must be 256 entries - * (one for each command); for debugging only - * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until - * we get the ALIVE from the uCode - */ -struct iwl_trans_config { - struct iwl_op_mode *op_mode; - - u8 cmd_queue; - u8 cmd_fifo; - unsigned int cmd_q_wdg_timeout; - const u8 *no_reclaim_cmds; - unsigned int n_no_reclaim_cmds; - - bool rx_buf_size_8k; - bool bc_table_dword; - bool scd_set_active; - bool wide_cmd_header; - const char *const *command_names; - - u32 sdio_adma_addr; -}; - -struct iwl_trans_dump_data { - u32 len; - u8 data[]; -}; - -struct iwl_trans; - -struct iwl_trans_txq_scd_cfg { - u8 fifo; - s8 sta_id; - u8 tid; - bool aggregate; - int frame_limit; -}; - -/** - * struct iwl_trans_ops - transport specific operations - * - * All the handlers MUST be implemented - * - * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken - * out of a low power state. From that point on, the HW can send - * interrupts. May sleep. - * @op_mode_leave: Turn off the HW RF kill indication if on - * May sleep - * @start_fw: allocates and inits all the resources for the transport - * layer. Also kick a fw image. - * May sleep - * @fw_alive: called when the fw sends alive notification. If the fw provides - * the SCD base address in SRAM, then provide it here, or 0 otherwise. - * May sleep - * @stop_device: stops the whole device (embedded CPU put to reset) and stops - * the HW. If low_power is true, the NIC will be put in low power state. - * From that point on, the HW will be stopped but will still issue an - * interrupt if the HW RF kill switch is triggered. - * This callback must do the right thing and not crash even if %start_hw() - * was called but not &start_fw(). May sleep. - * @d3_suspend: put the device into the correct mode for WoWLAN during - * suspend. This is optional, if not implemented WoWLAN will not be - * supported. This callback may sleep. - * @d3_resume: resume the device after WoWLAN, enabling the opmode to - * talk to the WoWLAN image to get its status. This is optional, if not - * implemented WoWLAN will not be supported. This callback may sleep. - * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted. - * If RFkill is asserted in the middle of a SYNC host command, it must - * return -ERFKILL straight away. - * May sleep only if CMD_ASYNC is not set - * @tx: send an skb - * Must be atomic - * @reclaim: free packet until ssn. Returns a list of freed packets. - * Must be atomic - * @txq_enable: setup a queue. To setup an AC queue, use the - * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before - * this one. The op_mode must not configure the HCMD queue. The scheduler - * configuration may be %NULL, in which case the hardware will not be - * configured. May sleep. - * @txq_disable: de-configure a Tx queue to send AMPDUs - * Must be atomic - * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. 
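
A hedged sketch of how an op_mode might fill the configuration structure above before handing it to the transport via iwl_trans_configure() (the wrapper declared further down in this header); the queue/FIFO numbers and the no-reclaim command list are made-up examples:

/* Example values only; a real op_mode derives these from its own
 * command-queue index, FIFO mapping and no-reclaim notification list. */
static const u8 my_no_reclaim_cmds[] = { 0x62, 0xc0 };

static void my_op_mode_configure_trans(struct iwl_trans *trans,
					struct iwl_op_mode *op_mode)
{
	struct iwl_trans_config trans_cfg = {
		.op_mode = op_mode,
		.cmd_queue = 9,			/* index of the HCMD queue */
		.cmd_fifo = 7,			/* FIFO backing that queue */
		.cmd_q_wdg_timeout = 2000,	/* watchdog for stuck HCMDs */
		.no_reclaim_cmds = my_no_reclaim_cmds,
		.n_no_reclaim_cmds = ARRAY_SIZE(my_no_reclaim_cmds),
		.rx_buf_size_8k = false,	/* stay with 4k RX buffers */
		.bc_table_dword = true,
		.scd_set_active = true,
		.wide_cmd_header = false,
	};

	/* Must be called before start_fw; may be called several times. */
	iwl_trans_configure(trans, &trans_cfg);
}
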
- * @freeze_txq_timer: prevents the timer of the queue from firing until the - * queue is set to awake. Must be atomic. - * @dbgfs_register: add the dbgfs files under this directory. Files will be - * automatically deleted. - * @write8: write a u8 to a register at offset ofs from the BAR - * @write32: write a u32 to a register at offset ofs from the BAR - * @read32: read a u32 register at offset ofs from the BAR - * @read_prph: read a DWORD from a periphery register - * @write_prph: write a DWORD to a periphery register - * @read_mem: read device's SRAM in DWORD - * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory - * will be zeroed. - * @configure: configure parameters required by the transport layer from - * the op_mode. May be called several times before start_fw, can't be - * called after that. - * @set_pmi: set the power pmi state - * @grab_nic_access: wake the NIC to be able to access non-HBUS regs. - * Sleeping is not allowed between grab_nic_access and - * release_nic_access. - * @release_nic_access: let the NIC go to sleep. The "flags" parameter - * must be the same one that was sent before to the grab_nic_access. - * @set_bits_mask - set SRAM register according to value and mask. - * @ref: grab a reference to the transport/FW layers, disallowing - * certain low power states - * @unref: release a reference previously taken with @ref. Note that - * initially the reference count is 1, making an initial @unref - * necessary to allow low power states. - * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last - * TX'ed commands and similar. The buffer will be vfree'd by the caller. - * Note that the transport must fill in the proper file headers. - */ -struct iwl_trans_ops { - - int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power); - void (*op_mode_leave)(struct iwl_trans *iwl_trans); - int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, - bool run_in_rfkill); - int (*update_sf)(struct iwl_trans *trans, - struct iwl_sf_region *st_fwrd_space); - void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); - void (*stop_device)(struct iwl_trans *trans, bool low_power); - - void (*d3_suspend)(struct iwl_trans *trans, bool test); - int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test); - - int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); - - int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue); - void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, - struct sk_buff_head *skbs); - - void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, - const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int queue_wdg_timeout); - void (*txq_disable)(struct iwl_trans *trans, int queue, - bool configure_scd); - - int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); - int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); - void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, - bool freeze); - - void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); - void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); - u32 (*read32)(struct iwl_trans *trans, u32 ofs); - u32 (*read_prph)(struct iwl_trans *trans, u32 ofs); - void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val); - int (*read_mem)(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); - int (*write_mem)(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords); - void (*configure)(struct iwl_trans *trans, - 
const struct iwl_trans_config *trans_cfg); - void (*set_pmi)(struct iwl_trans *trans, bool state); - bool (*grab_nic_access)(struct iwl_trans *trans, bool silent, - unsigned long *flags); - void (*release_nic_access)(struct iwl_trans *trans, - unsigned long *flags); - void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, - u32 value); - void (*ref)(struct iwl_trans *trans); - void (*unref)(struct iwl_trans *trans); - int (*suspend)(struct iwl_trans *trans); - void (*resume)(struct iwl_trans *trans); - - struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, - struct iwl_fw_dbg_trigger_tlv - *trigger); -}; - -/** - * enum iwl_trans_state - state of the transport layer - * - * @IWL_TRANS_NO_FW: no fw has sent an alive response - * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response - */ -enum iwl_trans_state { - IWL_TRANS_NO_FW = 0, - IWL_TRANS_FW_ALIVE = 1, -}; - -/** - * enum iwl_d0i3_mode - d0i3 mode - * - * @IWL_D0I3_MODE_OFF - d0i3 is disabled - * @IWL_D0I3_MODE_ON_IDLE - enter d0i3 when device is idle - * (e.g. no active references) - * @IWL_D0I3_MODE_ON_SUSPEND - enter d0i3 only on suspend - * (in case of 'any' trigger) - */ -enum iwl_d0i3_mode { - IWL_D0I3_MODE_OFF = 0, - IWL_D0I3_MODE_ON_IDLE, - IWL_D0I3_MODE_ON_SUSPEND, -}; - -/** - * struct iwl_trans - transport common data - * - * @ops - pointer to iwl_trans_ops - * @op_mode - pointer to the op_mode - * @cfg - pointer to the configuration - * @status: a bit-mask of transport status flags - * @dev - pointer to struct device * that represents the device - * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. - * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. - * @hw_id: a u32 with the ID of the device / sub-device. - * Set during transport allocation. - * @hw_id_str: a string with info about HW ID. Set during transport allocation. - * @pm_support: set to true in start_hw if link pm is supported - * @ltr_enabled: set to true if the LTR is enabled - * @num_rx_queues: number of RX queues allocated by the transport; - * the transport must set this before calling iwl_drv_start() - * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. - * The user should use iwl_trans_{alloc,free}_tx_cmd. - * @dev_cmd_headroom: room needed for the transport's private use before the - * device_cmd for Tx - for internal use only - * The user should use iwl_trans_{alloc,free}_tx_cmd. - * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before - * starting the firmware, used for tracing - * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the - * start of the 802.11 header in the @rx_mpdu_cmd - * @dflt_pwr_limit: default power limit fetched from the platform (ACPI) - * @dbg_dest_tlv: points to the destination TLV for debug - * @dbg_conf_tlv: array of pointers to configuration TLVs for debug - * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug - * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv - * @paging_req_addr: The location were the FW will upload / download the pages - * from. The address is set by the opmode - * @paging_db: Pointer to the opmode paging data base, the pointer is set by - * the opmode. - * @paging_download_buf: Buffer used for copying all of the pages before - * downloading them to the FW. 
The buffer is allocated in the opmode - */ -struct iwl_trans { - const struct iwl_trans_ops *ops; - struct iwl_op_mode *op_mode; - const struct iwl_cfg *cfg; - enum iwl_trans_state state; - unsigned long status; - - struct device *dev; - u32 max_skb_frags; - u32 hw_rev; - u32 hw_id; - char hw_id_str[52]; - - u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; - - bool pm_support; - bool ltr_enabled; - - u8 num_rx_queues; - - /* The following fields are internal only */ - struct kmem_cache *dev_cmd_pool; - size_t dev_cmd_headroom; - char dev_cmd_pool_name[50]; - - struct dentry *dbgfs_dir; - -#ifdef CONFIG_LOCKDEP - struct lockdep_map sync_cmd_lockdep_map; -#endif - - u64 dflt_pwr_limit; - - const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; - const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; - struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; - u8 dbg_dest_reg_num; - - /* - * Paging parameters - All of the parameters should be set by the - * opmode when paging is enabled - */ - u32 paging_req_addr; - struct iwl_fw_paging *paging_db; - void *paging_download_buf; - - enum iwl_d0i3_mode d0i3_mode; - - bool wowlan_d0i3; - - /* pointer to trans specific struct */ - /*Ensure that this pointer will always be aligned to sizeof pointer */ - char trans_specific[0] __aligned(sizeof(void *)); -}; - -static inline void iwl_trans_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) -{ - trans->op_mode = trans_cfg->op_mode; - - trans->ops->configure(trans, trans_cfg); -} - -static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power) -{ - might_sleep(); - - return trans->ops->start_hw(trans, low_power); -} - -static inline int iwl_trans_start_hw(struct iwl_trans *trans) -{ - return trans->ops->start_hw(trans, true); -} - -static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) -{ - might_sleep(); - - if (trans->ops->op_mode_leave) - trans->ops->op_mode_leave(trans); - - trans->op_mode = NULL; - - trans->state = IWL_TRANS_NO_FW; -} - -static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) -{ - might_sleep(); - - trans->state = IWL_TRANS_FW_ALIVE; - - trans->ops->fw_alive(trans, scd_addr); -} - -static inline int iwl_trans_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, - bool run_in_rfkill) -{ - might_sleep(); - - WARN_ON_ONCE(!trans->rx_mpdu_cmd); - - clear_bit(STATUS_FW_ERROR, &trans->status); - return trans->ops->start_fw(trans, fw, run_in_rfkill); -} - -static inline int iwl_trans_update_sf(struct iwl_trans *trans, - struct iwl_sf_region *st_fwrd_space) -{ - might_sleep(); - - if (trans->ops->update_sf) - return trans->ops->update_sf(trans, st_fwrd_space); - - return 0; -} - -static inline void _iwl_trans_stop_device(struct iwl_trans *trans, - bool low_power) -{ - might_sleep(); - - trans->ops->stop_device(trans, low_power); - - trans->state = IWL_TRANS_NO_FW; -} - -static inline void iwl_trans_stop_device(struct iwl_trans *trans) -{ - _iwl_trans_stop_device(trans, true); -} - -static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) -{ - might_sleep(); - if (trans->ops->d3_suspend) - trans->ops->d3_suspend(trans, test); -} - -static inline int iwl_trans_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status, - bool test) -{ - might_sleep(); - if (!trans->ops->d3_resume) - return 0; - - return trans->ops->d3_resume(trans, status, test); -} - -static inline void iwl_trans_ref(struct iwl_trans *trans) -{ - if (trans->ops->ref) - trans->ops->ref(trans); -} - -static inline void 
iwl_trans_unref(struct iwl_trans *trans) -{ - if (trans->ops->unref) - trans->ops->unref(trans); -} - -static inline int iwl_trans_suspend(struct iwl_trans *trans) -{ - if (!trans->ops->suspend) - return 0; - - return trans->ops->suspend(trans); -} - -static inline void iwl_trans_resume(struct iwl_trans *trans) -{ - if (trans->ops->resume) - trans->ops->resume(trans); -} - -static inline struct iwl_trans_dump_data * -iwl_trans_dump_data(struct iwl_trans *trans, - struct iwl_fw_dbg_trigger_tlv *trigger) -{ - if (!trans->ops->dump_data) - return NULL; - return trans->ops->dump_data(trans, trigger); -} - -static inline int iwl_trans_send_cmd(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - int ret; - - if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) && - test_bit(STATUS_RFKILL, &trans->status))) - return -ERFKILL; - - if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) - return -EIO; - - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } - - if (!(cmd->flags & CMD_ASYNC)) - lock_map_acquire_read(&trans->sync_cmd_lockdep_map); - - ret = trans->ops->send_cmd(trans, cmd); - - if (!(cmd->flags & CMD_ASYNC)) - lock_map_release(&trans->sync_cmd_lockdep_map); - - return ret; -} - -static inline struct iwl_device_cmd * -iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) -{ - u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); - - if (unlikely(dev_cmd_ptr == NULL)) - return NULL; - - return (struct iwl_device_cmd *) - (dev_cmd_ptr + trans->dev_cmd_headroom); -} - -static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, - struct iwl_device_cmd *dev_cmd) -{ - u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom; - - kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr); -} - -static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue) -{ - if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) - return -EIO; - - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - - return trans->ops->tx(trans, skb, dev_cmd, queue); -} - -static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, - int ssn, struct sk_buff_head *skbs) -{ - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - - trans->ops->reclaim(trans, queue, ssn, skbs); -} - -static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, - bool configure_scd) -{ - trans->ops->txq_disable(trans, queue, configure_scd); -} - -static inline void -iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, - const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int queue_wdg_timeout) -{ - might_sleep(); - - if (unlikely((trans->state != IWL_TRANS_FW_ALIVE))) - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - - trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout); -} - -static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, - int fifo, int sta_id, int tid, - int frame_limit, u16 ssn, - unsigned int queue_wdg_timeout) -{ - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .sta_id = sta_id, - .tid = tid, - .frame_limit = frame_limit, - .aggregate = sta_id >= 0, - }; - - iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout); -} - -static inline -void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo, - unsigned int 
queue_wdg_timeout) -{ - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .sta_id = -1, - .tid = IWL_MAX_TID_COUNT, - .frame_limit = IWL_FRAME_LIMIT, - .aggregate = false, - }; - - iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); -} - -static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, - unsigned long txqs, - bool freeze) -{ - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - - if (trans->ops->freeze_txq_timer) - trans->ops->freeze_txq_timer(trans, txqs, freeze); -} - -static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, - u32 txqs) -{ - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - - return trans->ops->wait_tx_queue_empty(trans, txqs); -} - -static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, - struct dentry *dir) -{ - return trans->ops->dbgfs_register(trans, dir); -} - -static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) -{ - trans->ops->write8(trans, ofs, val); -} - -static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) -{ - trans->ops->write32(trans, ofs, val); -} - -static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) -{ - return trans->ops->read32(trans, ofs); -} - -static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) -{ - return trans->ops->read_prph(trans, ofs); -} - -static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, - u32 val) -{ - return trans->ops->write_prph(trans, ofs, val); -} - -static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) -{ - return trans->ops->read_mem(trans, addr, buf, dwords); -} - -#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ - do { \ - if (__builtin_constant_p(bufsize)) \ - BUILD_BUG_ON((bufsize) % sizeof(u32)); \ - iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ - } while (0) - -static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) -{ - u32 value; - - if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1))) - return 0xa5a5a5a5; - - return value; -} - -static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords) -{ - return trans->ops->write_mem(trans, addr, buf, dwords); -} - -static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, - u32 val) -{ - return iwl_trans_write_mem(trans, addr, &val, 1); -} - -static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) -{ - if (trans->ops->set_pmi) - trans->ops->set_pmi(trans, state); -} - -static inline void -iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) -{ - trans->ops->set_bits_mask(trans, reg, mask, value); -} - -#define iwl_trans_grab_nic_access(trans, silent, flags) \ - __cond_lock(nic_access, \ - likely((trans)->ops->grab_nic_access(trans, silent, flags))) - -static inline void __releases(nic_access) -iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags) -{ - trans->ops->release_nic_access(trans, flags); - __release(nic_access); -} - -static inline void iwl_trans_fw_error(struct iwl_trans *trans) -{ - if (WARN_ON_ONCE(!trans->op_mode)) - return; - - /* prevent double restarts due to the same erroneous FW */ - if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) - iwl_op_mode_nic_error(trans->op_mode); -} - -/***************************************************** 
- * transport helper functions - *****************************************************/ -struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, - struct device *dev, - const struct iwl_cfg *cfg, - const struct iwl_trans_ops *ops, - size_t dev_cmd_headroom); -void iwl_trans_free(struct iwl_trans *trans); - -/***************************************************** -* driver (transport) register/unregister functions -******************************************************/ -int __must_check iwl_pci_register_driver(void); -void iwl_pci_unregister_driver(void); - -#endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile deleted file mode 100644 index 8c2c3d13b092..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -obj-$(CONFIG_IWLMVM) += iwlmvm.o -iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o -iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o -iwlmvm-y += scan.o time-event.o rs.o -iwlmvm-y += power.o coex.o coex_legacy.o -iwlmvm-y += tt.o offloading.o tdls.o -iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o -iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-y += tof.o -iwlmvm-$(CONFIG_PM_SLEEP) += d3.o - -ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c deleted file mode 100644 index a1376539d2dc..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/binding.c +++ /dev/null @@ -1,211 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include "fw-api.h" -#include "mvm.h" - -struct iwl_mvm_iface_iterator_data { - struct ieee80211_vif *ignore_vif; - int idx; - - struct iwl_mvm_phy_ctxt *phyctxt; - - u16 ids[MAX_MACS_IN_BINDING]; - u16 colors[MAX_MACS_IN_BINDING]; -}; - -static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, - struct iwl_mvm_iface_iterator_data *data) -{ - struct iwl_binding_cmd cmd; - struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt; - int i, ret; - u32 status; - - memset(&cmd, 0, sizeof(cmd)); - - cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, - phyctxt->color)); - cmd.action = cpu_to_le32(action); - cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, - phyctxt->color)); - - for (i = 0; i < MAX_MACS_IN_BINDING; i++) - cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); - for (i = 0; i < data->idx; i++) - cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i], - data->colors[i])); - - status = 0; - ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, - sizeof(cmd), &cmd, &status); - if (ret) { - IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", - action, ret); - return ret; - } - - if (status) { - IWL_ERR(mvm, "Binding command failed: %u\n", status); - ret = -EIO; - } - - return ret; -} - -static void iwl_mvm_iface_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_iface_iterator_data *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (vif == data->ignore_vif) - return; - - if (mvmvif->phy_ctxt != data->phyctxt) - return; - - if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING)) - return; - - data->ids[data->idx] = mvmvif->id; - data->colors[data->idx] = mvmvif->color; - data->idx++; -} - -static int iwl_mvm_binding_update(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_phy_ctxt *phyctxt, - bool add) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_iface_iterator_data data = { - .ignore_vif = vif, - .phyctxt = phyctxt, - }; - u32 action = FW_CTXT_ACTION_MODIFY; - - lockdep_assert_held(&mvm->mutex); - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_iface_iterator, - &data); - - /* - * If there are no other interfaces yet we - * need to create a new binding. 
- */ - if (data.idx == 0) { - if (add) - action = FW_CTXT_ACTION_ADD; - else - action = FW_CTXT_ACTION_REMOVE; - } - - if (add) { - if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING)) - return -EINVAL; - - data.ids[data.idx] = mvmvif->id; - data.colors[data.idx] = mvmvif->color; - data.idx++; - } - - return iwl_mvm_binding_cmd(mvm, action, &data); -} - -int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) - return -EINVAL; - - /* - * Update SF - Disable if needed. if this fails, SF might still be on - * while many macs are bound, which is forbidden - so fail the binding. - */ - if (iwl_mvm_sf_update(mvm, vif, false)) - return -EINVAL; - - return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true); -} - -int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) - return -EINVAL; - - ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false); - - if (!ret) - if (iwl_mvm_sf_update(mvm, vif, true)) - IWL_ERR(mvm, "Failed to update SF state\n"); - - return ret; -} diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c deleted file mode 100644 index e290ac67d975..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ /dev/null @@ -1,1005 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include -#include - -#include "fw-api-coex.h" -#include "iwl-modparams.h" -#include "mvm.h" -#include "iwl-debug.h" - -/* 20MHz / 40MHz below / 40Mhz above*/ -static const __le64 iwl_ci_mask[][3] = { - /* dummy entry for channel 0 */ - {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)}, - { - cpu_to_le64(0x0000001FFFULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x00007FFFFFULL), - }, - { - cpu_to_le64(0x000000FFFFULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x0003FFFFFFULL), - }, - { - cpu_to_le64(0x000003FFFCULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x000FFFFFFCULL), - }, - { - cpu_to_le64(0x00001FFFE0ULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x007FFFFFE0ULL), - }, - { - cpu_to_le64(0x00007FFF80ULL), - cpu_to_le64(0x00007FFFFFULL), - cpu_to_le64(0x01FFFFFF80ULL), - }, - { - cpu_to_le64(0x0003FFFC00ULL), - cpu_to_le64(0x0003FFFFFFULL), - cpu_to_le64(0x0FFFFFFC00ULL), - }, - { - cpu_to_le64(0x000FFFF000ULL), - cpu_to_le64(0x000FFFFFFCULL), - cpu_to_le64(0x3FFFFFF000ULL), - }, - { - cpu_to_le64(0x007FFF8000ULL), - cpu_to_le64(0x007FFFFFE0ULL), - cpu_to_le64(0xFFFFFF8000ULL), - }, - { - cpu_to_le64(0x01FFFE0000ULL), - cpu_to_le64(0x01FFFFFF80ULL), - cpu_to_le64(0xFFFFFE0000ULL), - }, - { - cpu_to_le64(0x0FFFF00000ULL), - cpu_to_le64(0x0FFFFFFC00ULL), - cpu_to_le64(0x0ULL), - }, - { - cpu_to_le64(0x3FFFC00000ULL), - cpu_to_le64(0x3FFFFFF000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFFE000000ULL), - cpu_to_le64(0xFFFFFF8000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFF8000000ULL), - cpu_to_le64(0xFFFFFE0000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFC0000000ULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x0ULL) - }, -}; - -struct corunning_block_luts { - u8 range; - __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; -}; - -/* - * Ranges for the antenna coupling calibration / co-running block LUT: - * LUT0: [ 0, 12[ - * LUT1: [12, 20[ - * LUT2: [20, 21[ - * LUT3: [21, 23[ - * LUT4: [23, 27[ - * LUT5: [27, 30[ - * LUT6: [30, 32[ - * LUT7: [32, 33[ - * LUT8: [33, - [ - */ -static const struct corunning_block_luts antenna_coupling_ranges[] = { - { - .range = 0, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 12, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 20, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 21, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 23, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 27, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 30, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 32, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 33, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, -}; - -static enum iwl_bt_coex_lut_type -iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) -{ - struct ieee80211_chanctx_conf *chanctx_conf; - enum iwl_bt_coex_lut_type ret; - u16 phy_ctx_id; - u32 primary_ch_phy_id, secondary_ch_phy_id; - - /* - * Checking that we hold mvm->mutex is a good idea, but the rate - * control can't acquire the mutex since it runs in Tx path. - * So this is racy in that case, but in the worst case, the AMPDU - * size limit will be wrong for a short time which is not a big - * issue. - */ - - rcu_read_lock(); - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return BT_COEX_INVALID_LUT; - } - - ret = BT_COEX_TX_DIS_LUT; - - if (mvm->cfg->bt_shared_single_ant) { - rcu_read_unlock(); - return ret; - } - - phy_ctx_id = *((u16 *)chanctx_conf->drv_priv); - primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id); - secondary_ch_phy_id = - le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id); - - if (primary_ch_phy_id == phy_ctx_id) - ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut); - else if (secondary_ch_phy_id == phy_ctx_id) - ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut); - /* else - default = TX TX disallowed */ - - rcu_read_unlock(); - - return ret; -} - -int iwl_send_bt_init_conf(struct iwl_mvm *mvm) -{ - struct iwl_bt_coex_cmd bt_cmd = {}; - u32 mode; - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_send_bt_init_conf_old(mvm); - - lockdep_assert_held(&mvm->mutex); - - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { - switch (mvm->bt_force_ant_mode) { - case BT_FORCE_ANT_BT: - mode = BT_COEX_BT; - break; - case BT_FORCE_ANT_WIFI: - mode = BT_COEX_WIFI; - break; - default: - WARN_ON(1); - mode = 0; - } - - bt_cmd.mode = cpu_to_le32(mode); - goto send_cmd; - } - - mode = iwlwifi_mod_params.bt_coex_active ? 
BT_COEX_NW : BT_COEX_DISABLE; - bt_cmd.mode = cpu_to_le32(mode); - - if (IWL_MVM_BT_COEX_SYNC2SCO) - bt_cmd.enabled_modules |= - cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); - - if (iwl_mvm_bt_is_plcr_supported(mvm)) - bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); - - if (IWL_MVM_BT_COEX_MPLUT) { - bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); - bt_cmd.enabled_modules |= - cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED); - } - - bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); - -send_cmd: - memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); - memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); - - return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd); -} - -static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, - bool enable) -{ - struct iwl_bt_coex_reduced_txp_update_cmd cmd = {}; - struct iwl_mvm_sta *mvmsta; - u32 value; - int ret; - - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); - if (!mvmsta) - return 0; - - /* nothing to do */ - if (mvmsta->bt_reduced_txpower == enable) - return 0; - - value = mvmsta->sta_id; - - if (enable) - value |= BT_REDUCED_TX_POWER_BIT; - - IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", - enable ? "en" : "dis", sta_id); - - cmd.reduced_txp = cpu_to_le32(value); - mvmsta->bt_reduced_txpower = enable; - - ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC, - sizeof(cmd), &cmd); - - return ret; -} - -struct iwl_bt_iterator_data { - struct iwl_bt_coex_profile_notif *notif; - struct iwl_mvm *mvm; - struct ieee80211_chanctx_conf *primary; - struct ieee80211_chanctx_conf *secondary; - bool primary_ll; -}; - -static inline -void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable, int rssi) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->bf_data.last_bt_coex_event = rssi; - mvmvif->bf_data.bt_coex_max_thold = - enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; - mvmvif->bf_data.bt_coex_min_thold = - enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; -} - -/* must be called under rcu_read_lock */ -static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct ieee80211_chanctx_conf *chanctx_conf; - /* default smps_mode is AUTOMATIC - only used for client modes */ - enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; - u32 bt_activity_grading; - int ave_rssi; - - lockdep_assert_held(&mvm->mutex); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - break; - case NL80211_IFTYPE_AP: - if (!mvmvif->ap_ibss_active) - return; - break; - default: - return; - } - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - - /* If channel context is invalid or not on 2.4GHz .. */ - if ((!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { - if (vif->type == NL80211_IFTYPE_STATION) { - /* ... 
relax constraints and disable rssi events */ - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, - false); - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); - } - return; - } - - bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); - if (bt_activity_grading >= BT_HIGH_TRAFFIC) - smps_mode = IEEE80211_SMPS_STATIC; - else if (bt_activity_grading >= BT_LOW_TRAFFIC) - smps_mode = IEEE80211_SMPS_DYNAMIC; - - /* relax SMPS constraints for next association */ - if (!vif->bss_conf.assoc) - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - if (mvmvif->phy_ctxt && - IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, - mvmvif->phy_ctxt->id)) - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - IWL_DEBUG_COEX(data->mvm, - "mac %d: bt_activity_grading %d smps_req %d\n", - mvmvif->id, bt_activity_grading, smps_mode); - - if (vif->type == NL80211_IFTYPE_STATION) - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - - /* low latency is always primary */ - if (iwl_mvm_vif_low_latency(mvmvif)) { - data->primary_ll = true; - - data->secondary = data->primary; - data->primary = chanctx_conf; - } - - if (vif->type == NL80211_IFTYPE_AP) { - if (!mvmvif->ap_ibss_active) - return; - - if (chanctx_conf == data->primary) - return; - - if (!data->primary_ll) { - /* - * downgrade the current primary no matter what its - * type is. - */ - data->secondary = data->primary; - data->primary = chanctx_conf; - } else { - /* there is low latency vif - we will be secondary */ - data->secondary = chanctx_conf; - } - return; - } - - /* - * STA / P2P Client, try to be primary if first vif. If we are in low - * latency mode, we are already in primary and just don't do much - */ - if (!data->primary || data->primary == chanctx_conf) - data->primary = chanctx_conf; - else if (!data->secondary) - /* if secondary is not NULL, it might be a GO */ - data->secondary = chanctx_conf; - - /* - * don't reduce the Tx power if one of these is true: - * we are in LOOSE - * single share antenna product - * BT is active - * we are associated - */ - if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || - mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || - le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) { - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); - return; - } - - /* try to get the avg rssi from fw */ - ave_rssi = mvmvif->bf_data.ave_beacon_signal; - - /* if the RSSI isn't valid, fake it is very low */ - if (!ave_rssi) - ave_rssi = -100; - if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) - IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); - } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) - IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); - } - - /* Begin to monitor the RSSI: it may influence the reduced Tx power */ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); -} - -static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) -{ - struct iwl_bt_iterator_data data = { - .mvm = mvm, - .notif = &mvm->last_bt_notif, - }; - struct iwl_bt_coex_ci_cmd cmd = {}; - u8 ci_bw_idx; - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - rcu_read_lock(); - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, 
IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_notif_iterator, &data); - - if (data.primary) { - struct ieee80211_chanctx_conf *chan = data.primary; - if (WARN_ON(!chan->def.chan)) { - rcu_read_unlock(); - return; - } - - if (chan->def.width < NL80211_CHAN_WIDTH_40) { - ci_bw_idx = 0; - } else { - if (chan->def.center_freq1 > - chan->def.chan->center_freq) - ci_bw_idx = 2; - else - ci_bw_idx = 1; - } - - cmd.bt_primary_ci = - iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; - cmd.primary_ch_phy_id = - cpu_to_le32(*((u16 *)data.primary->drv_priv)); - } - - if (data.secondary) { - struct ieee80211_chanctx_conf *chan = data.secondary; - if (WARN_ON(!data.secondary->def.chan)) { - rcu_read_unlock(); - return; - } - - if (chan->def.width < NL80211_CHAN_WIDTH_40) { - ci_bw_idx = 0; - } else { - if (chan->def.center_freq1 > - chan->def.chan->center_freq) - ci_bw_idx = 2; - else - ci_bw_idx = 1; - } - - cmd.bt_secondary_ci = - iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; - cmd.secondary_ch_phy_id = - cpu_to_le32(*((u16 *)data.secondary->drv_priv)); - } - - rcu_read_unlock(); - - /* Don't spam the fw with the same command over and over */ - if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) { - if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, - sizeof(cmd), &cmd)) - IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); - memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd)); - } -} - -void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_rx_bt_coex_notif_old(mvm, rxb); - return; - } - - IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); - IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); - IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", - le32_to_cpu(notif->primary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", - le32_to_cpu(notif->bt_activity_grading)); - - /* remember this notification for future use: rssi fluctuations */ - memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); - - iwl_mvm_bt_coex_notif_handle(mvm); -} - -void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_rssi_event_data rssi_event) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event); - return; - } - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - /* - * Rssi update while not associated - can happen since the statistics - * are handled asynchronously - */ - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - /* No BT - reports should be disabled */ - if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) - return; - - IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, - rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); - - /* - * Check if rssi is good enough for reduced Tx power, but not in loose - * scheme. 
- */ - if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || - iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, - false); - else - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); - - if (ret) - IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); -} - -#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) -#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) - -u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; - enum iwl_bt_coex_lut_type lut_type; - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_coex_agg_time_limit_old(mvm, sta); - - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < - BT_HIGH_TRAFFIC) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - lut_type = iwl_get_coex_type(mvm, mvmsta->vif); - - if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - /* tight coex, high bt traffic, reduce AGG time limit */ - return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; -} - -bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; - enum iwl_bt_coex_lut_type lut_type; - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta); - - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) - return true; - - if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < - BT_HIGH_TRAFFIC) - return true; - - /* - * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas - * since BT is already killed. - * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while - * we Tx. - * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO. 
- */ - lut_type = iwl_get_coex_type(mvm, mvmsta->vif); - return lut_type != BT_COEX_LOOSE_LUT; -} - -bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) -{ - /* there is no other antenna, shared antenna is always available */ - if (mvm->cfg->bt_shared_single_ant) - return true; - - if (ant & mvm->cfg->non_shared_ant) - return true; - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); - - return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < - BT_HIGH_TRAFFIC; -} - -bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) -{ - /* there is no other antenna, shared antenna is always available */ - if (mvm->cfg->bt_shared_single_ant) - return true; - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); - - return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; -} - -bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, - enum ieee80211_band band) -{ - u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band); - - if (band != IEEE80211_BAND_2GHZ) - return false; - - return bt_activity >= BT_LOW_TRAFFIC; -} - -u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, - struct ieee80211_tx_info *info, u8 ac) -{ - __le16 fc = hdr->frame_control; - - if (info->band != IEEE80211_BAND_2GHZ) - return 0; - - if (unlikely(mvm->bt_tx_prio)) - return mvm->bt_tx_prio - 1; - - /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */ - if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO || - is_multicast_ether_addr(hdr->addr1) || - ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) || - ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) - return 3; - - switch (ac) { - case IEEE80211_AC_BE: - return 1; - case IEEE80211_AC_VO: - return 3; - case IEEE80211_AC_VI: - return 2; - default: - break; - } - - return 0; -} - -void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) -{ - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_bt_coex_vif_change_old(mvm); - return; - } - - iwl_mvm_bt_coex_notif_handle(mvm); -} - -void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 ant_isolation = le32_to_cpup((void *)pkt->data); - struct iwl_bt_coex_corun_lut_update_cmd cmd = {}; - u8 __maybe_unused lower_bound, upper_bound; - u8 lut; - - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb); - return; - } - - if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - if (ant_isolation == mvm->last_ant_isol) - return; - - for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) - if (ant_isolation < antenna_coupling_ranges[lut + 1].range) - break; - - lower_bound = antenna_coupling_ranges[lut].range; - - if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1) - upper_bound = antenna_coupling_ranges[lut + 1].range; - else - upper_bound = antenna_coupling_ranges[lut].range; - - IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n", - ant_isolation, lower_bound, upper_bound, lut); - - 
mvm->last_ant_isol = ant_isolation; - - if (mvm->last_corun_lut == lut) - return; - - mvm->last_corun_lut = lut; - - /* For the moment, use the same LUT for 20GHz and 40GHz */ - memcpy(&cmd.corun_lut20, antenna_coupling_ranges[lut].lut20, - sizeof(cmd.corun_lut20)); - - memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20, - sizeof(cmd.corun_lut40)); - - if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0, - sizeof(cmd), &cmd)) - IWL_ERR(mvm, - "failed to send BT_COEX_UPDATE_CORUN_LUT command\n"); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c deleted file mode 100644 index 61c07b05fcaa..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ /dev/null @@ -1,1315 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include -#include - -#include "fw-api-coex.h" -#include "iwl-modparams.h" -#include "mvm.h" -#include "iwl-debug.h" - -#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \ - [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \ - ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS)) - -static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1, - BT_COEX_PRIO_TBL_PRIO_BYPASS, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2, - BT_COEX_PRIO_TBL_PRIO_BYPASS, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1, - BT_COEX_PRIO_TBL_PRIO_LOW, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2, - BT_COEX_PRIO_TBL_PRIO_LOW, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1, - BT_COEX_PRIO_TBL_PRIO_HIGH, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2, - BT_COEX_PRIO_TBL_PRIO_HIGH, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM, - BT_COEX_PRIO_TBL_DISABLED, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52, - BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24, - BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE, - BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0), - 0, 0, 0, 0, 0, 0, -}; - -#undef EVENT_PRIO_ANT - -static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) -{ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return 0; - - return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0, - sizeof(struct iwl_bt_coex_prio_tbl_cmd), - &iwl_bt_prio_tbl); -} - -static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = { - cpu_to_le32(0xf0f0f0f0), /* 50% */ - cpu_to_le32(0xc0c0c0c0), /* 25% */ - cpu_to_le32(0xfcfcfcfc), /* 75% */ - cpu_to_le32(0xfefefefe), /* 87.5% */ -}; - -static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, -}; - -static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { - { - 
/* Tight */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xc0004000), - cpu_to_le32(0x00004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, - { - /* Loose */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, - { - /* Tx Tx disabled */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xeeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, -}; - -/* 20MHz / 40MHz below / 40Mhz above*/ -static const __le64 iwl_ci_mask[][3] = { - /* dummy entry for channel 0 */ - {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)}, - { - cpu_to_le64(0x0000001FFFULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x00007FFFFFULL), - }, - { - cpu_to_le64(0x000000FFFFULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x0003FFFFFFULL), - }, - { - cpu_to_le64(0x000003FFFCULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x000FFFFFFCULL), - }, - { - cpu_to_le64(0x00001FFFE0ULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x007FFFFFE0ULL), - }, - { - cpu_to_le64(0x00007FFF80ULL), - cpu_to_le64(0x00007FFFFFULL), - cpu_to_le64(0x01FFFFFF80ULL), - }, - { - cpu_to_le64(0x0003FFFC00ULL), - cpu_to_le64(0x0003FFFFFFULL), - cpu_to_le64(0x0FFFFFFC00ULL), - }, - { - cpu_to_le64(0x000FFFF000ULL), - cpu_to_le64(0x000FFFFFFCULL), - cpu_to_le64(0x3FFFFFF000ULL), - }, - { - cpu_to_le64(0x007FFF8000ULL), - cpu_to_le64(0x007FFFFFE0ULL), - cpu_to_le64(0xFFFFFF8000ULL), - }, - { - cpu_to_le64(0x01FFFE0000ULL), - cpu_to_le64(0x01FFFFFF80ULL), - cpu_to_le64(0xFFFFFE0000ULL), - }, - { - cpu_to_le64(0x0FFFF00000ULL), - cpu_to_le64(0x0FFFFFFC00ULL), - cpu_to_le64(0x0ULL), - }, - { - cpu_to_le64(0x3FFFC00000ULL), - cpu_to_le64(0x3FFFFFF000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFFE000000ULL), - cpu_to_le64(0xFFFFFF8000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFF8000000ULL), - cpu_to_le64(0xFFFFFE0000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFC0000000ULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x0ULL) - }, -}; - -enum iwl_bt_kill_msk { - BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_MAX, -}; - -static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = { - [BT_KILL_MSK_DEFAULT] = 0xfffffc00, - [BT_KILL_MSK_NEVER] = 0xffffffff, - [BT_KILL_MSK_ALWAYS] = 0, -}; - -static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - }, - { - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - }, - { - BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_DEFAULT, - }, -}; - -static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - 
BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_DEFAULT, - }, -}; - -struct corunning_block_luts { - u8 range; - __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; -}; - -/* - * Ranges for the antenna coupling calibration / co-running block LUT: - * LUT0: [ 0, 12[ - * LUT1: [12, 20[ - * LUT2: [20, 21[ - * LUT3: [21, 23[ - * LUT4: [23, 27[ - * LUT5: [27, 30[ - * LUT6: [30, 32[ - * LUT7: [32, 33[ - * LUT8: [33, - [ - */ -static const struct corunning_block_luts antenna_coupling_ranges[] = { - { - .range = 0, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 12, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 20, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 21, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 23, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 27, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 30, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 32, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 33, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, -}; - -static enum iwl_bt_coex_lut_type -iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) -{ - struct ieee80211_chanctx_conf *chanctx_conf; - enum iwl_bt_coex_lut_type ret; - u16 phy_ctx_id; - - /* - * Checking that we hold mvm->mutex is a good idea, but the rate - * control can't acquire the mutex since it runs in Tx path. - * So this is racy in that case, but in the worst case, the AMPDU - * size limit will be wrong for a short time which is not a big - * issue. - */ - - rcu_read_lock(); - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return BT_COEX_INVALID_LUT; - } - - ret = BT_COEX_TX_DIS_LUT; - - if (mvm->cfg->bt_shared_single_ant) { - rcu_read_unlock(); - return ret; - } - - phy_ctx_id = *((u16 *)chanctx_conf->drv_priv); - - if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id) - ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut); - else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id) - ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut); - /* else - default = TX TX disallowed */ - - rcu_read_unlock(); - - return ret; -} - -int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) -{ - struct iwl_bt_coex_cmd_old *bt_cmd; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret; - u32 flags; - - ret = iwl_send_bt_prio_tbl(mvm); - if (ret) - return ret; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - - lockdep_assert_held(&mvm->mutex); - - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { - switch (mvm->bt_force_ant_mode) { - case BT_FORCE_ANT_AUTO: - flags = BT_COEX_AUTO_OLD; - break; - case BT_FORCE_ANT_BT: - flags = BT_COEX_BT_OLD; - break; - case BT_FORCE_ANT_WIFI: - flags = BT_COEX_WIFI_OLD; - break; - default: - WARN_ON(1); - flags = 0; - } - - bt_cmd->flags = cpu_to_le32(flags); - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE); - goto send_cmd; - } - - bt_cmd->max_kill = 5; - bt_cmd->bt4_antenna_isolation_thr = - IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS; - bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling; 
- bt_cmd->bt4_tx_tx_delta_freq_thr = 15; - bt_cmd->bt4_tx_rx_max_freq0 = 15; - bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT; - bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT; - - flags = iwlwifi_mod_params.bt_coex_active ? - BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD; - bt_cmd->flags = cpu_to_le32(flags); - - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_BT_PRIO_BOOST | - BT_VALID_MAX_KILL | - BT_VALID_3W_TMRS | - BT_VALID_KILL_ACK | - BT_VALID_KILL_CTS | - BT_VALID_REDUCED_TX_POWER | - BT_VALID_LUT | - BT_VALID_WIFI_RX_SW_PRIO_BOOST | - BT_VALID_WIFI_TX_SW_PRIO_BOOST | - BT_VALID_ANT_ISOLATION | - BT_VALID_ANT_ISOLATION_THRS | - BT_VALID_TXTX_DELTA_FREQ_THRS | - BT_VALID_TXRX_MAX_FREQ_0 | - BT_VALID_SYNC_TO_SCO | - BT_VALID_TTC | - BT_VALID_RRC); - - if (IWL_MVM_BT_COEX_SYNC2SCO) - bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); - - if (iwl_mvm_bt_is_plcr_supported(mvm)) { - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); - bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); - } - - if (IWL_MVM_BT_COEX_MPLUT) { - bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); - } - - if (IWL_MVM_BT_COEX_TTC) - bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC); - - if (iwl_mvm_bt_is_rrc_supported(mvm)) - bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC); - - if (mvm->cfg->bt_shared_single_ant) - memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant, - sizeof(iwl_single_shared_ant)); - else - memcpy(&bt_cmd->decision_lut, iwl_combined_lookup, - sizeof(iwl_combined_lookup)); - - /* Take first Co-running block LUT to get started */ - memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20, - sizeof(bt_cmd->bt4_corun_lut20)); - memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20, - sizeof(bt_cmd->bt4_corun_lut40)); - - memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost, - sizeof(iwl_bt_prio_boost)); - bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0); - bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1); - -send_cmd: - memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); - memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old)); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm) -{ - struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old; - u32 primary_lut = le32_to_cpu(notif->primary_ch_lut); - u32 ag = le32_to_cpu(notif->bt_activity_grading); - struct iwl_bt_coex_cmd_old *bt_cmd; - u8 ack_kill_msk, cts_kill_msk; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .data[0] = &bt_cmd, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret = 0; - - lockdep_assert_held(&mvm->mutex); - - ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut]; - cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut]; - - if (mvm->bt_ack_kill_msk[0] == ack_kill_msk && - mvm->bt_cts_kill_msk[0] == cts_kill_msk) - return 0; - - mvm->bt_ack_kill_msk[0] = ack_kill_msk; - mvm->bt_cts_kill_msk[0] = cts_kill_msk; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - - bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]); - bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_KILL_ACK | - BT_VALID_KILL_CTS); - - ret = 
iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, - bool enable) -{ - struct iwl_bt_coex_cmd_old *bt_cmd; - /* Send ASYNC since this can be sent from an atomic context */ - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_DUP, }, - .flags = CMD_ASYNC, - }; - struct iwl_mvm_sta *mvmsta; - int ret; - - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); - if (!mvmsta) - return 0; - - /* nothing to do */ - if (mvmsta->bt_reduced_txpower == enable) - return 0; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - - bt_cmd->valid_bit_msk = - cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER); - bt_cmd->bt_reduced_tx_power = sta_id; - - if (enable) - bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT; - - IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", - enable ? "en" : "dis", sta_id); - - mvmsta->bt_reduced_txpower = enable; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -struct iwl_bt_iterator_data { - struct iwl_bt_coex_profile_notif_old *notif; - struct iwl_mvm *mvm; - struct ieee80211_chanctx_conf *primary; - struct ieee80211_chanctx_conf *secondary; - bool primary_ll; -}; - -static inline -void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable, int rssi) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->bf_data.last_bt_coex_event = rssi; - mvmvif->bf_data.bt_coex_max_thold = - enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; - mvmvif->bf_data.bt_coex_min_thold = - enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; -} - -/* must be called under rcu_read_lock */ -static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_smps_mode smps_mode; - u32 bt_activity_grading; - int ave_rssi; - - lockdep_assert_held(&mvm->mutex); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - /* default smps_mode for BSS / P2P client is AUTOMATIC */ - smps_mode = IEEE80211_SMPS_AUTOMATIC; - break; - case NL80211_IFTYPE_AP: - if (!mvmvif->ap_ibss_active) - return; - break; - default: - return; - } - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - - /* If channel context is invalid or not on 2.4GHz .. */ - if ((!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { - if (vif->type == NL80211_IFTYPE_STATION) { - /* ... relax constraints and disable rssi events */ - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, - false); - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); - } - return; - } - - bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); - if (bt_activity_grading >= BT_HIGH_TRAFFIC) - smps_mode = IEEE80211_SMPS_STATIC; - else if (bt_activity_grading >= BT_LOW_TRAFFIC) - smps_mode = vif->type == NL80211_IFTYPE_AP ? 
- IEEE80211_SMPS_OFF : - IEEE80211_SMPS_DYNAMIC; - - /* relax SMPS contraints for next association */ - if (!vif->bss_conf.assoc) - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - if (mvmvif->phy_ctxt && - data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - IWL_DEBUG_COEX(data->mvm, - "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n", - mvmvif->id, data->notif->bt_status, bt_activity_grading, - smps_mode); - - if (vif->type == NL80211_IFTYPE_STATION) - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - - /* low latency is always primary */ - if (iwl_mvm_vif_low_latency(mvmvif)) { - data->primary_ll = true; - - data->secondary = data->primary; - data->primary = chanctx_conf; - } - - if (vif->type == NL80211_IFTYPE_AP) { - if (!mvmvif->ap_ibss_active) - return; - - if (chanctx_conf == data->primary) - return; - - if (!data->primary_ll) { - /* - * downgrade the current primary no matter what its - * type is. - */ - data->secondary = data->primary; - data->primary = chanctx_conf; - } else { - /* there is low latency vif - we will be secondary */ - data->secondary = chanctx_conf; - } - return; - } - - /* - * STA / P2P Client, try to be primary if first vif. If we are in low - * latency mode, we are already in primary and just don't do much - */ - if (!data->primary || data->primary == chanctx_conf) - data->primary = chanctx_conf; - else if (!data->secondary) - /* if secondary is not NULL, it might be a GO */ - data->secondary = chanctx_conf; - - /* - * don't reduce the Tx power if one of these is true: - * we are in LOOSE - * single share antenna product - * BT is active - * we are associated - */ - if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || - mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || - !data->notif->bt_status) { - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); - return; - } - - /* try to get the avg rssi from fw */ - ave_rssi = mvmvif->bf_data.ave_beacon_signal; - - /* if the RSSI isn't valid, fake it is very low */ - if (!ave_rssi) - ave_rssi = -100; - if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) - IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); - } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) - IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); - } - - /* Begin to monitor the RSSI: it may influence the reduced Tx power */ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); -} - -static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) -{ - struct iwl_bt_iterator_data data = { - .mvm = mvm, - .notif = &mvm->last_bt_notif_old, - }; - struct iwl_bt_coex_ci_cmd_old cmd = {}; - u8 ci_bw_idx; - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - rcu_read_lock(); - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_notif_iterator, &data); - - if (data.primary) { - struct ieee80211_chanctx_conf *chan = data.primary; - - if (WARN_ON(!chan->def.chan)) { - rcu_read_unlock(); - return; - } - - if (chan->def.width < NL80211_CHAN_WIDTH_40) { - ci_bw_idx = 0; - cmd.co_run_bw_primary = 0; - } else { - cmd.co_run_bw_primary = 1; - if (chan->def.center_freq1 > - chan->def.chan->center_freq) - ci_bw_idx = 2; - else - ci_bw_idx = 1; - } - - cmd.bt_primary_ci = - 
iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; - cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv); - } - - if (data.secondary) { - struct ieee80211_chanctx_conf *chan = data.secondary; - - if (WARN_ON(!data.secondary->def.chan)) { - rcu_read_unlock(); - return; - } - - if (chan->def.width < NL80211_CHAN_WIDTH_40) { - ci_bw_idx = 0; - cmd.co_run_bw_secondary = 0; - } else { - cmd.co_run_bw_secondary = 1; - if (chan->def.center_freq1 > - chan->def.chan->center_freq) - ci_bw_idx = 2; - else - ci_bw_idx = 1; - } - - cmd.bt_secondary_ci = - iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; - cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv); - } - - rcu_read_unlock(); - - /* Don't spam the fw with the same command over and over */ - if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) { - if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, - sizeof(cmd), &cmd)) - IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); - memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd)); - } - - if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm)) - IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); -} - -void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data; - - IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); - IWL_DEBUG_COEX(mvm, "\tBT status: %s\n", - notif->bt_status ? "ON" : "OFF"); - IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn); - IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); - IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", - le32_to_cpu(notif->primary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", - le32_to_cpu(notif->bt_activity_grading)); - IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n", - notif->bt_agg_traffic_load); - - /* remember this notification for future use: rssi fluctuations */ - memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old)); - - iwl_mvm_bt_coex_notif_handle(mvm); -} - -static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - - struct ieee80211_chanctx_conf *chanctx_conf; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - /* If channel context is invalid or not on 2.4GHz - don't count it */ - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return; - } - rcu_read_unlock(); - - if (vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], - lockdep_is_held(&mvm->mutex)); - - /* This can happen if the station has been removed right now */ - if (IS_ERR_OR_NULL(sta)) - return; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); -} - -void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_rssi_event_data rssi_event) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data data = { - .mvm = mvm, - }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - /* 
- * Rssi update while not associated - can happen since the statistics - * are handled asynchronously - */ - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - /* No BT - reports should be disabled */ - if (!mvm->last_bt_notif_old.bt_status) - return; - - IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, - rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); - - /* - * Check if rssi is good enough for reduced Tx power, but not in loose - * scheme. - */ - if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || - iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, - false); - else - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); - - if (ret) - IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_rssi_iterator, &data); - - if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm)) - IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); -} - -#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) -#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) - -u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum iwl_bt_coex_lut_type lut_type; - - if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) < - BT_HIGH_TRAFFIC) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - if (mvm->last_bt_notif_old.ttc_enabled) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - lut_type = iwl_get_coex_type(mvm, mvmsta->vif); - - if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - /* tight coex, high bt traffic, reduce AGG time limit */ - return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; -} - -bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum iwl_bt_coex_lut_type lut_type; - - if (mvm->last_bt_notif_old.ttc_enabled) - return true; - - if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) < - BT_HIGH_TRAFFIC) - return true; - - /* - * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas - * since BT is already killed. - * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while - * we Tx. - * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO. 
- */ - lut_type = iwl_get_coex_type(mvm, mvmsta->vif); - return lut_type != BT_COEX_LOOSE_LUT; -} - -bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm) -{ - u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); - return ag < BT_HIGH_TRAFFIC; -} - -bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, - enum ieee80211_band band) -{ - u32 bt_activity = - le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); - - if (band != IEEE80211_BAND_2GHZ) - return false; - - return bt_activity >= BT_LOW_TRAFFIC; -} - -void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm) -{ - iwl_mvm_bt_coex_notif_handle(mvm); -} - -void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 ant_isolation = le32_to_cpup((void *)pkt->data); - u8 __maybe_unused lower_bound, upper_bound; - u8 lut; - - struct iwl_bt_coex_cmd_old *bt_cmd; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - - if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - if (ant_isolation == mvm->last_ant_isol) - return; - - for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) - if (ant_isolation < antenna_coupling_ranges[lut + 1].range) - break; - - lower_bound = antenna_coupling_ranges[lut].range; - - if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1) - upper_bound = antenna_coupling_ranges[lut + 1].range; - else - upper_bound = antenna_coupling_ranges[lut].range; - - IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n", - ant_isolation, lower_bound, upper_bound, lut); - - mvm->last_ant_isol = ant_isolation; - - if (mvm->last_corun_lut == lut) - return; - - mvm->last_corun_lut = lut; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return; - cmd.data[0] = bt_cmd; - - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); - - /* For the moment, use the same LUT for 20GHz and 40GHz */ - memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20, - sizeof(bt_cmd->bt4_corun_lut20)); - - memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, - sizeof(bt_cmd->bt4_corun_lut40)); - - if (iwl_mvm_send_cmd(mvm, &cmd)) - IWL_ERR(mvm, "failed to send BT_CONFIG command\n"); - - kfree(bt_cmd); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h deleted file mode 100644 index 5c21231e195d..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ /dev/null @@ -1,139 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#ifndef __MVM_CONSTANTS_H -#define __MVM_CONSTANTS_H - -#include - -#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) -#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) -#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) -#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) -#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ -#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ -#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 -#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) -#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) -#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ - IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ - IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ - IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) -#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20 -#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8 -#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30 -#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20 -#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50 -#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50 -#define IWL_MVM_PS_SNOOZE_INTERVAL 25 -#define IWL_MVM_PS_SNOOZE_WINDOW 50 -#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25 -#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64 -#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62 -#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65 -#define IWL_MVM_BT_COEX_SYNC2SCO 1 -#define IWL_MVM_BT_COEX_CORUNNING 0 -#define IWL_MVM_BT_COEX_MPLUT 1 -#define IWL_MVM_BT_COEX_RRC 1 -#define IWL_MVM_BT_COEX_TTC 1 -#define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200 -#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451 -#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30 -#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0 -#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0 -#define IWL_MVM_QUOTA_THRESHOLD 4 -#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 -#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 -#define IWL_MVM_TOF_IS_RESPONDER 0 -#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 -#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 -#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 -#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3 -#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3 -#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2 -#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2 -#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1 -#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16 -#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3 -#define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1 -#define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3 -#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8 -#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */ -#define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */ -#define IWL_MVM_RS_MISSED_RATE_MAX 15 -#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160 -#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480 -#define IWL_MVM_RS_LEGACY_TABLE_COUNT 160 -#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400 -#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500 -#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500 -#define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */ -#define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */ -#define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. 
valid 100-8000 */ -#define IWL_MVM_RS_AGG_DISABLE_START 3 -#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */ -#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ -#define IWL_MVM_RS_TPC_TX_POWER_STEP 3 - -#endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c deleted file mode 100644 index 85ae902df7c0..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ /dev/null @@ -1,2104 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include "iwl-modparams.h" -#include "fw-api.h" -#include "mvm.h" - -void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (iwlwifi_mod_params.sw_crypto) - return; - - mutex_lock(&mvm->mutex); - - memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); - memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN); - mvmvif->rekey_data.replay_ctr = - cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); - mvmvif->rekey_data.valid = true; - - mutex_unlock(&mvm->mutex); -} - -#if IS_ENABLED(CONFIG_IPV6) -void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct inet6_dev *idev) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct inet6_ifaddr *ifa; - int idx = 0; - - read_lock_bh(&idev->lock); - list_for_each_entry(ifa, &idev->addr_list, if_list) { - mvmvif->target_ipv6_addrs[idx] = ifa->addr; - idx++; - if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) - break; - } - read_unlock_bh(&idev->lock); - - mvmvif->num_target_ipv6_addrs = idx; -} -#endif - -void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, int idx) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->tx_key_idx = idx; -} - -static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) -{ - int i; - - for (i = 0; i < IWL_P1K_SIZE; i++) - out[i] = cpu_to_le16(p1k[i]); -} - -struct wowlan_key_data { - struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; - struct iwl_wowlan_tkip_params_cmd *tkip; - bool error, use_rsc_tsc, use_tkip; - int wep_key_idx; -}; - -static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - void *_data) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct wowlan_key_data *data = _data; - struct aes_sc *aes_sc, *aes_tx_sc = NULL; - struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; - struct iwl_p1k_cache *rx_p1ks; - u8 *rx_mic_key; - struct ieee80211_key_seq seq; - u32 cur_rx_iv32 = 0; - u16 p1k[IWL_P1K_SIZE]; - int ret, i; - - mutex_lock(&mvm->mutex); - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ - struct { - struct iwl_mvm_wep_key_cmd wep_key_cmd; - struct iwl_mvm_wep_key wep_key; - } __packed wkc = { - .wep_key_cmd.mac_id_n_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), - .wep_key_cmd.num_keys = 1, - /* firmware sets STA_KEY_FLG_WEP_13BYTES */ - .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, - .wep_key.key_index = key->keyidx, - .wep_key.key_size = key->keylen, - }; - - /* - * This will fail -- the key functions don't set support - * pairwise WEP keys. However, that's better than silently - * failing WoWLAN. Or maybe not? 
- */ - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) - break; - - memcpy(&wkc.wep_key.key[3], key->key, key->keylen); - if (key->keyidx == mvmvif->tx_key_idx) { - /* TX key must be at offset 0 */ - wkc.wep_key.key_offset = 0; - } else { - /* others start at 1 */ - data->wep_key_idx++; - wkc.wep_key.key_offset = data->wep_key_idx; - } - - ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); - data->error = ret != 0; - - mvm->ptk_ivlen = key->iv_len; - mvm->ptk_icvlen = key->icv_len; - mvm->gtk_ivlen = key->iv_len; - mvm->gtk_icvlen = key->icv_len; - - /* don't upload key again */ - goto out_unlock; - } - default: - data->error = true; - goto out_unlock; - case WLAN_CIPHER_SUITE_AES_CMAC: - /* - * Ignore CMAC keys -- the WoWLAN firmware doesn't support them - * but we also shouldn't abort suspend due to that. It does have - * support for the IGTK key renewal, but doesn't really use the - * IGTK for anything. This means we could spuriously wake up or - * be deauthenticated, but that was considered acceptable. - */ - goto out_unlock; - case WLAN_CIPHER_SUITE_TKIP: - if (sta) { - tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; - tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; - - rx_p1ks = data->tkip->rx_uni; - - ieee80211_get_key_tx_seq(key, &seq); - tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); - - ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); - iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); - - memcpy(data->tkip->mic_keys.tx, - &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], - IWL_MIC_KEY_SIZE); - - rx_mic_key = data->tkip->mic_keys.rx_unicast; - } else { - tkip_sc = - data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; - rx_p1ks = data->tkip->rx_multi; - rx_mic_key = data->tkip->mic_keys.rx_mcast; - } - - /* - * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 (as they need to to avoid replay attacks) - * for checking the IV in the frames. - */ - for (i = 0; i < IWL_NUM_RSC; i++) { - ieee80211_get_key_rx_seq(key, i, &seq); - tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); - /* wrapping isn't allowed, AP must rekey */ - if (seq.tkip.iv32 > cur_rx_iv32) - cur_rx_iv32 = seq.tkip.iv32; - } - - ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, - cur_rx_iv32, p1k); - iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); - ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, - cur_rx_iv32 + 1, p1k); - iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); - - memcpy(rx_mic_key, - &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], - IWL_MIC_KEY_SIZE); - - data->use_tkip = true; - data->use_rsc_tsc = true; - break; - case WLAN_CIPHER_SUITE_CCMP: - if (sta) { - u64 pn64; - - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; - aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - - pn64 = atomic64_read(&key->tx_pn); - aes_tx_sc->pn = cpu_to_le64(pn64); - } else { - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; - } - - /* - * For non-QoS this relies on the fact that both the uCode and - * mac80211 use TID 0 for checking the IV in the frames. - */ - for (i = 0; i < IWL_NUM_RSC; i++) { - u8 *pn = seq.ccmp.pn; - - ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc[i].pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); - } - data->use_rsc_tsc = true; - break; - } - - /* - * The D3 firmware hardcodes the key offset 0 as the key it uses - * to transmit packets to the AP, i.e. the PTK. 
- */ - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { - key->hw_key_idx = 0; - mvm->ptk_ivlen = key->iv_len; - mvm->ptk_icvlen = key->icv_len; - } else { - /* - * firmware only supports TSC/RSC for a single key, - * so if there are multiple keep overwriting them - * with new ones -- this relies on mac80211 doing - * list_add_tail(). - */ - key->hw_key_idx = 1; - mvm->gtk_ivlen = key->iv_len; - mvm->gtk_icvlen = key->icv_len; - } - - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true); - data->error = ret != 0; -out_unlock: - mutex_unlock(&mvm->mutex); -} - -static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, - struct cfg80211_wowlan *wowlan) -{ - struct iwl_wowlan_patterns_cmd *pattern_cmd; - struct iwl_host_cmd cmd = { - .id = WOWLAN_PATTERNS, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - }; - int i, err; - - if (!wowlan->n_patterns) - return 0; - - cmd.len[0] = sizeof(*pattern_cmd) + - wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern); - - pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); - if (!pattern_cmd) - return -ENOMEM; - - pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); - - for (i = 0; i < wowlan->n_patterns; i++) { - int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); - - memcpy(&pattern_cmd->patterns[i].mask, - wowlan->patterns[i].mask, mask_len); - memcpy(&pattern_cmd->patterns[i].pattern, - wowlan->patterns[i].pattern, - wowlan->patterns[i].pattern_len); - pattern_cmd->patterns[i].mask_size = mask_len; - pattern_cmd->patterns[i].pattern_size = - wowlan->patterns[i].pattern_len; - } - - cmd.data[0] = pattern_cmd; - err = iwl_mvm_send_cmd(mvm, &cmd); - kfree(pattern_cmd); - return err; -} - -enum iwl_mvm_tcp_packet_type { - MVM_TCP_TX_SYN, - MVM_TCP_RX_SYNACK, - MVM_TCP_TX_DATA, - MVM_TCP_RX_ACK, - MVM_TCP_RX_WAKE, - MVM_TCP_TX_FIN, -}; - -static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr) -{ - __sum16 check = tcp_v4_check(len, saddr, daddr, 0); - return cpu_to_le16(be16_to_cpu((__force __be16)check)); -} - -static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif, - struct cfg80211_wowlan_tcp *tcp, - void *_pkt, u8 *mask, - __le16 *pseudo_hdr_csum, - enum iwl_mvm_tcp_packet_type ptype) -{ - struct { - struct ethhdr eth; - struct iphdr ip; - struct tcphdr tcp; - u8 data[]; - } __packed *pkt = _pkt; - u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr); - int i; - - pkt->eth.h_proto = cpu_to_be16(ETH_P_IP), - pkt->ip.version = 4; - pkt->ip.ihl = 5; - pkt->ip.protocol = IPPROTO_TCP; - - switch (ptype) { - case MVM_TCP_TX_SYN: - case MVM_TCP_TX_DATA: - case MVM_TCP_TX_FIN: - memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN); - memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN); - pkt->ip.ttl = 128; - pkt->ip.saddr = tcp->src; - pkt->ip.daddr = tcp->dst; - pkt->tcp.source = cpu_to_be16(tcp->src_port); - pkt->tcp.dest = cpu_to_be16(tcp->dst_port); - /* overwritten for TX SYN later */ - pkt->tcp.doff = sizeof(struct tcphdr) / 4; - pkt->tcp.window = cpu_to_be16(65000); - break; - case MVM_TCP_RX_SYNACK: - case MVM_TCP_RX_ACK: - case MVM_TCP_RX_WAKE: - memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN); - memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN); - pkt->ip.saddr = tcp->dst; - pkt->ip.daddr = tcp->src; - pkt->tcp.source = cpu_to_be16(tcp->dst_port); - pkt->tcp.dest = cpu_to_be16(tcp->src_port); - break; - default: - WARN_ON(1); - return; - } - - switch (ptype) { - case MVM_TCP_TX_SYN: - /* firmware assumes 8 option bytes - 8 NOPs for now */ - memset(pkt->data, 0x01, 8); - ip_tot_len += 8; - pkt->tcp.doff = (sizeof(struct tcphdr) + 8) 
/ 4; - pkt->tcp.syn = 1; - break; - case MVM_TCP_TX_DATA: - ip_tot_len += tcp->payload_len; - memcpy(pkt->data, tcp->payload, tcp->payload_len); - pkt->tcp.psh = 1; - pkt->tcp.ack = 1; - break; - case MVM_TCP_TX_FIN: - pkt->tcp.fin = 1; - pkt->tcp.ack = 1; - break; - case MVM_TCP_RX_SYNACK: - pkt->tcp.syn = 1; - pkt->tcp.ack = 1; - break; - case MVM_TCP_RX_ACK: - pkt->tcp.ack = 1; - break; - case MVM_TCP_RX_WAKE: - ip_tot_len += tcp->wake_len; - pkt->tcp.psh = 1; - pkt->tcp.ack = 1; - memcpy(pkt->data, tcp->wake_data, tcp->wake_len); - break; - } - - switch (ptype) { - case MVM_TCP_TX_SYN: - case MVM_TCP_TX_DATA: - case MVM_TCP_TX_FIN: - pkt->ip.tot_len = cpu_to_be16(ip_tot_len); - pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl); - break; - case MVM_TCP_RX_WAKE: - for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) { - u8 tmp = tcp->wake_mask[i]; - mask[i + 6] |= tmp << 6; - if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8)) - mask[i + 7] = tmp >> 2; - } - /* fall through for ethernet/IP/TCP headers mask */ - case MVM_TCP_RX_SYNACK: - case MVM_TCP_RX_ACK: - mask[0] = 0xff; /* match ethernet */ - /* - * match ethernet, ip.version, ip.ihl - * the ip.ihl half byte is really masked out by firmware - */ - mask[1] = 0x7f; - mask[2] = 0x80; /* match ip.protocol */ - mask[3] = 0xfc; /* match ip.saddr, ip.daddr */ - mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */ - mask[5] = 0x80; /* match tcp flags */ - /* leave rest (0 or set for MVM_TCP_RX_WAKE) */ - break; - }; - - *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr), - pkt->ip.saddr, pkt->ip.daddr); -} - -static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_wowlan_tcp *tcp) -{ - struct iwl_wowlan_remote_wake_config *cfg; - struct iwl_host_cmd cmd = { - .id = REMOTE_WAKE_CONFIG_CMD, - .len = { sizeof(*cfg), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret; - - if (!tcp) - return 0; - - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); - if (!cfg) - return -ENOMEM; - cmd.data[0] = cfg; - - cfg->max_syn_retries = 10; - cfg->max_data_retries = 10; - cfg->tcp_syn_ack_timeout = 1; /* seconds */ - cfg->tcp_ack_timeout = 1; /* seconds */ - - /* SYN (TX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->syn_tx.data, NULL, - &cfg->syn_tx.info.tcp_pseudo_header_checksum, - MVM_TCP_TX_SYN); - cfg->syn_tx.info.tcp_payload_length = 0; - - /* SYN/ACK (RX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask, - &cfg->synack_rx.info.tcp_pseudo_header_checksum, - MVM_TCP_RX_SYNACK); - cfg->synack_rx.info.tcp_payload_length = 0; - - /* KEEPALIVE/ACK (TX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->keepalive_tx.data, NULL, - &cfg->keepalive_tx.info.tcp_pseudo_header_checksum, - MVM_TCP_TX_DATA); - cfg->keepalive_tx.info.tcp_payload_length = - cpu_to_le16(tcp->payload_len); - cfg->sequence_number_offset = tcp->payload_seq.offset; - /* length must be 0..4, the field is little endian */ - cfg->sequence_number_length = tcp->payload_seq.len; - cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start); - cfg->keepalive_interval = cpu_to_le16(tcp->data_interval); - if (tcp->payload_tok.len) { - cfg->token_offset = tcp->payload_tok.offset; - cfg->token_length = tcp->payload_tok.len; - cfg->num_tokens = - cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len); - memcpy(cfg->tokens, tcp->payload_tok.token_stream, - tcp->tokens_size); - } else { - /* set tokens to max value to almost never run out */ - cfg->num_tokens = cpu_to_le16(65535); - } - - /* 
ACK (RX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->keepalive_ack_rx.data, - cfg->keepalive_ack_rx.rx_mask, - &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum, - MVM_TCP_RX_ACK); - cfg->keepalive_ack_rx.info.tcp_payload_length = 0; - - /* WAKEUP (RX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask, - &cfg->wake_rx.info.tcp_pseudo_header_checksum, - MVM_TCP_RX_WAKE); - cfg->wake_rx.info.tcp_payload_length = - cpu_to_le16(tcp->wake_len); - - /* FIN */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->fin_tx.data, NULL, - &cfg->fin_tx.info.tcp_pseudo_header_checksum, - MVM_TCP_TX_FIN); - cfg->fin_tx.info.tcp_payload_length = 0; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - kfree(cfg); - - return ret; -} - -static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *ap_sta) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_chanctx_conf *ctx; - u8 chains_static, chains_dynamic; - struct cfg80211_chan_def chandef; - int ret, i; - struct iwl_binding_cmd binding_cmd = {}; - struct iwl_time_quota_cmd quota_cmd = {}; - u32 status; - - /* add back the PHY */ - if (WARN_ON(!mvmvif->phy_ctxt)) - return -EINVAL; - - rcu_read_lock(); - ctx = rcu_dereference(vif->chanctx_conf); - if (WARN_ON(!ctx)) { - rcu_read_unlock(); - return -EINVAL; - } - chandef = ctx->def; - chains_static = ctx->rx_chains_static; - chains_dynamic = ctx->rx_chains_dynamic; - rcu_read_unlock(); - - ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, - chains_static, chains_dynamic); - if (ret) - return ret; - - /* add back the MAC */ - mvmvif->uploaded = false; - - if (WARN_ON(!vif->bss_conf.assoc)) - return -EINVAL; - - ret = iwl_mvm_mac_ctxt_add(mvm, vif); - if (ret) - return ret; - - /* add back binding - XXX refactor? 
*/ - binding_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); - binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); - binding_cmd.phy = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); - binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)); - for (i = 1; i < MAX_MACS_IN_BINDING; i++) - binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); - - status = 0; - ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, - sizeof(binding_cmd), &binding_cmd, - &status); - if (ret) { - IWL_ERR(mvm, "Failed to add binding: %d\n", ret); - return ret; - } - - if (status) { - IWL_ERR(mvm, "Binding command failed: %u\n", status); - return -EIO; - } - - ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false); - if (ret) - return ret; - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); - - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - if (ret) - return ret; - - /* and some quota */ - quota_cmd.quotas[0].id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); - quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA); - quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); - - for (i = 1; i < MAX_BINDINGS; i++) - quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); - - ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, - sizeof(quota_cmd), "a_cmd); - if (ret) - IWL_ERR(mvm, "Failed to send quota: %d\n", ret); - - if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) - IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); - - return 0; -} - -static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_nonqos_seq_query_cmd query_cmd = { - .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET), - .mac_id_n_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), - }; - struct iwl_host_cmd cmd = { - .id = NON_QOS_TX_COUNTER_CMD, - .flags = CMD_WANT_SKB, - }; - int err; - u32 size; - - cmd.data[0] = &query_cmd; - cmd.len[0] = sizeof(query_cmd); - - err = iwl_mvm_send_cmd(mvm, &cmd); - if (err) - return err; - - size = iwl_rx_packet_payload_len(cmd.resp_pkt); - if (size < sizeof(__le16)) { - err = -EINVAL; - } else { - err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); - /* firmware returns next, not last-used seqno */ - err = (u16) (err - 0x10); - } - - iwl_free_resp(&cmd); - return err; -} - -void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_nonqos_seq_query_cmd query_cmd = { - .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET), - .mac_id_n_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), - .value = cpu_to_le16(mvmvif->seqno), - }; - - /* return if called during restart, not resume from D3 */ - if (!mvmvif->seqno_valid) - return; - - mvmvif->seqno_valid = false; - - if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0, - sizeof(query_cmd), &query_cmd)) - IWL_ERR(mvm, "failed to set non-QoS seqno\n"); -} - -static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) -{ - iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); - - iwl_trans_stop_device(mvm->trans); - - /* - * Set the HW restart bit -- this is mostly true as we're - * going to load new firmware and reprogram that, though - * the reprogramming is going to be manual to avoid adding - * 
all the MACs that aren't support. - * We don't have to clear up everything though because the - * reprogramming is manual. When we resume, we'll actually - * go through a proper restart sequence again to switch - * back to the runtime firmware image. - */ - set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - - /* We reprogram keys and shouldn't allocate new key indices */ - memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - - mvm->ptk_ivlen = 0; - mvm->ptk_icvlen = 0; - mvm->ptk_ivlen = 0; - mvm->ptk_icvlen = 0; - - return iwl_mvm_load_d3_fw(mvm); -} - -static int -iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, - struct cfg80211_wowlan *wowlan, - struct iwl_wowlan_config_cmd *wowlan_config_cmd, - struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, - struct ieee80211_sta *ap_sta) -{ - int ret; - struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); - - /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ - - wowlan_config_cmd->is_11n_connection = - ap_sta->ht_cap.ht_supported; - - /* Query the last used seqno and set it */ - ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); - if (ret < 0) - return ret; - - wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret); - - iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); - - if (wowlan->disconnect) - wowlan_config_cmd->wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | - IWL_WOWLAN_WAKEUP_LINK_CHANGE); - if (wowlan->magic_pkt) - wowlan_config_cmd->wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); - if (wowlan->gtk_rekey_failure) - wowlan_config_cmd->wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); - if (wowlan->eap_identity_req) - wowlan_config_cmd->wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); - if (wowlan->four_way_handshake) - wowlan_config_cmd->wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); - if (wowlan->n_patterns) - wowlan_config_cmd->wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); - - if (wowlan->rfkill_release) - wowlan_config_cmd->wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); - - if (wowlan->tcp) { - /* - * Set the "link change" (really "link lost") flag as well - * since that implies losing the TCP connection. - */ - wowlan_config_cmd->wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | - IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | - IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | - IWL_WOWLAN_WAKEUP_LINK_CHANGE); - } - - return 0; -} - -static int -iwl_mvm_wowlan_config(struct iwl_mvm *mvm, - struct cfg80211_wowlan *wowlan, - struct iwl_wowlan_config_cmd *wowlan_config_cmd, - struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, - struct ieee80211_sta *ap_sta) -{ - struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; - struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; - struct wowlan_key_data key_data = { - .use_rsc_tsc = false, - .tkip = &tkip_cmd, - .use_tkip = false, - }; - int ret; - - ret = iwl_mvm_switch_to_d3(mvm); - if (ret) - return ret; - - ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); - if (ret) - return ret; - - key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); - if (!key_data.rsc_tsc) - return -ENOMEM; - - if (!iwlwifi_mod_params.sw_crypto) { - /* - * This needs to be unlocked due to lock ordering - * constraints. Since we're in the suspend path - * that isn't really a problem though. 
- */ - mutex_unlock(&mvm->mutex); - ieee80211_iter_keys(mvm->hw, vif, - iwl_mvm_wowlan_program_keys, - &key_data); - mutex_lock(&mvm->mutex); - if (key_data.error) { - ret = -EIO; - goto out; - } - - if (key_data.use_rsc_tsc) { - struct iwl_host_cmd rsc_tsc_cmd = { - .id = WOWLAN_TSC_RSC_PARAM, - .data[0] = key_data.rsc_tsc, - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - .len[0] = sizeof(*key_data.rsc_tsc), - }; - - ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd); - if (ret) - goto out; - } - - if (key_data.use_tkip) { - ret = iwl_mvm_send_cmd_pdu(mvm, - WOWLAN_TKIP_PARAM, - 0, sizeof(tkip_cmd), - &tkip_cmd); - if (ret) - goto out; - } - - if (mvmvif->rekey_data.valid) { - memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); - memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, - NL80211_KCK_LEN); - kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); - memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, - NL80211_KEK_LEN); - kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); - kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; - - ret = iwl_mvm_send_cmd_pdu(mvm, - WOWLAN_KEK_KCK_MATERIAL, 0, - sizeof(kek_kck_cmd), - &kek_kck_cmd); - if (ret) - goto out; - } - } - - ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, - sizeof(*wowlan_config_cmd), - wowlan_config_cmd); - if (ret) - goto out; - - ret = iwl_mvm_send_patterns(mvm, wowlan); - if (ret) - goto out; - - ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0); - if (ret) - goto out; - - ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp); - -out: - kfree(key_data.rsc_tsc); - return ret; -} - -static int -iwl_mvm_netdetect_config(struct iwl_mvm *mvm, - struct cfg80211_wowlan *wowlan, - struct cfg80211_sched_scan_request *nd_config, - struct ieee80211_vif *vif) -{ - struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; - int ret; - - ret = iwl_mvm_switch_to_d3(mvm); - if (ret) - return ret; - - /* rfkill release can be either for wowlan or netdetect */ - if (wowlan->rfkill_release) - wowlan_config_cmd.wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); - - ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, - sizeof(wowlan_config_cmd), - &wowlan_config_cmd); - if (ret) - return ret; - - ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, - IWL_MVM_SCAN_NETDETECT); - if (ret) - return ret; - - if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) - return -EBUSY; - - /* save the sched scan matchsets... 
*/ - if (nd_config->n_match_sets) { - mvm->nd_match_sets = kmemdup(nd_config->match_sets, - sizeof(*nd_config->match_sets) * - nd_config->n_match_sets, - GFP_KERNEL); - if (mvm->nd_match_sets) - mvm->n_nd_match_sets = nd_config->n_match_sets; - } - - /* ...and the sched scan channels for later reporting */ - mvm->nd_channels = kmemdup(nd_config->channels, - sizeof(*nd_config->channels) * - nd_config->n_channels, - GFP_KERNEL); - if (mvm->nd_channels) - mvm->n_nd_channels = nd_config->n_channels; - - return 0; -} - -static void iwl_mvm_free_nd(struct iwl_mvm *mvm) -{ - kfree(mvm->nd_match_sets); - mvm->nd_match_sets = NULL; - mvm->n_nd_match_sets = 0; - kfree(mvm->nd_channels); - mvm->nd_channels = NULL; - mvm->n_nd_channels = 0; -} - -static int __iwl_mvm_suspend(struct ieee80211_hw *hw, - struct cfg80211_wowlan *wowlan, - bool test) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct ieee80211_vif *vif = NULL; - struct iwl_mvm_vif *mvmvif = NULL; - struct ieee80211_sta *ap_sta = NULL; - struct iwl_d3_manager_config d3_cfg_cmd_data = { - /* - * Program the minimum sleep time to 10 seconds, as many - * platforms have issues processing a wakeup signal while - * still being in the process of suspending. - */ - .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), - }; - struct iwl_host_cmd d3_cfg_cmd = { - .id = D3_CONFIG_CMD, - .flags = CMD_WANT_SKB, - .data[0] = &d3_cfg_cmd_data, - .len[0] = sizeof(d3_cfg_cmd_data), - }; - int ret; - int len __maybe_unused; - - if (!wowlan) { - /* - * mac80211 shouldn't get here, but for D3 test - * it doesn't warrant a warning - */ - WARN_ON(!test); - return -EINVAL; - } - - mutex_lock(&mvm->mutex); - - vif = iwl_mvm_get_bss_vif(mvm); - if (IS_ERR_OR_NULL(vif)) { - ret = 1; - goto out_noreset; - } - - mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) { - /* if we're not associated, this must be netdetect */ - if (!wowlan->nd_config && !mvm->nd_config) { - ret = 1; - goto out_noreset; - } - - ret = iwl_mvm_netdetect_config( - mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif); - if (ret) - goto out; - - mvm->net_detect = true; - } else { - struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; - - ap_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(ap_sta)) { - ret = -EINVAL; - goto out_noreset; - } - - ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, - vif, mvmvif, ap_sta); - if (ret) - goto out_noreset; - ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, - vif, mvmvif, ap_sta); - if (ret) - goto out; - - mvm->net_detect = false; - } - - ret = iwl_mvm_power_update_device(mvm); - if (ret) - goto out; - - ret = iwl_mvm_power_update_mac(mvm); - if (ret) - goto out; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvm->d3_wake_sysassert) - d3_cfg_cmd_data.wakeup_flags |= - cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); -#endif - - /* must be last -- this switches firmware state */ - ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); - if (ret) - goto out; -#ifdef CONFIG_IWLWIFI_DEBUGFS - len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); - if (len >= sizeof(u32)) { - mvm->d3_test_pme_ptr = - le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); - } -#endif - iwl_free_resp(&d3_cfg_cmd); - - clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - - iwl_trans_d3_suspend(mvm->trans, test); - out: - if (ret < 0) { - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - ieee80211_restart_hw(mvm->hw); - iwl_mvm_free_nd(mvm); - } - 
out_noreset: - mutex_unlock(&mvm->mutex); - - return ret; -} - -static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm) -{ - struct iwl_notification_wait wait_d3; - static const u16 d3_notif[] = { D3_CONFIG_CMD }; - int ret; - - iwl_init_notification_wait(&mvm->notif_wait, &wait_d3, - d3_notif, ARRAY_SIZE(d3_notif), - NULL, NULL); - - ret = iwl_mvm_enter_d0i3(mvm->hw->priv); - if (ret) - goto remove_notif; - - ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ); - WARN_ON_ONCE(ret); - return ret; - -remove_notif: - iwl_remove_notification(&mvm->notif_wait, &wait_d3); - return ret; -} - -int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - /* make sure the d0i3 exit work is not pending */ - flush_work(&mvm->d0i3_exit_work); - - ret = iwl_trans_suspend(mvm->trans); - if (ret) - return ret; - - mvm->trans->wowlan_d0i3 = wowlan->any; - if (mvm->trans->wowlan_d0i3) { - /* 'any' trigger means d0i3 usage */ - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { - ret = iwl_mvm_enter_d0i3_sync(mvm); - - if (ret) - return ret; - } - - mutex_lock(&mvm->d0i3_suspend_mutex); - __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); - mutex_unlock(&mvm->d0i3_suspend_mutex); - - iwl_trans_d3_suspend(mvm->trans, false); - - return 0; - } - - return __iwl_mvm_suspend(hw, wowlan, false); -} - -/* converted data from the different status responses */ -struct iwl_wowlan_status_data { - u16 pattern_number; - u16 qos_seq_ctr[8]; - u32 wakeup_reasons; - u32 wake_packet_length; - u32 wake_packet_bufsize; - const u8 *wake_packet; -}; - -static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_wowlan_status_data *status) -{ - struct sk_buff *pkt = NULL; - struct cfg80211_wowlan_wakeup wakeup = { - .pattern_idx = -1, - }; - struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; - u32 reasons = status->wakeup_reasons; - - if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { - wakeup_report = NULL; - goto report; - } - - if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) - wakeup.magic_pkt = true; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) - wakeup.pattern_idx = - status->pattern_number; - - if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) - wakeup.disconnect = true; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) - wakeup.gtk_rekey_failure = true; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) - wakeup.rfkill_release = true; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) - wakeup.eap_identity_req = true; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) - wakeup.four_way_handshake = true; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) - wakeup.tcp_connlost = true; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) - wakeup.tcp_nomoretokens = true; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) - wakeup.tcp_match = true; - - if (status->wake_packet_bufsize) { - int pktsize = status->wake_packet_bufsize; - int pktlen = status->wake_packet_length; - const u8 *pktdata = status->wake_packet; - struct ieee80211_hdr *hdr = (void *)pktdata; - int truncated = pktlen - pktsize; - - /* this would be a firmware bug */ - if (WARN_ON_ONCE(truncated < 0)) - truncated = 0; - - if (ieee80211_is_data(hdr->frame_control)) { - int hdrlen = ieee80211_hdrlen(hdr->frame_control); - int ivlen = 0, icvlen = 4; /* also FCS */ - - pkt = 
alloc_skb(pktsize, GFP_KERNEL); - if (!pkt) - goto report; - - memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen); - pktdata += hdrlen; - pktsize -= hdrlen; - - if (ieee80211_has_protected(hdr->frame_control)) { - /* - * This is unlocked and using gtk_i(c)vlen, - * but since everything is under RTNL still - * that's not really a problem - changing - * it would be difficult. - */ - if (is_multicast_ether_addr(hdr->addr1)) { - ivlen = mvm->gtk_ivlen; - icvlen += mvm->gtk_icvlen; - } else { - ivlen = mvm->ptk_ivlen; - icvlen += mvm->ptk_icvlen; - } - } - - /* if truncated, FCS/ICV is (partially) gone */ - if (truncated >= icvlen) { - icvlen = 0; - truncated -= icvlen; - } else { - icvlen -= truncated; - truncated = 0; - } - - pktsize -= ivlen + icvlen; - pktdata += ivlen; - - memcpy(skb_put(pkt, pktsize), pktdata, pktsize); - - if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) - goto report; - wakeup.packet = pkt->data; - wakeup.packet_present_len = pkt->len; - wakeup.packet_len = pkt->len - truncated; - wakeup.packet_80211 = false; - } else { - int fcslen = 4; - - if (truncated >= 4) { - truncated -= 4; - fcslen = 0; - } else { - fcslen -= truncated; - truncated = 0; - } - pktsize -= fcslen; - wakeup.packet = status->wake_packet; - wakeup.packet_present_len = pktsize; - wakeup.packet_len = pktlen - truncated; - wakeup.packet_80211 = true; - } - } - - report: - ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); - kfree_skb(pkt); -} - -static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, - struct ieee80211_key_seq *seq) -{ - u64 pn; - - pn = le64_to_cpu(sc->pn); - seq->ccmp.pn[0] = pn >> 40; - seq->ccmp.pn[1] = pn >> 32; - seq->ccmp.pn[2] = pn >> 24; - seq->ccmp.pn[3] = pn >> 16; - seq->ccmp.pn[4] = pn >> 8; - seq->ccmp.pn[5] = pn; -} - -static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc, - struct ieee80211_key_seq *seq) -{ - seq->tkip.iv32 = le32_to_cpu(sc->iv32); - seq->tkip.iv16 = le16_to_cpu(sc->iv16); -} - -static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs, - struct ieee80211_key_conf *key) -{ - int tid; - - BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); - - for (tid = 0; tid < IWL_NUM_RSC; tid++) { - struct ieee80211_key_seq seq = {}; - - iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); - ieee80211_set_key_rx_seq(key, tid, &seq); - } -} - -static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs, - struct ieee80211_key_conf *key) -{ - int tid; - - BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); - - for (tid = 0; tid < IWL_NUM_RSC; tid++) { - struct ieee80211_key_seq seq = {}; - - iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq); - ieee80211_set_key_rx_seq(key, tid, &seq); - } -} - -static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, - struct iwl_wowlan_status *status) -{ - union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc; - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key); - break; - case WLAN_CIPHER_SUITE_TKIP: - iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key); - break; - default: - WARN_ON(1); - } -} - -struct iwl_mvm_d3_gtk_iter_data { - struct iwl_wowlan_status *status; - void *last_gtk; - u32 cipher; - bool find_phase, unhandled_cipher; - int num_keys; -}; - -static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - void *_data) -{ - struct iwl_mvm_d3_gtk_iter_data *data = _data; - - if (data->unhandled_cipher) - return; - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case 
WLAN_CIPHER_SUITE_WEP104: - /* ignore WEP completely, nothing to do */ - return; - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_TKIP: - /* we support these */ - break; - default: - /* everything else (even CMAC for MFP) - disconnect from AP */ - data->unhandled_cipher = true; - return; - } - - data->num_keys++; - - /* - * pairwise key - update sequence counters only; - * note that this assumes no TDLS sessions are active - */ - if (sta) { - struct ieee80211_key_seq seq = {}; - union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc; - - if (data->find_phase) - return; - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key); - atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); - break; - case WLAN_CIPHER_SUITE_TKIP: - iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); - iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); - ieee80211_set_key_tx_seq(key, &seq); - break; - } - - /* that's it for this key */ - return; - } - - if (data->find_phase) { - data->last_gtk = key; - data->cipher = key->cipher; - return; - } - - if (data->status->num_of_gtk_rekeys) - ieee80211_remove_key(key); - else if (data->last_gtk == key) - iwl_mvm_set_key_rx_seq(key, data->status); -} - -static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_wowlan_status *status) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_d3_gtk_iter_data gtkdata = { - .status = status, - }; - u32 disconnection_reasons = - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; - - if (!status || !vif->bss_conf.bssid) - return false; - - if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) - return false; - - /* find last GTK that we used initially, if any */ - gtkdata.find_phase = true; - ieee80211_iter_keys(mvm->hw, vif, - iwl_mvm_d3_update_gtks, >kdata); - /* not trying to keep connections with MFP/unhandled ciphers */ - if (gtkdata.unhandled_cipher) - return false; - if (!gtkdata.num_keys) - goto out; - if (!gtkdata.last_gtk) - return false; - - /* - * invalidate all other GTKs that might still exist and update - * the one that we used - */ - gtkdata.find_phase = false; - ieee80211_iter_keys(mvm->hw, vif, - iwl_mvm_d3_update_gtks, >kdata); - - if (status->num_of_gtk_rekeys) { - struct ieee80211_key_conf *key; - struct { - struct ieee80211_key_conf conf; - u8 key[32]; - } conf = { - .conf.cipher = gtkdata.cipher, - .conf.keyidx = status->gtk.key_index, - }; - - switch (gtkdata.cipher) { - case WLAN_CIPHER_SUITE_CCMP: - conf.conf.keylen = WLAN_KEY_LEN_CCMP; - memcpy(conf.conf.key, status->gtk.decrypt_key, - WLAN_KEY_LEN_CCMP); - break; - case WLAN_CIPHER_SUITE_TKIP: - conf.conf.keylen = WLAN_KEY_LEN_TKIP; - memcpy(conf.conf.key, status->gtk.decrypt_key, 16); - /* leave TX MIC key zeroed, we don't use it anyway */ - memcpy(conf.conf.key + - NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, - status->gtk.tkip_mic_key, 8); - break; - } - - key = ieee80211_gtk_rekey_add(vif, &conf.conf); - if (IS_ERR(key)) - return false; - iwl_mvm_set_key_rx_seq(key, status); - } - - if (status->num_of_gtk_rekeys) { - __be64 replay_ctr = - cpu_to_be64(le64_to_cpu(status->replay_ctr)); - ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, - (void *)&replay_ctr, GFP_KERNEL); - } - -out: - mvmvif->seqno_valid = true; - /* +0x10 because the set API expects next-to-use, not last-used */ - mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; - - return true; -} 
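
[Illustrative note, not part of the patch: the deleted d3.c packs the 48-bit CCMP packet number into a single 64-bit word before handing it to the WoWLAN firmware (iwl_mvm_wowlan_program_keys) and unpacks it again on resume (iwl_mvm_aes_sc_to_seq). The minimal standalone sketch below shows that byte layout only; the helper names are hypothetical and the cpu_to_le64/le64_to_cpu conversion the driver performs is omitted.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a 48-bit CCMP PN (pn[0] is the most significant byte, as
 * mac80211 reports it) into one 64-bit value, mirroring what the
 * driver sends to the WoWLAN firmware. */
static uint64_t pack_ccmp_pn(const uint8_t pn[6])
{
	return (uint64_t)pn[5] |
	       ((uint64_t)pn[4] << 8) |
	       ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[2] << 24) |
	       ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[0] << 40);
}

/* Reverse of the above, mirroring what happens on resume when the
 * firmware-reported counters are fed back to mac80211. */
static void unpack_ccmp_pn(uint64_t val, uint8_t pn[6])
{
	pn[0] = val >> 40;
	pn[1] = val >> 32;
	pn[2] = val >> 24;
	pn[3] = val >> 16;
	pn[4] = val >> 8;
	pn[5] = val;
}

int main(void)
{
	uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x12, 0x34, 0x56 }, out[6];

	unpack_ccmp_pn(pack_ccmp_pn(pn), out);
	printf("0x%012llx round-trips: %d\n",
	       (unsigned long long)pack_ccmp_pn(pn),
	       memcmp(pn, out, 6) == 0);
	return 0;
}
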
- -static struct iwl_wowlan_status * -iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - u32 base = mvm->error_event_table; - struct error_table_start { - /* cf. struct iwl_error_event_table */ - u32 valid; - u32 error_id; - } err_info; - struct iwl_host_cmd cmd = { - .id = WOWLAN_GET_STATUSES, - .flags = CMD_WANT_SKB, - }; - struct iwl_wowlan_status *status, *fw_status; - int ret, len, status_size; - - iwl_trans_read_mem_bytes(mvm->trans, base, - &err_info, sizeof(err_info)); - - if (err_info.valid) { - IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n", - err_info.valid, err_info.error_id); - if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { - struct cfg80211_wowlan_wakeup wakeup = { - .rfkill_release = true, - }; - ieee80211_report_wowlan_wakeup(vif, &wakeup, - GFP_KERNEL); - } - return ERR_PTR(-EIO); - } - - /* only for tracing for now */ - ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL); - if (ret) - IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) { - IWL_ERR(mvm, "failed to query status (%d)\n", ret); - return ERR_PTR(ret); - } - - /* RF-kill already asserted again... */ - if (!cmd.resp_pkt) { - fw_status = ERR_PTR(-ERFKILL); - goto out_free_resp; - } - - status_size = sizeof(*fw_status); - - len = iwl_rx_packet_payload_len(cmd.resp_pkt); - if (len < status_size) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - fw_status = ERR_PTR(-EIO); - goto out_free_resp; - } - - status = (void *)cmd.resp_pkt->data; - if (len != (status_size + - ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - fw_status = ERR_PTR(-EIO); - goto out_free_resp; - } - - fw_status = kmemdup(status, len, GFP_KERNEL); - -out_free_resp: - iwl_free_resp(&cmd); - return fw_status; -} - -/* releases the MVM mutex */ -static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_wowlan_status_data status; - struct iwl_wowlan_status *fw_status; - int i; - bool keep; - struct ieee80211_sta *ap_sta; - struct iwl_mvm_sta *mvm_ap_sta; - - fw_status = iwl_mvm_get_wakeup_status(mvm, vif); - if (IS_ERR_OR_NULL(fw_status)) - goto out_unlock; - - status.pattern_number = le16_to_cpu(fw_status->pattern_number); - for (i = 0; i < 8; i++) - status.qos_seq_ctr[i] = - le16_to_cpu(fw_status->qos_seq_ctr[i]); - status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons); - status.wake_packet_length = - le32_to_cpu(fw_status->wake_packet_length); - status.wake_packet_bufsize = - le32_to_cpu(fw_status->wake_packet_bufsize); - status.wake_packet = fw_status->wake_packet; - - /* still at hard-coded place 0 for D3 image */ - ap_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[0], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(ap_sta)) - goto out_free; - - mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - u16 seq = status.qos_seq_ctr[i]; - /* firmware stores last-used value, we store next value */ - seq += 0x10; - mvm_ap_sta->tid_data[i].seq_number = seq; - } - - /* now we have all the data we need, unlock to avoid mac80211 issues */ - mutex_unlock(&mvm->mutex); - - iwl_mvm_report_wakeup_reasons(mvm, vif, &status); - - keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status); - - kfree(fw_status); - return keep; - -out_free: - kfree(fw_status); -out_unlock: - mutex_unlock(&mvm->mutex); - return false; -} - -struct iwl_mvm_nd_query_results { - u32 
matched_profiles; - struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; -}; - -static int -iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, - struct iwl_mvm_nd_query_results *results) -{ - struct iwl_scan_offload_profiles_query *query; - struct iwl_host_cmd cmd = { - .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, - .flags = CMD_WANT_SKB, - }; - int ret, len; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) { - IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); - return ret; - } - - /* RF-kill already asserted again... */ - if (!cmd.resp_pkt) { - ret = -ERFKILL; - goto out_free_resp; - } - - len = iwl_rx_packet_payload_len(cmd.resp_pkt); - if (len < sizeof(*query)) { - IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); - ret = -EIO; - goto out_free_resp; - } - - query = (void *)cmd.resp_pkt->data; - - results->matched_profiles = le32_to_cpu(query->matched_profiles); - memcpy(results->matches, query->matches, sizeof(results->matches)); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); -#endif - -out_free_resp: - iwl_free_resp(&cmd); - return ret; -} - -static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct cfg80211_wowlan_nd_info *net_detect = NULL; - struct cfg80211_wowlan_wakeup wakeup = { - .pattern_idx = -1, - }; - struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; - struct iwl_mvm_nd_query_results query; - struct iwl_wowlan_status *fw_status; - unsigned long matched_profiles; - u32 reasons = 0; - int i, j, n_matches, ret; - - fw_status = iwl_mvm_get_wakeup_status(mvm, vif); - if (!IS_ERR_OR_NULL(fw_status)) { - reasons = le32_to_cpu(fw_status->wakeup_reasons); - kfree(fw_status); - } - - if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) - wakeup.rfkill_release = true; - - if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) - goto out; - - ret = iwl_mvm_netdetect_query_results(mvm, &query); - if (ret || !query.matched_profiles) { - wakeup_report = NULL; - goto out; - } - - matched_profiles = query.matched_profiles; - if (mvm->n_nd_match_sets) { - n_matches = hweight_long(matched_profiles); - } else { - IWL_ERR(mvm, "no net detect match information available\n"); - n_matches = 0; - } - - net_detect = kzalloc(sizeof(*net_detect) + - (n_matches * sizeof(net_detect->matches[0])), - GFP_KERNEL); - if (!net_detect || !n_matches) - goto out_report_nd; - - for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { - struct iwl_scan_offload_profile_match *fw_match; - struct cfg80211_wowlan_nd_match *match; - int idx, n_channels = 0; - - fw_match = &query.matches[i]; - - for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++) - n_channels += hweight8(fw_match->matching_channels[j]); - - match = kzalloc(sizeof(*match) + - (n_channels * sizeof(*match->channels)), - GFP_KERNEL); - if (!match) - goto out_report_nd; - - net_detect->matches[net_detect->n_matches++] = match; - - /* We inverted the order of the SSIDs in the scan - * request, so invert the index here. 
- */ - idx = mvm->n_nd_match_sets - i - 1; - match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; - memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, - match->ssid.ssid_len); - - if (mvm->n_nd_channels < n_channels) - continue; - - for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++) - if (fw_match->matching_channels[j / 8] & (BIT(j % 8))) - match->channels[match->n_channels++] = - mvm->nd_channels[j]->center_freq; - } - -out_report_nd: - wakeup.net_detect = net_detect; -out: - iwl_mvm_free_nd(mvm); - - mutex_unlock(&mvm->mutex); - ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); - - if (net_detect) { - for (i = 0; i < net_detect->n_matches; i++) - kfree(net_detect->matches[i]); - kfree(net_detect); - } -} - -static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) -{ -#ifdef CONFIG_IWLWIFI_DEBUGFS - const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN]; - u32 len = img->sec[IWL_UCODE_SECTION_DATA].len; - u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset; - - if (!mvm->store_d3_resume_sram) - return; - - if (!mvm->d3_resume_sram) { - mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL); - if (!mvm->d3_resume_sram) - return; - } - - iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len); -#endif -} - -static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) -{ - /* skip the one we keep connection on */ - if (data == vif) - return; - - if (vif->type == NL80211_IFTYPE_STATION) - ieee80211_resume_disconnect(vif); -} - -static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) -{ - struct ieee80211_vif *vif = NULL; - int ret; - enum iwl_d3_status d3_status; - bool keep = false; - - mutex_lock(&mvm->mutex); - - /* get the BSS vif pointer again */ - vif = iwl_mvm_get_bss_vif(mvm); - if (IS_ERR_OR_NULL(vif)) - goto err; - - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); - if (ret) - goto err; - - if (d3_status != IWL_D3_STATUS_ALIVE) { - IWL_INFO(mvm, "Device was reset during suspend\n"); - goto err; - } - - /* query SRAM first in case we want event logging */ - iwl_mvm_read_d3_sram(mvm); - - /* - * Query the current location and source from the D3 firmware so we - * can play it back when we re-intiailize the D0 firmware - */ - iwl_mvm_update_changed_regdom(mvm); - - if (mvm->net_detect) { - iwl_mvm_query_netdetect_reasons(mvm, vif); - /* has unlocked the mutex, so skip that */ - goto out; - } else { - keep = iwl_mvm_query_wakeup_reasons(mvm, vif); -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (keep) - mvm->keep_vif = vif; -#endif - /* has unlocked the mutex, so skip that */ - goto out_iterate; - } - -err: - iwl_mvm_free_nd(mvm); - mutex_unlock(&mvm->mutex); - -out_iterate: - if (!test) - ieee80211_iterate_active_interfaces_rtnl(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); - -out: - /* return 1 to reconfigure the device */ - set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); - - /* We always return 1, which causes mac80211 to do a reconfig - * with IEEE80211_RECONFIG_TYPE_RESTART. This type of - * reconfig calls iwl_mvm_restart_complete(), where we unref - * the IWL_MVM_REF_UCODE_DOWN, so we need to take the - * reference here. 
- */ - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - return 1; -} - -static int iwl_mvm_resume_d3(struct iwl_mvm *mvm) -{ - iwl_trans_resume(mvm->trans); - - return __iwl_mvm_resume(mvm, false); -} - -static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) -{ - bool exit_now; - enum iwl_d3_status d3_status; - - iwl_trans_d3_resume(mvm->trans, &d3_status, false); - - /* - * make sure to clear D0I3_DEFER_WAKEUP before - * calling iwl_trans_resume(), which might wait - * for d0i3 exit completion. - */ - mutex_lock(&mvm->d0i3_suspend_mutex); - __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); - exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, - &mvm->d0i3_suspend_flags); - mutex_unlock(&mvm->d0i3_suspend_mutex); - if (exit_now) { - IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n"); - _iwl_mvm_exit_d0i3(mvm); - } - - iwl_trans_resume(mvm->trans); - - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { - int ret = iwl_mvm_exit_d0i3(mvm->hw->priv); - - if (ret) - return ret; - /* - * d0i3 exit will be deferred until reconfig_complete. - * make sure there we are out of d0i3. - */ - } - return 0; -} - -int iwl_mvm_resume(struct ieee80211_hw *hw) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - /* 'any' trigger means d0i3 was used */ - if (hw->wiphy->wowlan_config->any) - return iwl_mvm_resume_d0i3(mvm); - else - return iwl_mvm_resume_d3(mvm); -} - -void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - device_set_wakeup_enable(mvm->trans->dev, enabled); -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) -{ - struct iwl_mvm *mvm = inode->i_private; - int err; - - if (mvm->d3_test_active) - return -EBUSY; - - file->private_data = inode->i_private; - - ieee80211_stop_queues(mvm->hw); - synchronize_net(); - - /* start pseudo D3 */ - rtnl_lock(); - err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); - rtnl_unlock(); - if (err > 0) - err = -EINVAL; - if (err) { - ieee80211_wake_queues(mvm->hw); - return err; - } - mvm->d3_test_active = true; - mvm->keep_vif = NULL; - return 0; -} - -static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - u32 pme_asserted; - - while (true) { - /* read pme_ptr if available */ - if (mvm->d3_test_pme_ptr) { - pme_asserted = iwl_trans_read_mem32(mvm->trans, - mvm->d3_test_pme_ptr); - if (pme_asserted) - break; - } - - if (msleep_interruptible(100)) - break; - } - - return 0; -} - -static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - /* skip the one we keep connection on */ - if (_data == vif) - return; - - if (vif->type == NL80211_IFTYPE_STATION) - ieee80211_connection_loss(vif); -} - -static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) -{ - struct iwl_mvm *mvm = inode->i_private; - int remaining_time = 10; - - mvm->d3_test_active = false; - rtnl_lock(); - __iwl_mvm_resume(mvm, true); - rtnl_unlock(); - iwl_abort_notification_waits(&mvm->notif_wait); - ieee80211_restart_hw(mvm->hw); - - /* wait for restart and disconnect all interfaces */ - while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - remaining_time > 0) { - remaining_time--; - msleep(1000); - } - - if (remaining_time == 0) - IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - 
iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); - - ieee80211_wake_queues(mvm->hw); - - return 0; -} - -const struct file_operations iwl_dbgfs_d3_test_ops = { - .llseek = no_llseek, - .open = iwl_mvm_d3_test_open, - .read = iwl_mvm_d3_test_read, - .release = iwl_mvm_d3_test_release, -}; -#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c deleted file mode 100644 index 7904b41a04c6..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ /dev/null @@ -1,1483 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#include "mvm.h" -#include "fw-api-tof.h" -#include "debugfs.h" - -static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - enum iwl_dbgfs_pm_mask param, int val) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm; - - dbgfs_pm->mask |= param; - - switch (param) { - case MVM_DEBUGFS_PM_KEEP_ALIVE: { - int dtimper = vif->bss_conf.dtim_period ?: 1; - int dtimper_msec = dtimper * vif->bss_conf.beacon_int; - - IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); - if (val * MSEC_PER_SEC < 3 * dtimper_msec) - IWL_WARN(mvm, - "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n", - val * MSEC_PER_SEC, 3 * dtimper_msec); - dbgfs_pm->keep_alive_seconds = val; - break; - } - case MVM_DEBUGFS_PM_SKIP_OVER_DTIM: - IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", - val ? "enabled" : "disabled"); - dbgfs_pm->skip_over_dtim = val; - break; - case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS: - IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val); - dbgfs_pm->skip_dtim_periods = val; - break; - case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT: - IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val); - dbgfs_pm->rx_data_timeout = val; - break; - case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT: - IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val); - dbgfs_pm->tx_data_timeout = val; - break; - case MVM_DEBUGFS_PM_LPRX_ENA: - IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled"); - dbgfs_pm->lprx_ena = val; - break; - case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD: - IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val); - dbgfs_pm->lprx_rssi_threshold = val; - break; - case MVM_DEBUGFS_PM_SNOOZE_ENABLE: - IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val); - dbgfs_pm->snooze_ena = val; - break; - case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING: - IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val); - dbgfs_pm->uapsd_misbehaving = val; - break; - case MVM_DEBUGFS_PM_USE_PS_POLL: - IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val); - dbgfs_pm->use_ps_poll = val; - break; - } -} - -static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - enum iwl_dbgfs_pm_mask param; - int val, ret; - - if (!strncmp("keep_alive=", buf, 11)) { - if (sscanf(buf + 11, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_KEEP_ALIVE; - } else if (!strncmp("skip_over_dtim=", buf, 15)) { - if (sscanf(buf + 15, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM; - } else if (!strncmp("skip_dtim_periods=", buf, 18)) { - if (sscanf(buf + 18, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS; - } else if (!strncmp("rx_data_timeout=", buf, 16)) { - if (sscanf(buf + 16, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT; - } else if (!strncmp("tx_data_timeout=", buf, 16)) { - if (sscanf(buf + 16, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; - } else if (!strncmp("lprx=", buf, 5)) { - if (sscanf(buf + 5, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_LPRX_ENA; - } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) { - if (sscanf(buf + 20, "%d", &val) != 1) - return -EINVAL; - if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val < - POWER_LPRX_RSSI_THRESHOLD_MIN) - return -EINVAL; - param = 
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD; - } else if (!strncmp("snooze_enable=", buf, 14)) { - if (sscanf(buf + 14, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_SNOOZE_ENABLE; - } else if (!strncmp("uapsd_misbehaving=", buf, 18)) { - if (sscanf(buf + 18, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING; - } else if (!strncmp("use_ps_poll=", buf, 12)) { - if (sscanf(buf + 12, "%d", &val) != 1) - return -EINVAL; - param = MVM_DEBUGFS_PM_USE_PS_POLL; - } else { - return -EINVAL; - } - - mutex_lock(&mvm->mutex); - iwl_dbgfs_update_pm(mvm, vif, param, val); - ret = iwl_mvm_power_update_mac(mvm); - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - char buf[64]; - int bufsz = sizeof(buf); - int pos; - - pos = scnprintf(buf, bufsz, "bss limit = %d\n", - vif->bss_conf.txpower); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_pm_params_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[512]; - int bufsz = sizeof(buf); - int pos; - - pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_mac_params_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u8 ap_sta_id; - struct ieee80211_chanctx_conf *chanctx_conf; - char buf[512]; - int bufsz = sizeof(buf); - int pos = 0; - int i; - - mutex_lock(&mvm->mutex); - - ap_sta_id = mvmvif->ap_sta_id; - - switch (ieee80211_vif_type_p2p(vif)) { - case NL80211_IFTYPE_ADHOC: - pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n"); - break; - case NL80211_IFTYPE_STATION: - pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n"); - break; - case NL80211_IFTYPE_AP: - pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n"); - break; - case NL80211_IFTYPE_P2P_CLIENT: - pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n"); - break; - case NL80211_IFTYPE_P2P_GO: - pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n"); - break; - case NL80211_IFTYPE_P2P_DEVICE: - pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n"); - break; - default: - break; - } - - pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n", - mvmvif->id, mvmvif->color); - pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n", - vif->bss_conf.bssid); - pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n"); - for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) - pos += scnprintf(buf+pos, bufsz-pos, - "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n", - i, mvmvif->queue_params[i].txop, - mvmvif->queue_params[i].cw_min, - mvmvif->queue_params[i].cw_max, - mvmvif->queue_params[i].aifs, - mvmvif->queue_params[i].uapsd); - - if (vif->type == NL80211_IFTYPE_STATION && - ap_sta_id != IWL_MVM_STATION_COUNT) { - struct ieee80211_sta *sta; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id], - lockdep_is_held(&mvm->mutex)); - if (!IS_ERR_OR_NULL(sta)) { - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - - pos += scnprintf(buf+pos, 
bufsz-pos, - "ap_sta_id %d - reduced Tx power %d\n", - ap_sta_id, - mvm_sta->bt_reduced_txpower); - } - } - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - if (chanctx_conf) - pos += scnprintf(buf+pos, bufsz-pos, - "idle rx chains %d, active rx chains: %d\n", - chanctx_conf->rx_chains_static, - chanctx_conf->rx_chains_dynamic); - rcu_read_unlock(); - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, - enum iwl_dbgfs_bf_mask param, int value) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; - - dbgfs_bf->mask |= param; - - switch (param) { - case MVM_DEBUGFS_BF_ENERGY_DELTA: - dbgfs_bf->bf_energy_delta = value; - break; - case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA: - dbgfs_bf->bf_roaming_energy_delta = value; - break; - case MVM_DEBUGFS_BF_ROAMING_STATE: - dbgfs_bf->bf_roaming_state = value; - break; - case MVM_DEBUGFS_BF_TEMP_THRESHOLD: - dbgfs_bf->bf_temp_threshold = value; - break; - case MVM_DEBUGFS_BF_TEMP_FAST_FILTER: - dbgfs_bf->bf_temp_fast_filter = value; - break; - case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER: - dbgfs_bf->bf_temp_slow_filter = value; - break; - case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: - dbgfs_bf->bf_enable_beacon_filter = value; - break; - case MVM_DEBUGFS_BF_DEBUG_FLAG: - dbgfs_bf->bf_debug_flag = value; - break; - case MVM_DEBUGFS_BF_ESCAPE_TIMER: - dbgfs_bf->bf_escape_timer = value; - break; - case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT: - dbgfs_bf->ba_enable_beacon_abort = value; - break; - case MVM_DEBUGFS_BA_ESCAPE_TIMER: - dbgfs_bf->ba_escape_timer = value; - break; - } -} - -static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - enum iwl_dbgfs_bf_mask param; - int value, ret = 0; - - if (!strncmp("bf_energy_delta=", buf, 16)) { - if (sscanf(buf+16, "%d", &value) != 1) - return -EINVAL; - if (value < IWL_BF_ENERGY_DELTA_MIN || - value > IWL_BF_ENERGY_DELTA_MAX) - return -EINVAL; - param = MVM_DEBUGFS_BF_ENERGY_DELTA; - } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) { - if (sscanf(buf+24, "%d", &value) != 1) - return -EINVAL; - if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || - value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) - return -EINVAL; - param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA; - } else if (!strncmp("bf_roaming_state=", buf, 17)) { - if (sscanf(buf+17, "%d", &value) != 1) - return -EINVAL; - if (value < IWL_BF_ROAMING_STATE_MIN || - value > IWL_BF_ROAMING_STATE_MAX) - return -EINVAL; - param = MVM_DEBUGFS_BF_ROAMING_STATE; - } else if (!strncmp("bf_temp_threshold=", buf, 18)) { - if (sscanf(buf+18, "%d", &value) != 1) - return -EINVAL; - if (value < IWL_BF_TEMP_THRESHOLD_MIN || - value > IWL_BF_TEMP_THRESHOLD_MAX) - return -EINVAL; - param = MVM_DEBUGFS_BF_TEMP_THRESHOLD; - } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) { - if (sscanf(buf+20, "%d", &value) != 1) - return -EINVAL; - if (value < IWL_BF_TEMP_FAST_FILTER_MIN || - value > IWL_BF_TEMP_FAST_FILTER_MAX) - return -EINVAL; - param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER; - } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) { - if (sscanf(buf+20, "%d", &value) != 1) - return -EINVAL; - if (value < IWL_BF_TEMP_SLOW_FILTER_MIN || - value > IWL_BF_TEMP_SLOW_FILTER_MAX) - return -EINVAL; - param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER; - } else if 
(!strncmp("bf_enable_beacon_filter=", buf, 24)) { - if (sscanf(buf+24, "%d", &value) != 1) - return -EINVAL; - if (value < 0 || value > 1) - return -EINVAL; - param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER; - } else if (!strncmp("bf_debug_flag=", buf, 14)) { - if (sscanf(buf+14, "%d", &value) != 1) - return -EINVAL; - if (value < 0 || value > 1) - return -EINVAL; - param = MVM_DEBUGFS_BF_DEBUG_FLAG; - } else if (!strncmp("bf_escape_timer=", buf, 16)) { - if (sscanf(buf+16, "%d", &value) != 1) - return -EINVAL; - if (value < IWL_BF_ESCAPE_TIMER_MIN || - value > IWL_BF_ESCAPE_TIMER_MAX) - return -EINVAL; - param = MVM_DEBUGFS_BF_ESCAPE_TIMER; - } else if (!strncmp("ba_escape_timer=", buf, 16)) { - if (sscanf(buf+16, "%d", &value) != 1) - return -EINVAL; - if (value < IWL_BA_ESCAPE_TIMER_MIN || - value > IWL_BA_ESCAPE_TIMER_MAX) - return -EINVAL; - param = MVM_DEBUGFS_BA_ESCAPE_TIMER; - } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) { - if (sscanf(buf+23, "%d", &value) != 1) - return -EINVAL; - if (value < 0 || value > 1) - return -EINVAL; - param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT; - } else { - return -EINVAL; - } - - mutex_lock(&mvm->mutex); - iwl_dbgfs_update_bf(vif, param, value); - if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); - else - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_bf_params_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_beacon_filter_cmd cmd = { - IWL_BF_CMD_CONFIG_DEFAULTS, - .bf_enable_beacon_filter = - cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT), - .ba_enable_beacon_abort = - cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT), - }; - - iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); - if (mvmvif->bf_data.bf_enabled) - cmd.bf_enable_beacon_filter = cpu_to_le32(1); - else - cmd.bf_enable_beacon_filter = 0; - - pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", - le32_to_cpu(cmd.bf_energy_delta)); - pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", - le32_to_cpu(cmd.bf_roaming_energy_delta)); - pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", - le32_to_cpu(cmd.bf_roaming_state)); - pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n", - le32_to_cpu(cmd.bf_temp_threshold)); - pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n", - le32_to_cpu(cmd.bf_temp_fast_filter)); - pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n", - le32_to_cpu(cmd.bf_temp_slow_filter)); - pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", - le32_to_cpu(cmd.bf_enable_beacon_filter)); - pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", - le32_to_cpu(cmd.bf_debug_flag)); - pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", - le32_to_cpu(cmd.bf_escape_timer)); - pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", - le32_to_cpu(cmd.ba_escape_timer)); - pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", - le32_to_cpu(cmd.ba_enable_beacon_abort)); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static inline char *iwl_dbgfs_is_match(char *name, char *buf) -{ - int len = strlen(name); - - return !strncmp(name, buf, len) ? 
buf + len : NULL; -} - -static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = -EINVAL; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("tof_disabled=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.tof_disabled = value; - goto out; - } - - data = iwl_dbgfs_is_match("one_sided_disabled=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.one_sided_disabled = value; - goto out; - } - - data = iwl_dbgfs_is_match("is_debug_mode=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.is_debug_mode = value; - goto out; - } - - data = iwl_dbgfs_is_match("is_buf=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.is_buf_required = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_tof_cfg=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - ret = iwl_mvm_tof_config_cmd(mvm); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_enable_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_config_cmd *cmd; - - cmd = &mvm->tof_data.tof_cfg; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n", - cmd->tof_disabled); - pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n", - cmd->one_sided_disabled); - pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n", - cmd->is_debug_mode); - pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n", - cmd->is_buf_required); - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("burst_period=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (!ret) - mvm->tof_data.responder_cfg.burst_period = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("min_delta_ftm=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.min_delta_ftm = value; - goto out; - } - - data = iwl_dbgfs_is_match("burst_duration=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.burst_duration = value; - goto out; - } - - data = iwl_dbgfs_is_match("num_of_burst_exp=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.num_of_burst_exp = value; - goto out; - } - - data = iwl_dbgfs_is_match("abort_responder=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.abort_responder = value; - goto out; - } - - data = iwl_dbgfs_is_match("get_ch_est=", buf); - if (data) 
{ - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.get_ch_est = value; - goto out; - } - - data = iwl_dbgfs_is_match("recv_sta_req_params=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.recv_sta_req_params = value; - goto out; - } - - data = iwl_dbgfs_is_match("channel_num=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.channel_num = value; - goto out; - } - - data = iwl_dbgfs_is_match("bandwidth=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.bandwidth = value; - goto out; - } - - data = iwl_dbgfs_is_match("rate=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.rate = value; - goto out; - } - - data = iwl_dbgfs_is_match("bssid=", buf); - if (data) { - u8 *mac = mvm->tof_data.responder_cfg.bssid; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - } - - data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.tsf_timer_offset_msecs = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("toa_offset=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.toa_offset = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("center_freq=", buf); - if (data) { - struct iwl_tof_responder_config_cmd *cmd = - &mvm->tof_data.responder_cfg; - - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - enum ieee80211_band band = (cmd->channel_num <= 14) ? - IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; - struct ieee80211_channel chn = { - .band = band, - .center_freq = ieee80211_channel_to_frequency( - cmd->channel_num, band), - }; - struct cfg80211_chan_def chandef = { - .chan = &chn, - .center_freq1 = - ieee80211_channel_to_frequency(value, - band), - }; - - cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef); - } - goto out; - } - - data = iwl_dbgfs_is_match("ftm_per_burst=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.ftm_per_burst = value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value; - goto out; - } - - data = iwl_dbgfs_is_match("asap_mode=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.asap_mode = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_responder_cfg=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - ret = iwl_mvm_tof_responder_cmd(mvm, vif); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_responder_config_cmd *cmd; - - cmd = &mvm->tof_data.responder_cfg; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n", - le16_to_cpu(cmd->burst_period)); - pos += scnprintf(buf + pos, bufsz - 
pos, "burst_duration = %d\n", - cmd->burst_duration); - pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n", - cmd->bandwidth); - pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n", - cmd->channel_num); - pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n", - cmd->ctrl_ch_position); - pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n", - cmd->bssid); - pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n", - cmd->min_delta_ftm); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n", - cmd->num_of_burst_exp); - pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate); - pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n", - cmd->abort_responder); - pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n", - cmd->get_ch_est); - pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n", - cmd->recv_sta_req_params); - pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n", - cmd->ftm_per_burst); - pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n", - cmd->ftm_resp_ts_avail); - pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n", - cmd->asap_mode); - pos += scnprintf(buf + pos, bufsz - pos, - "tsf_timer_offset_msecs = %d\n", - le16_to_cpu(cmd->tsf_timer_offset_msecs)); - pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n", - le16_to_cpu(cmd->toa_offset)); - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("request_id=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.request_id = value; - goto out; - } - - data = iwl_dbgfs_is_match("initiator=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.initiator = value; - goto out; - } - - data = iwl_dbgfs_is_match("one_sided_los_disable=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.one_sided_los_disable = value; - goto out; - } - - data = iwl_dbgfs_is_match("req_timeout=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.req_timeout = value; - goto out; - } - - data = iwl_dbgfs_is_match("report_policy=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.report_policy = value; - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_random=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.macaddr_random = value; - goto out; - } - - data = iwl_dbgfs_is_match("num_of_ap=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.num_of_ap = value; - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_template=", buf); - if (data) { - u8 mac[ETH_ALEN]; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN); - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_mask=", buf); - if (data) { - u8 mac[ETH_ALEN]; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - memcpy(mvm->tof_data.range_req.macaddr_mask, 
mac, ETH_ALEN); - goto out; - } - - data = iwl_dbgfs_is_match("ap=", buf); - if (data) { - struct iwl_tof_range_req_ap_entry ap = {}; - int size = sizeof(struct iwl_tof_range_req_ap_entry); - u16 burst_period; - u8 *mac = ap.bssid; - unsigned int i; - - if (sscanf(data, "%u %hhd %hhd %hhd" - "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx" - "%hhd %hhd %hd" - "%hhd %hhd %d" - "%hhx %hhd %hhd %hhd", - &i, &ap.channel_num, &ap.bandwidth, - &ap.ctrl_ch_position, - mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, - &ap.measure_type, &ap.num_of_bursts, - &burst_period, - &ap.samples_per_burst, &ap.retries_per_sample, - &ap.tsf_delta, &ap.location_req, &ap.asap_mode, - &ap.enable_dyn_ack, &ap.rssi) != 20) { - ret = -EINVAL; - goto out; - } - if (i >= IWL_MVM_TOF_MAX_APS) { - IWL_ERR(mvm, "Invalid AP index %d\n", i); - ret = -EINVAL; - goto out; - } - - ap.burst_period = cpu_to_le16(burst_period); - - memcpy(&mvm->tof_data.range_req.ap[i], &ap, size); - goto out; - } - - data = iwl_dbgfs_is_match("send_range_request=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) - ret = iwl_mvm_tof_range_request_cmd(mvm, vif); - goto out; - } - - ret = -EINVAL; -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[512]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_range_req_cmd *cmd; - int i; - - cmd = &mvm->tof_data.range_req; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n", - cmd->request_id); - pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n", - cmd->initiator); - pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n", - cmd->one_sided_los_disable); - pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n", - cmd->req_timeout); - pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n", - cmd->report_policy); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n", - cmd->macaddr_random); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n", - cmd->macaddr_template); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n", - cmd->macaddr_mask); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n", - cmd->num_of_ap); - for (i = 0; i < cmd->num_of_ap; i++) { - struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i]; - - pos += scnprintf(buf + pos, bufsz - pos, - "ap %.2d: channel_num=%hhd bw=%hhd" - " control=%hhd bssid=%pM type=%hhd" - " num_of_bursts=%hhd burst_period=%hd ftm=%hhd" - " retries=%hhd tsf_delta=%d" - " tsf_delta_direction=%hhd location_req=0x%hhx " - " asap=%hhd enable=%hhd rssi=%hhd\n", - i, ap->channel_num, ap->bandwidth, - ap->ctrl_ch_position, ap->bssid, - ap->measure_type, ap->num_of_bursts, - ap->burst_period, ap->samples_per_burst, - ap->retries_per_sample, ap->tsf_delta, - ap->tsf_delta_direction, - ap->location_req, ap->asap_mode, - ap->enable_dyn_ack, ap->rssi); - } - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - 
mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.tsf_timer_offset_msec = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("min_delta_ftm=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.min_delta_ftm = value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw20M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw40M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw80M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("send_range_req_ext=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) - ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif); - goto out; - } - - ret = -EINVAL; -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_range_req_ext_cmd *cmd; - - cmd = &mvm->tof_data.range_req_ext; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, - "tsf_timer_offset_msec = %hd\n", - cmd->tsf_timer_offset_msec); - pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n", - cmd->min_delta_ftm); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw20M = %hhd\n", - cmd->ftm_format_and_bw20M); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw40M = %hhd\n", - cmd->ftm_format_and_bw40M); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw80M = %hhd\n", - cmd->ftm_format_and_bw80M); - - mutex_unlock(&mvm->mutex); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int abort_id, ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("abort_id=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.last_abort_id = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_range_abort=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - abort_id = mvm->tof_data.last_abort_id; - ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[32]; - int pos = 0; - const size_t bufsz = sizeof(buf); - 
int last_abort_id; - - mutex_lock(&mvm->mutex); - last_abort_id = mvm->tof_data.last_abort_id; - mutex_unlock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n", - last_abort_id); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char *buf; - int pos = 0; - const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256; - struct iwl_tof_range_rsp_ntfy *cmd; - int i, ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - cmd = &mvm->tof_data.range_resp; - - pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n", - cmd->request_id); - pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n", - cmd->request_status); - pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n", - cmd->last_in_batch); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n", - cmd->num_of_aps); - for (i = 0; i < cmd->num_of_aps; i++) { - struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i]; - - pos += scnprintf(buf + pos, bufsz - pos, - "ap %.2d: bssid=%pM status=%hhd bw=%hhd" - " rtt=%d rtt_var=%d rtt_spread=%d" - " rssi=%hhd rssi_spread=%hhd" - " range=%d range_var=%d" - " time_stamp=%d\n", - i, ap->bssid, ap->measure_status, - ap->measure_bw, - ap->rtt, ap->rtt_variance, ap->rtt_spread, - ap->rssi, ap->rssi_spread, ap->range, - ap->range_variance, ap->timestamp); - } - mutex_unlock(&mvm->mutex); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u8 value; - int ret; - - ret = kstrtou8(buf, 0, &value); - if (ret) - return ret; - if (value > 1) - return -EINVAL; - - mutex_lock(&mvm->mutex); - iwl_mvm_update_low_latency(mvm, vif, value); - mutex_unlock(&mvm->mutex); - - return count; -} - -static ssize_t iwl_dbgfs_low_latency_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[2]; - - buf[0] = mvmvif->low_latency ? '1' : '0'; - buf[1] = '\n'; - return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); -} - -static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[20]; - int len; - - len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - bool ret; - - mutex_lock(&mvm->mutex); - ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid); - mutex_unlock(&mvm->mutex); - - return ret ? 
count : -EINVAL; -} - -static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - struct ieee80211_chanctx_conf *chanctx_conf; - struct iwl_mvm_phy_ctxt *phy_ctxt; - u16 value; - int ret; - - ret = kstrtou16(buf, 0, &value); - if (ret) - return ret; - - mutex_lock(&mvm->mutex); - rcu_read_lock(); - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - /* make sure the channel context is assigned */ - if (!chanctx_conf) { - rcu_read_unlock(); - mutex_unlock(&mvm->mutex); - return -EINVAL; - } - - phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv]; - rcu_read_unlock(); - - mvm->dbgfs_rx_phyinfo = value; - - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def, - chanctx_conf->rx_chains_static, - chanctx_conf->rx_chains_dynamic); - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[8]; - - snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo); - - return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); -} - -#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) -#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) -#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, vif, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ - } while (0) - -MVM_DEBUGFS_READ_FILE_OPS(mac_params); -MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); -MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); - -void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct dentry *dbgfs_dir = vif->debugfs_dir; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[100]; - - /* - * Check if debugfs directory already exist before creating it. 
- * This may happen when, for example, resetting hw or suspend-resume - */ - if (!dbgfs_dir || mvmvif->dbgfs_dir) - return; - - mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); - - if (!mvmvif->dbgfs_dir) { - IWL_ERR(mvm, "Failed to create debugfs directory under %s\n", - dbgfs_dir->d_name.name); - return; - } - - if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && - ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) || - (vif->type == NL80211_IFTYPE_STATION && vif->p2p && - mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))) - MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR | - S_IRUSR); - - MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - - if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && - mvmvif == mvm->bf_allowed_vif) - MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && - !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) { - if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) - MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, - mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, - S_IRUSR); - } - - /* - * Create symlink for convenience pointing to interface specific - * debugfs entries for the driver. For example, under - * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/ - * find - * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/ - */ - snprintf(buf, 100, "../../../%s/%s/%s/%s", - dbgfs_dir->d_parent->d_parent->d_name.name, - dbgfs_dir->d_parent->d_name.name, - dbgfs_dir->d_name.name, - mvmvif->dbgfs_dir->d_name.name); - - mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, - mvm->debugfs_dir, buf); - if (!mvmvif->dbgfs_slink) - IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n", - dbgfs_dir->d_name.name); - return; -err: - IWL_ERR(mvm, "Can't create debugfs entity\n"); -} - -void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - debugfs_remove(mvmvif->dbgfs_slink); - mvmvif->dbgfs_slink = NULL; - - debugfs_remove_recursive(mvmvif->dbgfs_dir); - mvmvif->dbgfs_dir = NULL; -} diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c deleted file mode 100644 index 05928fb4021d..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ /dev/null @@ -1,1516 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include - -#include "mvm.h" -#include "sta.h" -#include "iwl-io.h" -#include "debugfs.h" -#include "iwl-fw-error-dump.h" - -static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int ret; - u32 scd_q_msk; - - if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) - return -EIO; - - if (sscanf(buf, "%x", &scd_q_msk) != 1) - return -EINVAL; - - IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk); - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? 
: count; - mutex_unlock(&mvm->mutex); - - return ret; -} - -static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_sta *mvmsta; - int sta_id, drain, ret; - - if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) - return -EIO; - - if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) - return -EINVAL; - if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT) - return -EINVAL; - if (drain < 0 || drain > 1) - return -EINVAL; - - mutex_lock(&mvm->mutex); - - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); - - if (!mvmsta) - ret = -ENOENT; - else - ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count; - - mutex_unlock(&mvm->mutex); - - return ret; -} - -static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - const struct fw_img *img; - unsigned int ofs, len; - size_t ret; - u8 *ptr; - - if (!mvm->ucode_loaded) - return -EINVAL; - - /* default is to dump the entire data segment */ - img = &mvm->fw->img[mvm->cur_ucode]; - ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; - len = img->sec[IWL_UCODE_SECTION_DATA].len; - - if (mvm->dbgfs_sram_len) { - ofs = mvm->dbgfs_sram_offset; - len = mvm->dbgfs_sram_len; - } - - ptr = kzalloc(len, GFP_KERNEL); - if (!ptr) - return -ENOMEM; - - iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); - - ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len); - - kfree(ptr); - - return ret; -} - -static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - const struct fw_img *img; - u32 offset, len; - u32 img_offset, img_len; - - if (!mvm->ucode_loaded) - return -EINVAL; - - img = &mvm->fw->img[mvm->cur_ucode]; - img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; - img_len = img->sec[IWL_UCODE_SECTION_DATA].len; - - if (sscanf(buf, "%x,%x", &offset, &len) == 2) { - if ((offset & 0x3) || (len & 0x3)) - return -EINVAL; - - if (offset + len > img_offset + img_len) - return -EINVAL; - - mvm->dbgfs_sram_offset = offset; - mvm->dbgfs_sram_len = len; - } else { - mvm->dbgfs_sram_offset = 0; - mvm->dbgfs_sram_len = 0; - } - - return count; -} - -static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - char buf[16]; - int pos; - - if (!mvm->temperature_test) - pos = scnprintf(buf , sizeof(buf), "disabled\n"); - else - pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -/* - * Set NIC Temperature - * Cause the driver to ignore the actual NIC temperature reported by the FW - * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN - - * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX - * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE - */ -static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - int temperature; - - if (!mvm->ucode_loaded && !mvm->temperature_test) - return -EIO; - - if (kstrtoint(buf, 10, &temperature)) - return -EINVAL; - /* not a legal temperature */ - if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && - temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || - temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) - return -EINVAL; - - mutex_lock(&mvm->mutex); - if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { - if (!mvm->temperature_test) - goto out; - - 
mvm->temperature_test = false; - /* Since we can't read the temp while awake, just set - * it to zero until we get the next RX stats from the - * firmware. - */ - mvm->temperature = 0; - } else { - mvm->temperature_test = true; - mvm->temperature = temperature; - } - IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", - mvm->temperature_test ? "En" : "Dis" , - mvm->temperature); - /* handle the temperature change */ - iwl_mvm_tt_handler(mvm); - -out: - mutex_unlock(&mvm->mutex); - - return count; -} - -static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - char buf[16]; - int pos, temp; - - if (!mvm->ucode_loaded) - return -EIO; - - mutex_lock(&mvm->mutex); - temp = iwl_mvm_get_temp(mvm); - mutex_unlock(&mvm->mutex); - - if (temp < 0) - return temp; - - pos = scnprintf(buf , sizeof(buf), "%d\n", temp); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct ieee80211_sta *sta; - char buf[400]; - int i, pos = 0, bufsz = sizeof(buf); - - mutex_lock(&mvm->mutex); - - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (!sta) - pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); - else if (IS_ERR(sta)) - pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", - PTR_ERR(sta)); - else - pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", - sta->addr); - } - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - char buf[64]; - int bufsz = sizeof(buf); - int pos = 0; - - pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n", - mvm->disable_power_off); - pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n", - mvm->disable_power_off_d3); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int ret, val; - - if (!mvm->ucode_loaded) - return -EIO; - - if (!strncmp("disable_power_off_d0=", buf, 21)) { - if (sscanf(buf + 21, "%d", &val) != 1) - return -EINVAL; - mvm->disable_power_off = val; - } else if (!strncmp("disable_power_off_d3=", buf, 21)) { - if (sscanf(buf + 21, "%d", &val) != 1) - return -EINVAL; - mvm->disable_power_off_d3 = val; - } else { - return -EINVAL; - } - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_power_update_device(mvm); - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -#define BT_MBOX_MSG(_notif, _num, _field) \ - ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ - >> BT_MBOX##_num##_##_field##_POS) - - -#define BT_MBOX_PRINT(_num, _field, _end) \ - pos += scnprintf(buf + pos, bufsz - pos, \ - "\t%s: %d%s", \ - #_field, \ - BT_MBOX_MSG(notif, _num, _field), \ - true ? 
"\n" : ", "); - -static -int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, - int pos, int bufsz) -{ - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); - - BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); - BT_MBOX_PRINT(0, LE_PROF1, false); - BT_MBOX_PRINT(0, LE_PROF2, false); - BT_MBOX_PRINT(0, LE_PROF_OTHER, false); - BT_MBOX_PRINT(0, CHL_SEQ_N, false); - BT_MBOX_PRINT(0, INBAND_S, false); - BT_MBOX_PRINT(0, LE_MIN_RSSI, false); - BT_MBOX_PRINT(0, LE_SCAN, false); - BT_MBOX_PRINT(0, LE_ADV, false); - BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); - BT_MBOX_PRINT(0, OPEN_CON_1, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); - - BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); - BT_MBOX_PRINT(1, IP_SR, false); - BT_MBOX_PRINT(1, LE_MSTR, false); - BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); - BT_MBOX_PRINT(1, MSG_TYPE, false); - BT_MBOX_PRINT(1, SSN, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); - - BT_MBOX_PRINT(2, SNIFF_ACT, false); - BT_MBOX_PRINT(2, PAG, false); - BT_MBOX_PRINT(2, INQUIRY, false); - BT_MBOX_PRINT(2, CONN, false); - BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); - BT_MBOX_PRINT(2, DISC, false); - BT_MBOX_PRINT(2, SCO_TX_ACT, false); - BT_MBOX_PRINT(2, SCO_RX_ACT, false); - BT_MBOX_PRINT(2, ESCO_RE_TX, false); - BT_MBOX_PRINT(2, SCO_DURATION, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); - - BT_MBOX_PRINT(3, SCO_STATE, false); - BT_MBOX_PRINT(3, SNIFF_STATE, false); - BT_MBOX_PRINT(3, A2DP_STATE, false); - BT_MBOX_PRINT(3, ACL_STATE, false); - BT_MBOX_PRINT(3, MSTR_STATE, false); - BT_MBOX_PRINT(3, OBX_STATE, false); - BT_MBOX_PRINT(3, OPEN_CON_2, false); - BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); - BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); - BT_MBOX_PRINT(3, INBAND_P, false); - BT_MBOX_PRINT(3, MSG_TYPE_2, false); - BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, true); - - return pos; -} - -static -int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif, - char *buf, int pos, int bufsz) -{ - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); - - BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); - BT_MBOX_PRINT(0, LE_PROF1, false); - BT_MBOX_PRINT(0, LE_PROF2, false); - BT_MBOX_PRINT(0, LE_PROF_OTHER, false); - BT_MBOX_PRINT(0, CHL_SEQ_N, false); - BT_MBOX_PRINT(0, INBAND_S, false); - BT_MBOX_PRINT(0, LE_MIN_RSSI, false); - BT_MBOX_PRINT(0, LE_SCAN, false); - BT_MBOX_PRINT(0, LE_ADV, false); - BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); - BT_MBOX_PRINT(0, OPEN_CON_1, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); - - BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); - BT_MBOX_PRINT(1, IP_SR, false); - BT_MBOX_PRINT(1, LE_MSTR, false); - BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); - BT_MBOX_PRINT(1, MSG_TYPE, false); - BT_MBOX_PRINT(1, SSN, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); - - BT_MBOX_PRINT(2, SNIFF_ACT, false); - BT_MBOX_PRINT(2, PAG, false); - BT_MBOX_PRINT(2, INQUIRY, false); - BT_MBOX_PRINT(2, CONN, false); - BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); - BT_MBOX_PRINT(2, DISC, false); - BT_MBOX_PRINT(2, SCO_TX_ACT, false); - BT_MBOX_PRINT(2, SCO_RX_ACT, false); - BT_MBOX_PRINT(2, ESCO_RE_TX, false); - BT_MBOX_PRINT(2, SCO_DURATION, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); - - BT_MBOX_PRINT(3, SCO_STATE, false); - BT_MBOX_PRINT(3, SNIFF_STATE, false); - BT_MBOX_PRINT(3, A2DP_STATE, false); - BT_MBOX_PRINT(3, ACL_STATE, false); - BT_MBOX_PRINT(3, MSTR_STATE, false); - BT_MBOX_PRINT(3, OBX_STATE, false); - BT_MBOX_PRINT(3, OPEN_CON_2, false); 
- BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); - BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); - BT_MBOX_PRINT(3, INBAND_P, false); - BT_MBOX_PRINT(3, MSG_TYPE_2, false); - BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, true); - - return pos; -} - -static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - char *buf; - int ret, pos = 0, bufsz = sizeof(char) * 1024; - - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - struct iwl_bt_coex_profile_notif_old *notif = - &mvm->last_bt_notif_old; - - pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz); - - pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", - notif->bt_ci_compliance); - pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", - le32_to_cpu(notif->primary_ch_lut)); - pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - pos += scnprintf(buf+pos, - bufsz-pos, "bt_activity_grading = %d\n", - le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf+pos, bufsz-pos, - "antenna isolation = %d CORUN LUT index = %d\n", - mvm->last_ant_isol, mvm->last_corun_lut); - } else { - struct iwl_bt_coex_profile_notif *notif = - &mvm->last_bt_notif; - - pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); - - pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", - notif->bt_ci_compliance); - pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", - le32_to_cpu(notif->primary_ch_lut)); - pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - pos += scnprintf(buf+pos, - bufsz-pos, "bt_activity_grading = %d\n", - le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf+pos, bufsz-pos, - "antenna isolation = %d CORUN LUT index = %d\n", - mvm->last_ant_isol, mvm->last_corun_lut); - } - - mutex_unlock(&mvm->mutex); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - - return ret; -} -#undef BT_MBOX_PRINT - -static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - char buf[256]; - int bufsz = sizeof(buf); - int pos = 0; - - mutex_lock(&mvm->mutex); - - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old; - - pos += scnprintf(buf+pos, bufsz-pos, - "Channel inhibition CMD\n"); - pos += scnprintf(buf+pos, bufsz-pos, - "\tPrimary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_primary_ci)); - pos += scnprintf(buf+pos, bufsz-pos, - "\tSecondary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_secondary_ci)); - - pos += scnprintf(buf+pos, bufsz-pos, - "BT Configuration CMD - 0=default, 1=never, 2=always\n"); - pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n", - mvm->bt_ack_kill_msk[0]); - pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n", - mvm->bt_cts_kill_msk[0]); - - } else { - struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; - - pos += scnprintf(buf+pos, bufsz-pos, - "Channel inhibition CMD\n"); - pos += scnprintf(buf+pos, bufsz-pos, - "\tPrimary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_primary_ci)); - pos += scnprintf(buf+pos, bufsz-pos, - "\tSecondary Channel Bitmap 0x%016llx\n", - 
le64_to_cpu(cmd->bt_secondary_ci)); - } - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - u32 bt_tx_prio; - - if (sscanf(buf, "%u", &bt_tx_prio) != 1) - return -EINVAL; - if (bt_tx_prio > 4) - return -EINVAL; - - mvm->bt_tx_prio = bt_tx_prio; - - return count; -} - -static ssize_t -iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - static const char * const modes_str[BT_FORCE_ANT_MAX] = { - [BT_FORCE_ANT_DIS] = "dis", - [BT_FORCE_ANT_AUTO] = "auto", - [BT_FORCE_ANT_BT] = "bt", - [BT_FORCE_ANT_WIFI] = "wifi", - }; - int ret, bt_force_ant_mode; - - for (bt_force_ant_mode = 0; - bt_force_ant_mode < ARRAY_SIZE(modes_str); - bt_force_ant_mode++) { - if (!strcmp(buf, modes_str[bt_force_ant_mode])) - break; - } - - if (bt_force_ant_mode >= ARRAY_SIZE(modes_str)) - return -EINVAL; - - ret = 0; - mutex_lock(&mvm->mutex); - if (mvm->bt_force_ant_mode == bt_force_ant_mode) - goto out; - - mvm->bt_force_ant_mode = bt_force_ant_mode; - IWL_DEBUG_COEX(mvm, "Force mode: %s\n", - modes_str[mvm->bt_force_ant_mode]); - ret = iwl_send_bt_init_conf(mvm); - -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -#define PRINT_STATS_LE32(_struct, _memb) \ - pos += scnprintf(buf + pos, bufsz - pos, \ - fmt_table, #_memb, \ - le32_to_cpu(_struct->_memb)) - -static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - static const char *fmt_table = "\t%-30s %10u\n"; - static const char *fmt_header = "%-32s\n"; - int pos = 0; - char *buf; - int ret; - /* 43 is the size of each data line, 33 is the size of each header */ - size_t bufsz = - ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) + - (4 * 33) + 1; - - struct mvm_statistics_rx_phy *ofdm; - struct mvm_statistics_rx_phy *cck; - struct mvm_statistics_rx_non_phy *general; - struct mvm_statistics_rx_ht_phy *ht; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - - ofdm = &mvm->rx_stats.ofdm; - cck = &mvm->rx_stats.cck; - general = &mvm->rx_stats.general; - ht = &mvm->rx_stats.ofdm_ht; - - pos += scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_Rx - OFDM"); - PRINT_STATS_LE32(ofdm, ina_cnt); - PRINT_STATS_LE32(ofdm, fina_cnt); - PRINT_STATS_LE32(ofdm, plcp_err); - PRINT_STATS_LE32(ofdm, crc32_err); - PRINT_STATS_LE32(ofdm, overrun_err); - PRINT_STATS_LE32(ofdm, early_overrun_err); - PRINT_STATS_LE32(ofdm, crc32_good); - PRINT_STATS_LE32(ofdm, false_alarm_cnt); - PRINT_STATS_LE32(ofdm, fina_sync_err_cnt); - PRINT_STATS_LE32(ofdm, sfd_timeout); - PRINT_STATS_LE32(ofdm, fina_timeout); - PRINT_STATS_LE32(ofdm, unresponded_rts); - PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); - PRINT_STATS_LE32(ofdm, sent_ack_cnt); - PRINT_STATS_LE32(ofdm, sent_cts_cnt); - PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); - PRINT_STATS_LE32(ofdm, dsp_self_kill); - PRINT_STATS_LE32(ofdm, mh_format_err); - PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum); - PRINT_STATS_LE32(ofdm, reserved); - - pos += scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_Rx - CCK"); - PRINT_STATS_LE32(cck, ina_cnt); - PRINT_STATS_LE32(cck, fina_cnt); - PRINT_STATS_LE32(cck, plcp_err); - PRINT_STATS_LE32(cck, crc32_err); - PRINT_STATS_LE32(cck, overrun_err); - PRINT_STATS_LE32(cck, early_overrun_err); - PRINT_STATS_LE32(cck, 
crc32_good); - PRINT_STATS_LE32(cck, false_alarm_cnt); - PRINT_STATS_LE32(cck, fina_sync_err_cnt); - PRINT_STATS_LE32(cck, sfd_timeout); - PRINT_STATS_LE32(cck, fina_timeout); - PRINT_STATS_LE32(cck, unresponded_rts); - PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); - PRINT_STATS_LE32(cck, sent_ack_cnt); - PRINT_STATS_LE32(cck, sent_cts_cnt); - PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); - PRINT_STATS_LE32(cck, dsp_self_kill); - PRINT_STATS_LE32(cck, mh_format_err); - PRINT_STATS_LE32(cck, re_acq_main_rssi_sum); - PRINT_STATS_LE32(cck, reserved); - - pos += scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_Rx - GENERAL"); - PRINT_STATS_LE32(general, bogus_cts); - PRINT_STATS_LE32(general, bogus_ack); - PRINT_STATS_LE32(general, non_bssid_frames); - PRINT_STATS_LE32(general, filtered_frames); - PRINT_STATS_LE32(general, non_channel_beacons); - PRINT_STATS_LE32(general, channel_beacons); - PRINT_STATS_LE32(general, num_missed_bcon); - PRINT_STATS_LE32(general, adc_rx_saturation_time); - PRINT_STATS_LE32(general, ina_detection_search_time); - PRINT_STATS_LE32(general, beacon_silence_rssi_a); - PRINT_STATS_LE32(general, beacon_silence_rssi_b); - PRINT_STATS_LE32(general, beacon_silence_rssi_c); - PRINT_STATS_LE32(general, interference_data_flag); - PRINT_STATS_LE32(general, channel_load); - PRINT_STATS_LE32(general, dsp_false_alarms); - PRINT_STATS_LE32(general, beacon_rssi_a); - PRINT_STATS_LE32(general, beacon_rssi_b); - PRINT_STATS_LE32(general, beacon_rssi_c); - PRINT_STATS_LE32(general, beacon_energy_a); - PRINT_STATS_LE32(general, beacon_energy_b); - PRINT_STATS_LE32(general, beacon_energy_c); - PRINT_STATS_LE32(general, num_bt_kills); - PRINT_STATS_LE32(general, mac_id); - PRINT_STATS_LE32(general, directed_data_mpdu); - - pos += scnprintf(buf + pos, bufsz - pos, fmt_header, - "Statistics_Rx - HT"); - PRINT_STATS_LE32(ht, plcp_err); - PRINT_STATS_LE32(ht, overrun_err); - PRINT_STATS_LE32(ht, early_overrun_err); - PRINT_STATS_LE32(ht, crc32_good); - PRINT_STATS_LE32(ht, crc32_err); - PRINT_STATS_LE32(ht, mh_format_err); - PRINT_STATS_LE32(ht, agg_crc32_good); - PRINT_STATS_LE32(ht, agg_mpdu_cnt); - PRINT_STATS_LE32(ht, agg_cnt); - PRINT_STATS_LE32(ht, unsupport_mcs); - - mutex_unlock(&mvm->mutex); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - - return ret; -} -#undef PRINT_STAT_LE32 - -static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, - char __user *user_buf, size_t count, - loff_t *ppos, - struct iwl_mvm_frame_stats *stats) -{ - char *buff, *pos, *endpos; - int idx, i; - int ret; - static const size_t bufsz = 1024; - - buff = kmalloc(bufsz, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - spin_lock_bh(&mvm->drv_stats_lock); - - pos = buff; - endpos = pos + bufsz; - - pos += scnprintf(pos, endpos - pos, - "Legacy/HT/VHT\t:\t%d/%d/%d\n", - stats->legacy_frames, - stats->ht_frames, - stats->vht_frames); - pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", - stats->bw_20_frames, - stats->bw_40_frames, - stats->bw_80_frames); - pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", - stats->ngi_frames, - stats->sgi_frames); - pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", - stats->siso_frames, - stats->mimo2_frames); - pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", - stats->fail_frames, - stats->success_frames); - pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", - stats->agg_frames); - pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", - stats->ampdu_count); - pos += 
scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n", - stats->ampdu_count > 0 ? - (stats->agg_frames / stats->ampdu_count) : 0); - - pos += scnprintf(pos, endpos - pos, "Last Rates\n"); - - idx = stats->last_frame_idx - 1; - for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) { - idx = (idx + 1) % ARRAY_SIZE(stats->last_rates); - if (stats->last_rates[idx] == 0) - continue; - pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", - (int)(ARRAY_SIZE(stats->last_rates) - i)); - pos += rs_pretty_print_rate(pos, stats->last_rates[idx]); - } - spin_unlock_bh(&mvm->drv_stats_lock); - - ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); - kfree(buff); - - return ret; -} - -static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - - return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, - &mvm->drv_rx_stats); -} - -static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int ret; - - mutex_lock(&mvm->mutex); - - /* allow one more restart that we're provoking here */ - if (mvm->restart_fw >= 0) - mvm->restart_fw++; - - /* take the return value to make compiler happy - it will fail anyway */ - ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL); - - mutex_unlock(&mvm->mutex); - - return count; -} - -static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI); - if (ret) - return ret; - - iwl_force_nmi(mvm->trans); - - iwl_mvm_unref(mvm, IWL_MVM_REF_NMI); - - return count; -} - -static ssize_t -iwl_dbgfs_scan_ant_rxchain_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - int pos = 0; - char buf[32]; - const size_t bufsz = sizeof(buf); - - /* print which antennas were set for the scan command by the user */ - pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: "); - if (mvm->scan_rx_ant & ANT_A) - pos += scnprintf(buf + pos, bufsz - pos, "A"); - if (mvm->scan_rx_ant & ANT_B) - pos += scnprintf(buf + pos, bufsz - pos, "B"); - if (mvm->scan_rx_ant & ANT_C) - pos += scnprintf(buf + pos, bufsz - pos, "C"); - pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - u8 scan_rx_ant; - - if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) - return -EINVAL; - if (scan_rx_ant > ANT_ABC) - return -EINVAL; - if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) - return -EINVAL; - - if (mvm->scan_rx_ant != scan_rx_ant) { - mvm->scan_rx_ant = scan_rx_ant; - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - iwl_mvm_config_scan(mvm); - } - - return count; -} - -static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - int conf; - char buf[8]; - const size_t bufsz = sizeof(buf); - int pos = 0; - - mutex_lock(&mvm->mutex); - conf = mvm->fw_dbg_conf; - mutex_unlock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - unsigned int 
conf_id; - int ret; - - ret = kstrtouint(buf, 0, &conf_id); - if (ret) - return ret; - - if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) - return -EINVAL; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id); - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); - - if (ret) - return ret; - - iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); - - return count; -} - -#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bcast_filter_cmd cmd; - const struct iwl_fw_bcast_filter *filter; - char *buf; - int bufsz = 1024; - int i, j, pos = 0; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { - ADD_TEXT("None\n"); - mutex_unlock(&mvm->mutex); - goto out; - } - mutex_unlock(&mvm->mutex); - - for (i = 0; cmd.filters[i].attrs[0].mask; i++) { - filter = &cmd.filters[i]; - - ADD_TEXT("Filter [%d]:\n", i); - ADD_TEXT("\tDiscard=%d\n", filter->discard); - ADD_TEXT("\tFrame Type: %s\n", - filter->frame_type ? "IPv4" : "Generic"); - - for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) { - const struct iwl_fw_bcast_filter_attr *attr; - - attr = &filter->attrs[j]; - if (!attr->mask) - break; - - ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", - j, attr->offset, - attr->offset_type ? 
"IP End" : - "Payload Start", - be32_to_cpu(attr->mask), - be32_to_cpu(attr->val), - le16_to_cpu(attr->reserved1)); - } - } -out: - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int pos, next_pos; - struct iwl_fw_bcast_filter filter = {}; - struct iwl_bcast_filter_cmd cmd; - u32 filter_id, attr_id, mask, value; - int err = 0; - - if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, - &filter.frame_type, &pos) != 3) - return -EINVAL; - - if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) || - filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) - return -EINVAL; - - for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); - attr_id++) { - struct iwl_fw_bcast_filter_attr *attr = - &filter.attrs[attr_id]; - - if (pos >= count) - break; - - if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", - &attr->offset, &attr->offset_type, - &mask, &value, &next_pos) != 4) - return -EINVAL; - - attr->mask = cpu_to_be32(mask); - attr->val = cpu_to_be32(value); - if (mask) - filter.num_attrs++; - - pos += next_pos; - } - - mutex_lock(&mvm->mutex); - memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], - &filter, sizeof(filter)); - - /* send updated bcast filtering configuration */ - if (mvm->dbgfs_bcast_filtering.override && - iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); - mutex_unlock(&mvm->mutex); - - return err ?: count; -} - -static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bcast_filter_cmd cmd; - char *buf; - int bufsz = 1024; - int i, pos = 0; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { - ADD_TEXT("None\n"); - mutex_unlock(&mvm->mutex); - goto out; - } - mutex_unlock(&mvm->mutex); - - for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) { - const struct iwl_fw_bcast_mac *mac = &cmd.macs[i]; - - ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", - i, mac->default_discard, mac->attached_filters); - } -out: - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_bcast_filter_cmd cmd; - struct iwl_fw_bcast_mac mac = {}; - u32 mac_id, attached_filters; - int err = 0; - - if (!mvm->bcast_filters) - return -ENOENT; - - if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, - &attached_filters) != 3) - return -EINVAL; - - if (mac_id >= ARRAY_SIZE(cmd.macs) || - mac.default_discard > 1 || - attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) - return -EINVAL; - - mac.attached_filters = cpu_to_le16(attached_filters); - - mutex_lock(&mvm->mutex); - memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], - &mac, sizeof(mac)); - - /* send updated bcast filtering configuration */ - if (mvm->dbgfs_bcast_filtering.override && - iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); - mutex_unlock(&mvm->mutex); - - return err ?: count; -} -#endif - -#ifdef CONFIG_PM_SLEEP -static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int 
store; - - if (sscanf(buf, "%d", &store) != 1) - return -EINVAL; - - mvm->store_d3_resume_sram = store; - - return count; -} - -static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - const struct fw_img *img; - int ofs, len, pos = 0; - size_t bufsz, ret; - char *buf; - u8 *ptr = mvm->d3_resume_sram; - - img = &mvm->fw->img[IWL_UCODE_WOWLAN]; - len = img->sec[IWL_UCODE_SECTION_DATA].len; - - bufsz = len * 4 + 256; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n", - mvm->store_d3_resume_sram ? "en" : "dis"); - - if (ptr) { - for (ofs = 0; ofs < len; ofs += 16) { - pos += scnprintf(buf + pos, bufsz - pos, - "0x%.4x %16ph\n", ofs, ptr + ofs); - } - } else { - pos += scnprintf(buf + pos, bufsz - pos, - "(no data captured)\n"); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - - kfree(buf); - - return ret; -} -#endif - -#define PRINT_MVM_REF(ref) do { \ - if (mvm->refs[ref]) \ - pos += scnprintf(buf + pos, bufsz - pos, \ - "\t(0x%lx): %d %s\n", \ - BIT(ref), mvm->refs[ref], #ref); \ -} while (0) - -static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - int i, pos = 0; - char buf[256]; - const size_t bufsz = sizeof(buf); - u32 refs = 0; - - for (i = 0; i < IWL_MVM_REF_COUNT; i++) - if (mvm->refs[i]) - refs |= BIT(i); - - pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n", - refs); - - PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN); - PRINT_MVM_REF(IWL_MVM_REF_SCAN); - PRINT_MVM_REF(IWL_MVM_REF_ROC); - PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX); - PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT); - PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS); - PRINT_MVM_REF(IWL_MVM_REF_USER); - PRINT_MVM_REF(IWL_MVM_REF_TX); - PRINT_MVM_REF(IWL_MVM_REF_TX_AGG); - PRINT_MVM_REF(IWL_MVM_REF_ADD_IF); - PRINT_MVM_REF(IWL_MVM_REF_START_AP); - PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED); - PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX); - PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS); - PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL); - PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ); - PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE); - PRINT_MVM_REF(IWL_MVM_REF_NMI); - PRINT_MVM_REF(IWL_MVM_REF_TM_CMD); - PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK); - PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA); - PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - unsigned long value; - int ret; - bool taken; - - ret = kstrtoul(buf, 10, &value); - if (ret < 0) - return ret; - - mutex_lock(&mvm->mutex); - - taken = mvm->refs[IWL_MVM_REF_USER]; - if (value == 1 && !taken) - iwl_mvm_ref(mvm, IWL_MVM_REF_USER); - else if (value == 0 && taken) - iwl_mvm_unref(mvm, IWL_MVM_REF_USER); - else - ret = -EINVAL; - - mutex_unlock(&mvm->mutex); - - if (ret < 0) - return ret; - return count; -} - -#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) -#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) -#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ - if (!debugfs_create_file(alias, mode, parent, mvm, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ - } while (0) -#define MVM_DEBUGFS_ADD_FILE(name, parent, 
mode) \ - MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) - -static ssize_t -iwl_dbgfs_prph_reg_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - int pos = 0; - char buf[32]; - const size_t bufsz = sizeof(buf); - int ret; - - if (!mvm->dbgfs_prph_reg_addr) - return -EINVAL; - - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ); - if (ret) - return ret; - - pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", - mvm->dbgfs_prph_reg_addr, - iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr)); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - u8 args; - u32 value; - int ret; - - args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value); - /* if we only want to set the reg address - nothing more to do */ - if (args == 1) - goto out; - - /* otherwise, make sure we have both address and value */ - if (args != 2) - return -EINVAL; - - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); - if (ret) - return ret; - - iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); -out: - return count; -} - -static ssize_t -iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int ret; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); - -/* Device wide debugfs entries */ -MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); -MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); -MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); -MVM_DEBUGFS_READ_FILE_OPS(nic_temp); -MVM_DEBUGFS_READ_FILE_OPS(stations); -MVM_DEBUGFS_READ_FILE_OPS(bt_notif); -MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); -MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); -MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); -MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); -MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8); - -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); -#endif - -#ifdef CONFIG_PM_SLEEP -MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8); -#endif - -int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) -{ - struct dentry *bcast_dir __maybe_unused; - char buf[100]; - - spin_lock_init(&mvm->drv_stats_lock); - - mvm->debugfs_dir = dbgfs_dir; - - MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, - S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); - 
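
For reference, each MVM_DEBUGFS_ADD_FILE() call in this registration function stringifies the entry name and hands the matching iwl_dbgfs_<name>_ops (generated by the *_FILE_OPS() macros above) to debugfs_create_file(). A hand expansion of the bt_cmd entry just above, shown only as an illustrative sketch of what the macro produces:

	/* MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR) expands to roughly: */
	do {
		if (!debugfs_create_file("bt_cmd", S_IRUSR, dbgfs_dir, mvm,
					 &iwl_dbgfs_bt_cmd_ops))
			goto err;
	} while (0);

Here iwl_dbgfs_bt_cmd_ops comes from MVM_DEBUGFS_READ_FILE_OPS(bt_cmd), which wires .read to iwl_dbgfs_bt_cmd_read() and .open to simple_open; the read/write wrapper macros and their fixed buffer sizes are visible in the debugfs.h hunk removed below.
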
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, - S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); - if (!debugfs_create_bool("enable_scan_iteration_notif", - S_IRUSR | S_IWUSR, - mvm->debugfs_dir, - &mvm->scan_iter_notif_enabled)) - goto err; - -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { - bcast_dir = debugfs_create_dir("bcast_filtering", - mvm->debugfs_dir); - if (!bcast_dir) - goto err; - - if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR, - bcast_dir, - &mvm->dbgfs_bcast_filtering.override)) - goto err; - - MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, - bcast_dir, S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, - bcast_dir, S_IWUSR | S_IRUSR); - } -#endif - -#ifdef CONFIG_PM_SLEEP - MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR); - if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR, - mvm->debugfs_dir, &mvm->d3_wake_sysassert)) - goto err; - if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR, - mvm->debugfs_dir, &mvm->last_netdetect_scans)) - goto err; -#endif - - if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR, - mvm->debugfs_dir, - &mvm->low_latency_agg_frame_limit)) - goto err; - if (!debugfs_create_u8("ps_disabled", S_IRUSR, - mvm->debugfs_dir, &mvm->ps_disabled)) - goto err; - if (!debugfs_create_blob("nvm_hw", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_hw_blob)) - goto err; - if (!debugfs_create_blob("nvm_sw", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_sw_blob)) - goto err; - if (!debugfs_create_blob("nvm_calib", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_calib_blob)) - goto err; - if (!debugfs_create_blob("nvm_prod", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_prod_blob)) - goto err; - if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_phy_sku_blob)) - goto err; - - /* - * Create a symlink with mac80211. It will be removed when mac80211 - * exists (before the opmode exists which removes the target.) 
- */ - snprintf(buf, 100, "../../%s/%s", - dbgfs_dir->d_parent->d_parent->d_name.name, - dbgfs_dir->d_parent->d_name.name); - if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf)) - goto err; - - return 0; -err: - IWL_ERR(mvm, "Can't create the mvm debugfs directory\n"); - return -ENOMEM; -} diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/iwlwifi/mvm/debugfs.h deleted file mode 100644 index 8c4190e7e027..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.h +++ /dev/null @@ -1,103 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#define MVM_DEBUGFS_READ_FILE_OPS(name) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .read = iwl_dbgfs_##name##_read, \ - .open = simple_open, \ - .llseek = generic_file_llseek, \ -} - -#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ -static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ - const char __user *user_buf, \ - size_t count, loff_t *ppos) \ -{ \ - argtype *arg = file->private_data; \ - char buf[buflen] = {}; \ - size_t buf_size = min(count, sizeof(buf) - 1); \ - \ - if (copy_from_user(buf, user_buf, buf_size)) \ - return -EFAULT; \ - \ - return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \ -} \ - -#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \ -MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .write = _iwl_dbgfs_##name##_write, \ - .read = iwl_dbgfs_##name##_read, \ - .open = simple_open, \ - .llseek = generic_file_llseek, \ -}; - -#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \ -MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .write = _iwl_dbgfs_##name##_write, \ - .open = simple_open, \ - .llseek = generic_file_llseek, \ -}; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h deleted file mode 100644 index d398a6102805..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h +++ /dev/null @@ -1,476 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __fw_api_bt_coex_h__ -#define __fw_api_bt_coex_h__ - -#include -#include - -#define BITS(nb) (BIT(nb) - 1) - -/** - * enum iwl_bt_coex_flags - flags for BT_COEX command - * @BT_COEX_MODE_POS: - * @BT_COEX_MODE_MSK: - * @BT_COEX_DISABLE_OLD: - * @BT_COEX_2W_OLD: - * @BT_COEX_3W_OLD: - * @BT_COEX_NW_OLD: - * @BT_COEX_AUTO_OLD: - * @BT_COEX_BT_OLD: Antenna is for BT (manufacuring tests) - * @BT_COEX_WIFI_OLD: Antenna is for BT (manufacuring tests) - * @BT_COEX_SYNC2SCO: - * @BT_COEX_CORUNNING: - * @BT_COEX_MPLUT: - * @BT_COEX_TTC: - * @BT_COEX_RRC: - * - * The COEX_MODE must be set for each command. Even if it is not changed. - */ -enum iwl_bt_coex_flags { - BT_COEX_MODE_POS = 3, - BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS, - BT_COEX_DISABLE_OLD = 0x0 << BT_COEX_MODE_POS, - BT_COEX_2W_OLD = 0x1 << BT_COEX_MODE_POS, - BT_COEX_3W_OLD = 0x2 << BT_COEX_MODE_POS, - BT_COEX_NW_OLD = 0x3 << BT_COEX_MODE_POS, - BT_COEX_AUTO_OLD = 0x5 << BT_COEX_MODE_POS, - BT_COEX_BT_OLD = 0x6 << BT_COEX_MODE_POS, - BT_COEX_WIFI_OLD = 0x7 << BT_COEX_MODE_POS, - BT_COEX_SYNC2SCO = BIT(7), - BT_COEX_CORUNNING = BIT(8), - BT_COEX_MPLUT = BIT(9), - BT_COEX_TTC = BIT(20), - BT_COEX_RRC = BIT(21), -}; - -/* - * indicates what has changed in the BT_COEX command. - * BT_VALID_ENABLE must be set for each command. Commands without this bit will - * discarded by the firmware - */ -enum iwl_bt_coex_valid_bit_msk { - BT_VALID_ENABLE = BIT(0), - BT_VALID_BT_PRIO_BOOST = BIT(1), - BT_VALID_MAX_KILL = BIT(2), - BT_VALID_3W_TMRS = BIT(3), - BT_VALID_KILL_ACK = BIT(4), - BT_VALID_KILL_CTS = BIT(5), - BT_VALID_REDUCED_TX_POWER = BIT(6), - BT_VALID_LUT = BIT(7), - BT_VALID_WIFI_RX_SW_PRIO_BOOST = BIT(8), - BT_VALID_WIFI_TX_SW_PRIO_BOOST = BIT(9), - BT_VALID_MULTI_PRIO_LUT = BIT(10), - BT_VALID_TRM_KICK_FILTER = BIT(11), - BT_VALID_CORUN_LUT_20 = BIT(12), - BT_VALID_CORUN_LUT_40 = BIT(13), - BT_VALID_ANT_ISOLATION = BIT(14), - BT_VALID_ANT_ISOLATION_THRS = BIT(15), - BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16), - BT_VALID_TXRX_MAX_FREQ_0 = BIT(17), - BT_VALID_SYNC_TO_SCO = BIT(18), - BT_VALID_TTC = BIT(20), - BT_VALID_RRC = BIT(21), -}; - -/** - * enum iwl_bt_reduced_tx_power - allows to reduce txpower for WiFi frames. 
- * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames - * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames - * - * This mechanism allows to have BT and WiFi run concurrently. Since WiFi - * reduces its Tx power, it can work along with BT, hence reducing the amount - * of WiFi frames being killed by BT. - */ -enum iwl_bt_reduced_tx_power { - BT_REDUCED_TX_POWER_CTL = BIT(0), - BT_REDUCED_TX_POWER_DATA = BIT(1), -}; - -enum iwl_bt_coex_lut_type { - BT_COEX_TIGHT_LUT = 0, - BT_COEX_LOOSE_LUT, - BT_COEX_TX_DIS_LUT, - - BT_COEX_MAX_LUT, - BT_COEX_INVALID_LUT = 0xff, -}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */ - -#define BT_COEX_LUT_SIZE (12) -#define BT_COEX_CORUN_LUT_SIZE (32) -#define BT_COEX_MULTI_PRIO_LUT_SIZE (2) -#define BT_COEX_BOOST_SIZE (4) -#define BT_REDUCED_TX_POWER_BIT BIT(7) - -/** - * struct iwl_bt_coex_cmd_old - bt coex configuration command - * @flags:&enum iwl_bt_coex_flags - * @max_kill: - * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power - * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT - * should be set by default - * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT - * should be set by default - * @bt4_antenna_isolation: antenna isolation - * @bt4_antenna_isolation_thr: antenna threshold value - * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency - * @bt4_tx_rx_max_freq0: TxRx max frequency - * @bt_prio_boost: BT priority boost registers - * @wifi_tx_prio_boost: SW boost of wifi tx priority - * @wifi_rx_prio_boost: SW boost of wifi rx priority - * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK. - * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS. - * @decision_lut: PTA decision LUT, per Prio-Ch - * @bt4_multiprio_lut: multi priority LUT configuration - * @bt4_corun_lut20: co-running 20 MHz LUT configuration - * @bt4_corun_lut40: co-running 40 MHz LUT configuration - * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk - * - * The structure is used for the BT_COEX command. - */ -struct iwl_bt_coex_cmd_old { - __le32 flags; - u8 max_kill; - u8 bt_reduced_tx_power; - u8 override_primary_lut; - u8 override_secondary_lut; - - u8 bt4_antenna_isolation; - u8 bt4_antenna_isolation_thr; - u8 bt4_tx_tx_delta_freq_thr; - u8 bt4_tx_rx_max_freq0; - - __le32 bt_prio_boost[BT_COEX_BOOST_SIZE]; - __le32 wifi_tx_prio_boost; - __le32 wifi_rx_prio_boost; - __le32 kill_ack_msk; - __le32 kill_cts_msk; - - __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE]; - __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE]; - __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE]; - __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE]; - - __le32 valid_bit_msk; -} __packed; /* BT_COEX_CMD_API_S_VER_5 */ - -enum iwl_bt_coex_mode { - BT_COEX_DISABLE = 0x0, - BT_COEX_NW = 0x1, - BT_COEX_BT = 0x2, - BT_COEX_WIFI = 0x3, -}; /* BT_COEX_MODES_E */ - -enum iwl_bt_coex_enabled_modules { - BT_COEX_MPLUT_ENABLED = BIT(0), - BT_COEX_MPLUT_BOOST_ENABLED = BIT(1), - BT_COEX_SYNC2SCO_ENABLED = BIT(2), - BT_COEX_CORUN_ENABLED = BIT(3), - BT_COEX_HIGH_BAND_RET = BIT(4), -}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */ - -/** - * struct iwl_bt_coex_cmd - bt coex configuration command - * @mode: enum %iwl_bt_coex_mode - * @enabled_modules: enum %iwl_bt_coex_enabled_modules - * - * The structure is used for the BT_COEX command. 
- */ -struct iwl_bt_coex_cmd { - __le32 mode; - __le32 enabled_modules; -} __packed; /* BT_COEX_CMD_API_S_VER_6 */ - -/** - * struct iwl_bt_coex_corun_lut_update - bt coex update the corun lut - * @corun_lut20: co-running 20 MHz LUT configuration - * @corun_lut40: co-running 40 MHz LUT configuration - * - * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command. - */ -struct iwl_bt_coex_corun_lut_update_cmd { - __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE]; - __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE]; -} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */ - -/** - * struct iwl_bt_coex_reduced_txp_update_cmd - * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the - * bits are the sta_id (value) - */ -struct iwl_bt_coex_reduced_txp_update_cmd { - __le32 reduced_txp; -} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */ - -/** - * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command - * @bt_primary_ci: - * @primary_ch_phy_id: - * @bt_secondary_ci: - * @secondary_ch_phy_id: - * - * Used for BT_COEX_CI command - */ -struct iwl_bt_coex_ci_cmd { - __le64 bt_primary_ci; - __le32 primary_ch_phy_id; - - __le64 bt_secondary_ci; - __le32 secondary_ch_phy_id; -} __packed; /* BT_CI_MSG_API_S_VER_2 */ - -#define BT_MBOX(n_dw, _msg, _pos, _nbits) \ - BT_MBOX##n_dw##_##_msg##_POS = (_pos), \ - BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS - -enum iwl_bt_mxbox_dw0 { - BT_MBOX(0, LE_SLAVE_LAT, 0, 3), - BT_MBOX(0, LE_PROF1, 3, 1), - BT_MBOX(0, LE_PROF2, 4, 1), - BT_MBOX(0, LE_PROF_OTHER, 5, 1), - BT_MBOX(0, CHL_SEQ_N, 8, 4), - BT_MBOX(0, INBAND_S, 13, 1), - BT_MBOX(0, LE_MIN_RSSI, 16, 4), - BT_MBOX(0, LE_SCAN, 20, 1), - BT_MBOX(0, LE_ADV, 21, 1), - BT_MBOX(0, LE_MAX_TX_POWER, 24, 4), - BT_MBOX(0, OPEN_CON_1, 28, 2), -}; - -enum iwl_bt_mxbox_dw1 { - BT_MBOX(1, BR_MAX_TX_POWER, 0, 4), - BT_MBOX(1, IP_SR, 4, 1), - BT_MBOX(1, LE_MSTR, 5, 1), - BT_MBOX(1, AGGR_TRFC_LD, 8, 6), - BT_MBOX(1, MSG_TYPE, 16, 3), - BT_MBOX(1, SSN, 19, 2), -}; - -enum iwl_bt_mxbox_dw2 { - BT_MBOX(2, SNIFF_ACT, 0, 3), - BT_MBOX(2, PAG, 3, 1), - BT_MBOX(2, INQUIRY, 4, 1), - BT_MBOX(2, CONN, 5, 1), - BT_MBOX(2, SNIFF_INTERVAL, 8, 5), - BT_MBOX(2, DISC, 13, 1), - BT_MBOX(2, SCO_TX_ACT, 16, 2), - BT_MBOX(2, SCO_RX_ACT, 18, 2), - BT_MBOX(2, ESCO_RE_TX, 20, 2), - BT_MBOX(2, SCO_DURATION, 24, 6), -}; - -enum iwl_bt_mxbox_dw3 { - BT_MBOX(3, SCO_STATE, 0, 1), - BT_MBOX(3, SNIFF_STATE, 1, 1), - BT_MBOX(3, A2DP_STATE, 2, 1), - BT_MBOX(3, ACL_STATE, 3, 1), - BT_MBOX(3, MSTR_STATE, 4, 1), - BT_MBOX(3, OBX_STATE, 5, 1), - BT_MBOX(3, OPEN_CON_2, 8, 2), - BT_MBOX(3, TRAFFIC_LOAD, 10, 2), - BT_MBOX(3, CHL_SEQN_LSB, 12, 1), - BT_MBOX(3, INBAND_P, 13, 1), - BT_MBOX(3, MSG_TYPE_2, 16, 3), - BT_MBOX(3, SSN_2, 19, 2), - BT_MBOX(3, UPDATE_REQUEST, 21, 1), -}; - -#define BT_MBOX_MSG(_notif, _num, _field) \ - ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ - >> BT_MBOX##_num##_##_field##_POS) - -enum iwl_bt_activity_grading { - BT_OFF = 0, - BT_ON_NO_CONNECTION = 1, - BT_LOW_TRAFFIC = 2, - BT_HIGH_TRAFFIC = 3, - - BT_MAX_AG, -}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */ - -enum iwl_bt_ci_compliance { - BT_CI_COMPLIANCE_NONE = 0, - BT_CI_COMPLIANCE_PRIMARY = 1, - BT_CI_COMPLIANCE_SECONDARY = 2, - BT_CI_COMPLIANCE_BOTH = 3, -}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ - -#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id) \ - (_ttc_rrc_status & BIT(_phy_id)) - -#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id) \ - ((_ttc_rrc_status >> 4) & BIT(_phy_id)) - -/** - * 
struct iwl_bt_coex_profile_notif - notification about BT coex - * @mbox_msg: message from BT to WiFi - * @msg_idx: the index of the message - * @bt_ci_compliance: enum %iwl_bt_ci_compliance - * @primary_ch_lut: LUT used for primary channel enum %iwl_bt_coex_lut_type - * @secondary_ch_lut: LUT used for secondary channel enume %iwl_bt_coex_lut_type - * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading - * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY - */ -struct iwl_bt_coex_profile_notif { - __le32 mbox_msg[4]; - __le32 msg_idx; - __le32 bt_ci_compliance; - - __le32 primary_ch_lut; - __le32 secondary_ch_lut; - __le32 bt_activity_grading; - u8 ttc_rrc_status; - u8 reserved[3]; -} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ - -enum iwl_bt_coex_prio_table_event { - BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0, - BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1, - BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2, - BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, - BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4, - BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5, - BT_COEX_PRIO_TBL_EVT_DTIM = 6, - BT_COEX_PRIO_TBL_EVT_SCAN52 = 7, - BT_COEX_PRIO_TBL_EVT_SCAN24 = 8, - BT_COEX_PRIO_TBL_EVT_IDLE = 9, - BT_COEX_PRIO_TBL_EVT_MAX = 16, -}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */ - -enum iwl_bt_coex_prio_table_prio { - BT_COEX_PRIO_TBL_DISABLED = 0, - BT_COEX_PRIO_TBL_PRIO_LOW = 1, - BT_COEX_PRIO_TBL_PRIO_HIGH = 2, - BT_COEX_PRIO_TBL_PRIO_BYPASS = 3, - BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4, - BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5, - BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6, - BT_COEX_PRIO_TBL_MAX = 8, -}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */ - -#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0) -#define BT_COEX_PRIO_TBL_PRIO_POS (1) -#define BT_COEX_PRIO_TBL_RESERVED_POS (4) - -/** - * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex - * @prio_tbl: - */ -struct iwl_bt_coex_prio_tbl_cmd { - u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX]; -} __packed; - -/** - * struct iwl_bt_coex_ci_cmd_old - bt coex channel inhibition command - * @bt_primary_ci: - * @bt_secondary_ci: - * @co_run_bw_primary: - * @co_run_bw_secondary: - * @primary_ch_phy_id: - * @secondary_ch_phy_id: - * - * Used for BT_COEX_CI command - */ -struct iwl_bt_coex_ci_cmd_old { - __le64 bt_primary_ci; - __le64 bt_secondary_ci; - - u8 co_run_bw_primary; - u8 co_run_bw_secondary; - u8 primary_ch_phy_id; - u8 secondary_ch_phy_id; -} __packed; /* BT_CI_MSG_API_S_VER_1 */ - -/** - * struct iwl_bt_coex_profile_notif_old - notification about BT coex - * @mbox_msg: message from BT to WiFi - * @msg_idx: the index of the message - * @bt_status: 0 - off, 1 - on - * @bt_open_conn: number of BT connections open - * @bt_traffic_load: load of BT traffic - * @bt_agg_traffic_load: aggregated load of BT traffic - * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant - * @primary_ch_lut: LUT used for primary channel - * @secondary_ch_lut: LUT used for secondary channel - * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading - */ -struct iwl_bt_coex_profile_notif_old { - __le32 mbox_msg[4]; - __le32 msg_idx; - u8 bt_status; - u8 bt_open_conn; - u8 bt_traffic_load; - u8 bt_agg_traffic_load; - u8 bt_ci_compliance; - u8 ttc_enabled; - u8 rrc_enabled; - u8 reserved; - - __le32 primary_ch_lut; - __le32 secondary_ch_lut; - __le32 bt_activity_grading; -} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */ - -#endif /* __fw_api_bt_coex_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h 
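
Before moving on to fw-api-d3.h: the mailbox enums and status helpers in the hunk above are everything needed to decode a coex notification by hand. A short illustrative sketch (hypothetical helper, not taken from the driver) that pulls one mailbox field and the per-PHY TTC/RRC bits out of a struct iwl_bt_coex_profile_notif, relying only on the BT_MBOX(2, SCO_DURATION, 24, 6) layout and the nibble split encoded in IWL_COEX_IS_TTC_ON()/IWL_COEX_IS_RRC_ON():

	static void iwl_coex_notif_example(struct iwl_bt_coex_profile_notif *notif)
	{
		/* 6-bit SCO_DURATION field at bit 24 of mbox_msg[2] */
		u32 sco_duration = BT_MBOX_MSG(notif, 2, SCO_DURATION);
		int phy_id;

		pr_info("SCO duration: %u\n", sco_duration);

		/* ttc_rrc_status: low nibble = TTC per PHY, high nibble = RRC */
		for (phy_id = 0; phy_id < 4; phy_id++)
			pr_info("PHY%d: TTC %s, RRC %s\n", phy_id,
				IWL_COEX_IS_TTC_ON(notif->ttc_rrc_status, phy_id) ?
					"on" : "off",
				IWL_COEX_IS_RRC_ON(notif->ttc_rrc_status, phy_id) ?
					"on" : "off");
	}
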
deleted file mode 100644 index 20521bebb0b1..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ /dev/null @@ -1,425 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ - -#ifndef __fw_api_d3_h__ -#define __fw_api_d3_h__ - -/** - * enum iwl_d3_wakeup_flags - D3 manager wakeup flags - * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert - */ -enum iwl_d3_wakeup_flags { - IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0), -}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */ - -/** - * struct iwl_d3_manager_config - D3 manager configuration command - * @min_sleep_time: minimum sleep time (in usec) - * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags - * @wakeup_host_timer: force wakeup after this many seconds - * - * The structure is used for the D3_CONFIG_CMD command. - */ -struct iwl_d3_manager_config { - __le32 min_sleep_time; - __le32 wakeup_flags; - __le32 wakeup_host_timer; -} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ - - -/* TODO: OFFLOADS_QUERY_API_S_VER_1 */ - -/** - * enum iwl_d3_proto_offloads - enabled protocol offloads - * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled - * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled - */ -enum iwl_proto_offloads { - IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), - IWL_D3_PROTO_OFFLOAD_NS = BIT(1), -}; - -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6 -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12 -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4 -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12 - -#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4 -#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2 - -/** - * struct iwl_proto_offload_cmd_common - ARP/NS offload common part - * @enabled: enable flags - * @remote_ipv4_addr: remote address to answer to (or zero if all) - * @host_ipv4_addr: our IPv4 address to respond to queries for - * @arp_mac_addr: our MAC address for ARP responses - * @reserved: unused - */ -struct iwl_proto_offload_cmd_common { - __le32 enabled; - __be32 remote_ipv4_addr; - __be32 host_ipv4_addr; - u8 arp_mac_addr[ETH_ALEN]; - __le16 reserved; -} __packed; - -/** - * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration - * @common: common/IPv4 configuration - * @remote_ipv6_addr: remote address to answer to (or zero if all) - * @solicited_node_ipv6_addr: broken -- solicited node address exists - * for each target address - * @target_ipv6_addr: our target addresses - * @ndp_mac_addr: neighbor solicitation response MAC address - */ -struct iwl_proto_offload_cmd_v1 { - struct iwl_proto_offload_cmd_common common; - u8 remote_ipv6_addr[16]; - u8 solicited_node_ipv6_addr[16]; - u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16]; - u8 ndp_mac_addr[ETH_ALEN]; - __le16 reserved2; -} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ - -/** - * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration - * @common: common/IPv4 configuration - * @remote_ipv6_addr: remote address to answer to (or zero if all) - * @solicited_node_ipv6_addr: broken -- solicited node address exists - * for each target address - * @target_ipv6_addr: our target addresses - * @ndp_mac_addr: neighbor solicitation response MAC address - */ -struct iwl_proto_offload_cmd_v2 { - struct iwl_proto_offload_cmd_common common; - u8 remote_ipv6_addr[16]; - u8 solicited_node_ipv6_addr[16]; - u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16]; - u8 ndp_mac_addr[ETH_ALEN]; - u8 numValidIPv6Addresses; - u8 reserved2[3]; -} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */ - -struct iwl_ns_config { - struct in6_addr source_ipv6_addr; - struct in6_addr dest_ipv6_addr; - u8 
target_mac_addr[ETH_ALEN]; - __le16 reserved; -} __packed; /* NS_OFFLOAD_CONFIG */ - -struct iwl_targ_addr { - struct in6_addr addr; - __le32 config_num; -} __packed; /* TARGET_IPV6_ADDRESS */ - -/** - * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration - * @common: common/IPv4 configuration - * @target_ipv6_addr: target IPv6 addresses - * @ns_config: NS offload configurations - */ -struct iwl_proto_offload_cmd_v3_small { - struct iwl_proto_offload_cmd_common common; - __le32 num_valid_ipv6_addrs; - struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S]; - struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S]; -} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ - -/** - * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration - * @common: common/IPv4 configuration - * @target_ipv6_addr: target IPv6 addresses - * @ns_config: NS offload configurations - */ -struct iwl_proto_offload_cmd_v3_large { - struct iwl_proto_offload_cmd_common common; - __le32 num_valid_ipv6_addrs; - struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L]; - struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L]; -} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ - -/* - * WOWLAN_PATTERNS - */ -#define IWL_WOWLAN_MIN_PATTERN_LEN 16 -#define IWL_WOWLAN_MAX_PATTERN_LEN 128 - -struct iwl_wowlan_pattern { - u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; - u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN]; - u8 mask_size; - u8 pattern_size; - __le16 reserved; -} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */ - -#define IWL_WOWLAN_MAX_PATTERNS 20 - -struct iwl_wowlan_patterns_cmd { - __le32 n_patterns; - struct iwl_wowlan_pattern patterns[]; -} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ - -enum iwl_wowlan_wakeup_filters { - IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0), - IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1), - IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), - IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), - IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), - IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), - IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), - IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7), - IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8), - IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9), - IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10), - IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11), - IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12), - IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13), - IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14), - IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15), - IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16), -}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ - -struct iwl_wowlan_config_cmd { - __le32 wakeup_filter; - __le16 non_qos_seq; - __le16 qos_seq[8]; - u8 wowlan_ba_teardown_tids; - u8 is_11n_connection; - u8 offloading_tid; - u8 reserved[3]; -} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */ - -/* - * WOWLAN_TSC_RSC_PARAMS - */ -#define IWL_NUM_RSC 16 - -struct tkip_sc { - __le16 iv16; - __le16 pad; - __le32 iv32; -} __packed; /* TKIP_SC_API_U_VER_1 */ - -struct iwl_tkip_rsc_tsc { - struct tkip_sc unicast_rsc[IWL_NUM_RSC]; - struct tkip_sc multicast_rsc[IWL_NUM_RSC]; - struct tkip_sc tsc; -} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */ - -struct aes_sc { - __le64 pn; -} __packed; /* TKIP_AES_SC_API_U_VER_1 */ - -struct iwl_aes_rsc_tsc { - struct aes_sc unicast_rsc[IWL_NUM_RSC]; - struct aes_sc multicast_rsc[IWL_NUM_RSC]; - struct aes_sc tsc; -} __packed; /* AES_TSC_RSC_API_S_VER_1 */ - -union iwl_all_tsc_rsc { - struct iwl_tkip_rsc_tsc tkip; - struct iwl_aes_rsc_tsc aes; -}; /* 
ALL_TSC_RSC_API_S_VER_2 */ - -struct iwl_wowlan_rsc_tsc_params_cmd { - union iwl_all_tsc_rsc all_tsc_rsc; -} __packed; /* ALL_TSC_RSC_API_S_VER_2 */ - -#define IWL_MIC_KEY_SIZE 8 -struct iwl_mic_keys { - u8 tx[IWL_MIC_KEY_SIZE]; - u8 rx_unicast[IWL_MIC_KEY_SIZE]; - u8 rx_mcast[IWL_MIC_KEY_SIZE]; -} __packed; /* MIC_KEYS_API_S_VER_1 */ - -#define IWL_P1K_SIZE 5 -struct iwl_p1k_cache { - __le16 p1k[IWL_P1K_SIZE]; -} __packed; - -#define IWL_NUM_RX_P1K_CACHE 2 - -struct iwl_wowlan_tkip_params_cmd { - struct iwl_mic_keys mic_keys; - struct iwl_p1k_cache tx; - struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; - struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; -} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */ - -#define IWL_KCK_MAX_SIZE 32 -#define IWL_KEK_MAX_SIZE 32 - -struct iwl_wowlan_kek_kck_material_cmd { - u8 kck[IWL_KCK_MAX_SIZE]; - u8 kek[IWL_KEK_MAX_SIZE]; - __le16 kck_len; - __le16 kek_len; - __le64 replay_ctr; -} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */ - -#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 - -enum iwl_wowlan_rekey_status { - IWL_WOWLAN_REKEY_POST_REKEY = 0, - IWL_WOWLAN_REKEY_WHILE_REKEY = 1, -}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */ - -enum iwl_wowlan_wakeup_reason { - IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0, - IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0), - IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1), - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2), - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3), - IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4), - IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5), - IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6), - IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7), - IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), - IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), - IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), - IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), - IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), - IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13), - IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14), - IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15), - IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16), - -}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ - -struct iwl_wowlan_gtk_status { - u8 key_index; - u8 reserved[3]; - u8 decrypt_key[16]; - u8 tkip_mic_key[8]; - struct iwl_wowlan_rsc_tsc_params_cmd rsc; -} __packed; - -struct iwl_wowlan_status { - struct iwl_wowlan_gtk_status gtk; - __le64 replay_ctr; - __le16 pattern_number; - __le16 non_qos_seq_ctr; - __le16 qos_seq_ctr[8]; - __le32 wakeup_reasons; - __le32 num_of_gtk_rekeys; - __le32 transmitted_ndps; - __le32 received_beacons; - __le32 wake_packet_length; - __le32 wake_packet_bufsize; - u8 wake_packet[]; /* can be truncated from _length to _bufsize */ -} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ - -#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64 -#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128 -#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048 - -struct iwl_tcp_packet_info { - __le16 tcp_pseudo_header_checksum; - __le16 tcp_payload_length; -} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */ - -struct iwl_tcp_packet { - struct iwl_tcp_packet_info info; - u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; - u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN]; -} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ - -struct iwl_remote_wake_packet { - struct iwl_tcp_packet_info info; - u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; - u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN]; -} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ - -struct iwl_wowlan_remote_wake_config { - 
__le32 connection_max_time; /* unused */ - /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */ - u8 max_syn_retries; - u8 max_data_retries; - u8 tcp_syn_ack_timeout; - u8 tcp_ack_timeout; - - struct iwl_tcp_packet syn_tx; - struct iwl_tcp_packet synack_rx; - struct iwl_tcp_packet keepalive_ack_rx; - struct iwl_tcp_packet fin_tx; - - struct iwl_remote_wake_packet keepalive_tx; - struct iwl_remote_wake_packet wake_rx; - - /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */ - u8 sequence_number_offset; - u8 sequence_number_length; - u8 token_offset; - u8 token_length; - /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */ - __le32 initial_sequence_number; - __le16 keepalive_interval; - __le16 num_tokens; - u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS]; -} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */ - -/* TODO: NetDetect API */ - -#endif /* __fw_api_d3_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h deleted file mode 100644 index f3f3ee0a766b..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h +++ /dev/null @@ -1,387 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
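For illustration only, and not part of this patch: struct iwl_wowlan_patterns_cmd above ends in a flexible array, so the command is variable-sized. A caller would typically compute the buffer length as sketched below; the helper name is hypothetical.

/* hypothetical sketch: length of a WOWLAN_PATTERN_ARRAY command
 * carrying n_patterns entries (n_patterns <= IWL_WOWLAN_MAX_PATTERNS)
 */
static u32 example_wowlan_patterns_cmd_len(u32 n_patterns)
{
        return sizeof(struct iwl_wowlan_patterns_cmd) +
               n_patterns * sizeof(struct iwl_wowlan_pattern);
}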
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __fw_api_mac_h__ -#define __fw_api_mac_h__ - -/* - * The first MAC indices (starting from 0) - * are available to the driver, AUX follows - */ -#define MAC_INDEX_AUX 4 -#define MAC_INDEX_MIN_DRIVER 0 -#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX -#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1) - -enum iwl_ac { - AC_BK, - AC_BE, - AC_VI, - AC_VO, - AC_NUM, -}; - -/** - * enum iwl_mac_protection_flags - MAC context flags - * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, - * this will require CCK RTS/CTS2self. - * RTS/CTS will protect full burst time. - * @MAC_PROT_FLG_HT_PROT: enable HT protection - * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions - * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self - */ -enum iwl_mac_protection_flags { - MAC_PROT_FLG_TGG_PROTECT = BIT(3), - MAC_PROT_FLG_HT_PROT = BIT(23), - MAC_PROT_FLG_FAT_PROT = BIT(24), - MAC_PROT_FLG_SELF_CTS_EN = BIT(30), -}; - -#define MAC_FLG_SHORT_SLOT BIT(4) -#define MAC_FLG_SHORT_PREAMBLE BIT(5) - -/** - * enum iwl_mac_types - Supported MAC types - * @FW_MAC_TYPE_FIRST: lowest supported MAC type - * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal) - * @FW_MAC_TYPE_LISTENER: monitor MAC type (?) - * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS - * @FW_MAC_TYPE_IBSS: IBSS - * @FW_MAC_TYPE_BSS_STA: BSS (managed) station - * @FW_MAC_TYPE_P2P_DEVICE: P2P Device - * @FW_MAC_TYPE_P2P_STA: P2P client - * @FW_MAC_TYPE_GO: P2P GO - * @FW_MAC_TYPE_TEST: ? 
- * @FW_MAC_TYPE_MAX: highest support MAC type - */ -enum iwl_mac_types { - FW_MAC_TYPE_FIRST = 1, - FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST, - FW_MAC_TYPE_LISTENER, - FW_MAC_TYPE_PIBSS, - FW_MAC_TYPE_IBSS, - FW_MAC_TYPE_BSS_STA, - FW_MAC_TYPE_P2P_DEVICE, - FW_MAC_TYPE_P2P_STA, - FW_MAC_TYPE_GO, - FW_MAC_TYPE_TEST, - FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST -}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */ - -/** - * enum iwl_tsf_id - TSF hw timer ID - * @TSF_ID_A: use TSF A - * @TSF_ID_B: use TSF B - * @TSF_ID_C: use TSF C - * @TSF_ID_D: use TSF D - * @NUM_TSF_IDS: number of TSF timers available - */ -enum iwl_tsf_id { - TSF_ID_A = 0, - TSF_ID_B = 1, - TSF_ID_C = 2, - TSF_ID_D = 3, - NUM_TSF_IDS = 4, -}; /* TSF_ID_API_E_VER_1 */ - -/** - * struct iwl_mac_data_ap - configuration data for AP MAC context - * @beacon_time: beacon transmit time in system time - * @beacon_tsf: beacon transmit time in TSF - * @bi: beacon interval in TU - * @bi_reciprocal: 2^32 / bi - * @dtim_interval: dtim transmit time in TU - * @dtim_reciprocal: 2^32 / dtim_interval - * @mcast_qid: queue ID for multicast traffic - * @beacon_template: beacon template ID - */ -struct iwl_mac_data_ap { - __le32 beacon_time; - __le64 beacon_tsf; - __le32 bi; - __le32 bi_reciprocal; - __le32 dtim_interval; - __le32 dtim_reciprocal; - __le32 mcast_qid; - __le32 beacon_template; -} __packed; /* AP_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_ibss - configuration data for IBSS MAC context - * @beacon_time: beacon transmit time in system time - * @beacon_tsf: beacon transmit time in TSF - * @bi: beacon interval in TU - * @bi_reciprocal: 2^32 / bi - * @beacon_template: beacon template ID - */ -struct iwl_mac_data_ibss { - __le32 beacon_time; - __le64 beacon_tsf; - __le32 bi; - __le32 bi_reciprocal; - __le32 beacon_template; -} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_sta - configuration data for station MAC context - * @is_assoc: 1 for associated state, 0 otherwise - * @dtim_time: DTIM arrival time in system time - * @dtim_tsf: DTIM arrival time in TSF - * @bi: beacon interval in TU, applicable only when associated - * @bi_reciprocal: 2^32 / bi , applicable only when associated - * @dtim_interval: DTIM interval in TU, applicable only when associated - * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated - * @listen_interval: in beacon intervals, applicable only when associated - * @assoc_id: unique ID assigned by the AP during association - */ -struct iwl_mac_data_sta { - __le32 is_assoc; - __le32 dtim_time; - __le64 dtim_tsf; - __le32 bi; - __le32 bi_reciprocal; - __le32 dtim_interval; - __le32 dtim_reciprocal; - __le32 listen_interval; - __le32 assoc_id; - __le32 assoc_beacon_arrive_time; -} __packed; /* STA_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_go - configuration data for P2P GO MAC context - * @ap: iwl_mac_data_ap struct with most config data - * @ctwin: client traffic window in TU (period after TBTT when GO is present). - * 0 indicates that there is no CT window. - * @opp_ps_enabled: indicate that opportunistic PS allowed - */ -struct iwl_mac_data_go { - struct iwl_mac_data_ap ap; - __le32 ctwin; - __le32 opp_ps_enabled; -} __packed; /* GO_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context - * @sta: iwl_mac_data_sta struct with most config data - * @ctwin: client traffic window in TU (period after TBTT when GO is present). - * 0 indicates that there is no CT window. 
- */ -struct iwl_mac_data_p2p_sta { - struct iwl_mac_data_sta sta; - __le32 ctwin; -} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_pibss - Pseudo IBSS config data - * @stats_interval: interval in TU between statistics notifications to host. - */ -struct iwl_mac_data_pibss { - __le32 stats_interval; -} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */ - -/* - * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC - * context. - * @is_disc_extended: if set to true, P2P Device discoverability is enabled on - * other channels as well. This should be to true only in case that the - * device is discoverable and there is an active GO. Note that setting this - * field when not needed, will increase the number of interrupts and have - * effect on the platform power, as this setting opens the Rx filters on - * all macs. - */ -struct iwl_mac_data_p2p_dev { - __le32 is_disc_extended; -} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */ - -/** - * enum iwl_mac_filter_flags - MAC context filter flags - * @MAC_FILTER_IN_PROMISC: accept all data frames - * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and - * control frames to the host - * @MAC_FILTER_ACCEPT_GRP: accept multicast frames - * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames - * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames - * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host - * (in station mode when associated) - * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames - * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames - * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host - */ -enum iwl_mac_filter_flags { - MAC_FILTER_IN_PROMISC = BIT(0), - MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1), - MAC_FILTER_ACCEPT_GRP = BIT(2), - MAC_FILTER_DIS_DECRYPT = BIT(3), - MAC_FILTER_DIS_GRP_DECRYPT = BIT(4), - MAC_FILTER_IN_BEACON = BIT(6), - MAC_FILTER_OUT_BCAST = BIT(8), - MAC_FILTER_IN_CRC32 = BIT(11), - MAC_FILTER_IN_PROBE_REQUEST = BIT(12), -}; - -/** - * enum iwl_mac_qos_flags - QoS flags - * @MAC_QOS_FLG_UPDATE_EDCA: ? - * @MAC_QOS_FLG_TGN: HT is enabled - * @MAC_QOS_FLG_TXOP_TYPE: ? - * - */ -enum iwl_mac_qos_flags { - MAC_QOS_FLG_UPDATE_EDCA = BIT(0), - MAC_QOS_FLG_TGN = BIT(1), - MAC_QOS_FLG_TXOP_TYPE = BIT(4), -}; - -/** - * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD - * @cw_min: Contention window, start value in numbers of slots. - * Should be a power-of-2, minus 1. Device's default is 0x0f. - * @cw_max: Contention window, max value in numbers of slots. - * Should be a power-of-2, minus 1. Device's default is 0x3f. - * @aifsn: Number of slots in Arbitration Interframe Space (before - * performing random backoff timing prior to Tx). Device default 1. - * @fifos_mask: FIFOs used by this MAC for this AC - * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. - * - * One instance of this config struct for each of 4 EDCA access categories - * in struct iwl_qosparam_cmd. - * - * Device will automatically increase contention window by (2*CW) + 1 for each - * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW - * value, to cap the CW value. 
- */ -struct iwl_ac_qos { - __le16 cw_min; - __le16 cw_max; - u8 aifsn; - u8 fifos_mask; - __le16 edca_txop; -} __packed; /* AC_QOS_API_S_VER_2 */ - -/** - * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts - * ( MAC_CONTEXT_CMD = 0x28 ) - * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* - * @mac_type: one of FW_MAC_TYPE_* - * @tsd_id: TSF HW timer, one of TSF_ID_* - * @node_addr: MAC address - * @bssid_addr: BSSID - * @cck_rates: basic rates available for CCK - * @ofdm_rates: basic rates available for OFDM - * @protection_flags: combination of MAC_PROT_FLG_FLAG_* - * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise - * @short_slot: 0x10 for enabling short slots, 0 otherwise - * @filter_flags: combination of MAC_FILTER_* - * @qos_flags: from MAC_QOS_FLG_* - * @ac: one iwl_mac_qos configuration for each AC - * @mac_specific: one of struct iwl_mac_data_*, according to mac_type - */ -struct iwl_mac_ctx_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */ - __le32 mac_type; - __le32 tsf_id; - u8 node_addr[6]; - __le16 reserved_for_node_addr; - u8 bssid_addr[6]; - __le16 reserved_for_bssid_addr; - __le32 cck_rates; - __le32 ofdm_rates; - __le32 protection_flags; - __le32 cck_short_preamble; - __le32 short_slot; - __le32 filter_flags; - /* MAC_QOS_PARAM_API_S_VER_1 */ - __le32 qos_flags; - struct iwl_ac_qos ac[AC_NUM+1]; - /* MAC_CONTEXT_COMMON_DATA_API_S */ - union { - struct iwl_mac_data_ap ap; - struct iwl_mac_data_go go; - struct iwl_mac_data_sta sta; - struct iwl_mac_data_p2p_sta p2p_sta; - struct iwl_mac_data_p2p_dev p2p_dev; - struct iwl_mac_data_pibss pibss; - struct iwl_mac_data_ibss ibss; - }; -} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */ - -static inline u32 iwl_mvm_reciprocal(u32 v) -{ - if (!v) - return 0; - return 0xFFFFFFFF / v; -} - -#define IWL_NONQOS_SEQ_GET 0x1 -#define IWL_NONQOS_SEQ_SET 0x2 -struct iwl_nonqos_seq_query_cmd { - __le32 get_set_flag; - __le32 mac_id_n_color; - __le16 value; - __le16 reserved; -} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ - -#endif /* __fw_api_mac_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h deleted file mode 100644 index c8f3e2536cbb..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ /dev/null @@ -1,467 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
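For illustration only, and not part of this patch: the bi_reciprocal/dtim_reciprocal fields documented above hold 2^32 divided by the interval, which is what iwl_mvm_reciprocal() computes. A sketch of how an AP MAC context could be filled follows; the function name and the DTIM-period handling are hypothetical.

/* hypothetical sketch: fill the beacon/DTIM timing of an AP context */
static void example_fill_ap_timing(struct iwl_mac_ctx_cmd *cmd,
                                   u32 bi, u32 dtim_period)
{
        cmd->ap.bi = cpu_to_le32(bi);
        cmd->ap.bi_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(bi));
        cmd->ap.dtim_interval = cpu_to_le32(bi * dtim_period);
        cmd->ap.dtim_reciprocal =
                cpu_to_le32(iwl_mvm_reciprocal(bi * dtim_period));
}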
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#ifndef __fw_api_power_h__ -#define __fw_api_power_h__ - -/* Power Management Commands, Responses, Notifications */ - -/** - * enum iwl_ltr_config_flags - masks for LTR config command flags - * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status - * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow - * memory access - * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR - * reg change - * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from - * D0 to D3 - * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register - * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register - * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD - */ -enum iwl_ltr_config_flags { - LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), - LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), - LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), - LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), - LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), - LTR_CFG_FLAG_SW_SET_LONG = BIT(5), - LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), -}; - -/** - * struct iwl_ltr_config_cmd_v1 - configures the LTR - * @flags: See %enum iwl_ltr_config_flags - */ -struct iwl_ltr_config_cmd_v1 { - __le32 flags; - __le32 static_long; - __le32 static_short; -} __packed; /* LTR_CAPABLE_API_S_VER_1 */ - -#define LTR_VALID_STATES_NUM 4 - -/** - * struct iwl_ltr_config_cmd - configures the LTR - * @flags: See %enum iwl_ltr_config_flags - * @static_long: - * @static_short: - * @ltr_cfg_values: - * @ltr_short_idle_timeout: - */ -struct iwl_ltr_config_cmd { - __le32 flags; - __le32 static_long; - __le32 static_short; - __le32 ltr_cfg_values[LTR_VALID_STATES_NUM]; - __le32 ltr_short_idle_timeout; -} __packed; /* LTR_CAPABLE_API_S_VER_2 */ - -/* Radio LP RX Energy Threshold measured in dBm */ -#define POWER_LPRX_RSSI_THRESHOLD 75 -#define POWER_LPRX_RSSI_THRESHOLD_MAX 94 -#define POWER_LPRX_RSSI_THRESHOLD_MIN 30 - -/** - * enum iwl_power_flags - masks for power table command flags - * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off - * receiver and transmitter. '0' - does not allow. - * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, - * '1' Driver enables PM (use rest of parameters) - * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, - * '1' PM could sleep over DTIM till listen Interval. - * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all - * access categories are both delivery and trigger enabled. - * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and - * PBW Snoozing enabled - * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask - * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. - * @POWER_FLAGS_AP_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving - * detection enablement -*/ -enum iwl_power_flags { - POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), - POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), - POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), - POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5), - POWER_FLAGS_BT_SCO_ENA = BIT(8), - POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), - POWER_FLAGS_LPRX_ENA_MSK = BIT(11), - POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12), -}; - -#define IWL_POWER_VEC_SIZE 5 - -/** - * struct iwl_powertable_cmd - legacy power command. Beside old API support this - * is used also with a new power API for device wide power settings. 
- * POWER_TABLE_CMD = 0x77 (command, has simple generic response) - * - * @flags: Power table command flags from POWER_FLAGS_* - * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. - * Minimum allowed:- 3 * DTIM. Keep alive period must be - * set regardless of power scheme or current power state. - * FW use this value also when PM is disabled. - * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to - * PSM transition - legacy PM - * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to - * PSM transition - legacy PM - * @sleep_interval: not in use - * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag - * is set. For example, if it is required to skip over - * one DTIM, this value need to be set to 2 (DTIM periods). - * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. - * Default: 80dbm - */ -struct iwl_powertable_cmd { - /* PM_POWER_TABLE_CMD_API_S_VER_6 */ - __le16 flags; - u8 keep_alive_seconds; - u8 debug_flags; - __le32 rx_data_timeout; - __le32 tx_data_timeout; - __le32 sleep_interval[IWL_POWER_VEC_SIZE]; - __le32 skip_dtim_periods; - __le32 lprx_rssi_threshold; -} __packed; - -/** - * enum iwl_device_power_flags - masks for device power command flags - * @DEVIC_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off - * receiver and transmitter. '0' - does not allow. -*/ -enum iwl_device_power_flags { - DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), -}; - -/** - * struct iwl_device_power_cmd - device wide power command. - * DEVICE_POWER_CMD = 0x77 (command, has simple generic response) - * - * @flags: Power table command flags from DEVICE_POWER_FLAGS_* - */ -struct iwl_device_power_cmd { - /* PM_POWER_TABLE_CMD_API_S_VER_6 */ - __le16 flags; - __le16 reserved; -} __packed; - -/** - * struct iwl_mac_power_cmd - New power command containing uAPSD support - * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) - * @id_and_color: MAC contex identifier - * @flags: Power table command flags from POWER_FLAGS_* - * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. - * Minimum allowed:- 3 * DTIM. Keep alive period must be - * set regardless of power scheme or current power state. - * FW use this value also when PM is disabled. - * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to - * PSM transition - legacy PM - * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to - * PSM transition - legacy PM - * @sleep_interval: not in use - * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag - * is set. For example, if it is required to skip over - * one DTIM, this value need to be set to 2 (DTIM periods). - * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to - * PSM transition - uAPSD - * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to - * PSM transition - uAPSD - * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. - * Default: 80dbm - * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set - * @snooze_interval: Maximum time between attempts to retrieve buffered data - * from the AP [msec] - * @snooze_window: A window of time in which PBW snoozing insures that all - * packets received. It is also the minimum time from last - * received unicast RX packet, before client stops snoozing - * for data. 
[msec] - * @snooze_step: TBD - * @qndp_tid: TID client shall use for uAPSD QNDP triggers - * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for - * each corresponding AC. - * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. - * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct - * values. - * @heavy_tx_thld_packets: TX threshold measured in number of packets - * @heavy_rx_thld_packets: RX threshold measured in number of packets - * @heavy_tx_thld_percentage: TX threshold measured in load's percentage - * @heavy_rx_thld_percentage: RX threshold measured in load's percentage - * @limited_ps_threshold: -*/ -struct iwl_mac_power_cmd { - /* CONTEXT_DESC_API_T_VER_1 */ - __le32 id_and_color; - - /* CLIENT_PM_POWER_TABLE_S_VER_1 */ - __le16 flags; - __le16 keep_alive_seconds; - __le32 rx_data_timeout; - __le32 tx_data_timeout; - __le32 rx_data_timeout_uapsd; - __le32 tx_data_timeout_uapsd; - u8 lprx_rssi_threshold; - u8 skip_dtim_periods; - __le16 snooze_interval; - __le16 snooze_window; - u8 snooze_step; - u8 qndp_tid; - u8 uapsd_ac_flags; - u8 uapsd_max_sp; - u8 heavy_tx_thld_packets; - u8 heavy_rx_thld_packets; - u8 heavy_tx_thld_percentage; - u8 heavy_rx_thld_percentage; - u8 limited_ps_threshold; - u8 reserved; -} __packed; - -/* - * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when - * associated AP is identified as improperly implementing uAPSD protocol. - * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78 - * @sta_id: index of station in uCode's station table - associated AP ID in - * this context. - */ -struct iwl_uapsd_misbehaving_ap_notif { - __le32 sta_id; - u8 mac_id; - u8 reserved[3]; -} __packed; - -/** - * struct iwl_reduce_tx_power_cmd - TX power reduction command - * REDUCE_TX_POWER_CMD = 0x9f - * @flags: (reserved for future implementation) - * @mac_context_id: id of the mac ctx for which we are reducing TX power. - * @pwr_restriction: TX power restriction in dBms. - */ -struct iwl_reduce_tx_power_cmd { - u8 flags; - u8 mac_context_id; - __le16 pwr_restriction; -} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ - -enum iwl_dev_tx_power_cmd_mode { - IWL_TX_POWER_MODE_SET_MAC = 0, - IWL_TX_POWER_MODE_SET_DEVICE = 1, - IWL_TX_POWER_MODE_SET_CHAINS = 2, -}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */; - -/** - * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command - * @set_mode: see &enum iwl_dev_tx_power_cmd_mode - * @mac_context_id: id of the mac ctx for which we are reducing TX power. - * @pwr_restriction: TX power restriction in 1/8 dBms. 
- * @dev_24: device TX power restriction in 1/8 dBms - * @dev_52_low: device TX power restriction upper band - low - * @dev_52_high: device TX power restriction upper band - high - */ -struct iwl_dev_tx_power_cmd_v2 { - __le32 set_mode; - __le32 mac_context_id; - __le16 pwr_restriction; - __le16 dev_24; - __le16 dev_52_low; - __le16 dev_52_high; -} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */ - -#define IWL_NUM_CHAIN_LIMITS 2 -#define IWL_NUM_SUB_BANDS 5 - -/** - * struct iwl_dev_tx_power_cmd - TX power reduction command - * @v2: version 2 of the command, embedded here for easier software handling - * @per_chain_restriction: per chain restrictions - */ -struct iwl_dev_tx_power_cmd { - /* v3 is just an extension of v2 - keep this here */ - struct iwl_dev_tx_power_cmd_v2 v2; - __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; -} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ - -#define IWL_DEV_MAX_TX_POWER 0x7FFF - -/** - * struct iwl_beacon_filter_cmd - * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) - * @id_and_color: MAC contex identifier - * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon - * to driver if delta in Energy values calculated for this and last - * passed beacon is greater than this threshold. Zero value means that - * the Energy change is ignored for beacon filtering, and beacon will - * not be forced to be sent to driver regardless of this delta. Typical - * energy delta 5dB. - * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state. - * Send beacon to driver if delta in Energy values calculated for this - * and last passed beacon is greater than this threshold. Zero value - * means that the Energy change is ignored for beacon filtering while in - * Roaming state, typical energy delta 1dB. - * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values - * calculated for current beacon is less than the threshold, use - * Roaming Energy Delta Threshold, otherwise use normal Energy Delta - * Threshold. Typical energy threshold is -72dBm. - * @bf_temp_threshold: This threshold determines the type of temperature - * filtering (Slow or Fast) that is selected (Units are in Celsuis): - * If the current temperature is above this threshold - Fast filter - * will be used, If the current temperature is below this threshold - - * Slow filter will be used. - * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values - * calculated for this and the last passed beacon is greater than this - * threshold. Zero value means that the temperature change is ignored for - * beacon filtering; beacons will not be forced to be sent to driver - * regardless of whether its temerature has been changed. - * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values - * calculated for this and the last passed beacon is greater than this - * threshold. Zero value means that the temperature change is ignored for - * beacon filtering; beacons will not be forced to be sent to driver - * regardless of whether its temerature has been changed. - * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. - * @bf_filter_escape_timer: Send beacons to to driver if no beacons were passed - * for a specific period of time. Units: Beacons. - * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed - * for a longer period of time then this escape-timeout. Units: Beacons. - * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. 
- */ -struct iwl_beacon_filter_cmd { - __le32 bf_energy_delta; - __le32 bf_roaming_energy_delta; - __le32 bf_roaming_state; - __le32 bf_temp_threshold; - __le32 bf_temp_fast_filter; - __le32 bf_temp_slow_filter; - __le32 bf_enable_beacon_filter; - __le32 bf_debug_flag; - __le32 bf_escape_timer; - __le32 ba_escape_timer; - __le32 ba_enable_beacon_abort; -} __packed; - -/* Beacon filtering and beacon abort */ -#define IWL_BF_ENERGY_DELTA_DEFAULT 5 -#define IWL_BF_ENERGY_DELTA_D0I3 20 -#define IWL_BF_ENERGY_DELTA_MAX 255 -#define IWL_BF_ENERGY_DELTA_MIN 0 - -#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 -#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20 -#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 -#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 - -#define IWL_BF_ROAMING_STATE_DEFAULT 72 -#define IWL_BF_ROAMING_STATE_D0I3 72 -#define IWL_BF_ROAMING_STATE_MAX 255 -#define IWL_BF_ROAMING_STATE_MIN 0 - -#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 -#define IWL_BF_TEMP_THRESHOLD_D0I3 112 -#define IWL_BF_TEMP_THRESHOLD_MAX 255 -#define IWL_BF_TEMP_THRESHOLD_MIN 0 - -#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 -#define IWL_BF_TEMP_FAST_FILTER_D0I3 1 -#define IWL_BF_TEMP_FAST_FILTER_MAX 255 -#define IWL_BF_TEMP_FAST_FILTER_MIN 0 - -#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 -#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 -#define IWL_BF_TEMP_SLOW_FILTER_MAX 255 -#define IWL_BF_TEMP_SLOW_FILTER_MIN 0 - -#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 - -#define IWL_BF_DEBUG_FLAG_DEFAULT 0 -#define IWL_BF_DEBUG_FLAG_D0I3 0 - -#define IWL_BF_ESCAPE_TIMER_DEFAULT 0 -#define IWL_BF_ESCAPE_TIMER_D0I3 0 -#define IWL_BF_ESCAPE_TIMER_MAX 1024 -#define IWL_BF_ESCAPE_TIMER_MIN 0 - -#define IWL_BA_ESCAPE_TIMER_DEFAULT 6 -#define IWL_BA_ESCAPE_TIMER_D0I3 6 -#define IWL_BA_ESCAPE_TIMER_D3 9 -#define IWL_BA_ESCAPE_TIMER_MAX 1024 -#define IWL_BA_ESCAPE_TIMER_MIN 0 - -#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 - -#define IWL_BF_CMD_CONFIG(mode) \ - .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \ - .bf_roaming_energy_delta = \ - cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \ - .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \ - .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \ - .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \ - .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \ - .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \ - .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \ - .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode) - -#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) -#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3) -#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h deleted file mode 100644 index 0f1ea80a55ef..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h +++ /dev/null @@ -1,389 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
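For illustration only, and not part of this patch: the IWL_BF_CMD_CONFIG_DEFAULTS helper above covers the threshold and timer fields but not the enable flags, so a fully default-initialized beacon filter command might look like the sketch below; the variable name is hypothetical.

/* hypothetical sketch: a default beacon filtering configuration */
static const struct iwl_beacon_filter_cmd example_bf_defaults = {
        IWL_BF_CMD_CONFIG_DEFAULTS,
        .bf_enable_beacon_filter =
                cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
        .ba_enable_beacon_abort =
                cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
};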
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ - -#ifndef __fw_api_rs_h__ -#define __fw_api_rs_h__ - -#include "fw-api-mac.h" - -/* - * These serve as indexes into - * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; - * TODO: avoid overlap between legacy and HT rates - */ -enum { - IWL_RATE_1M_INDEX = 0, - IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, - IWL_RATE_2M_INDEX, - IWL_RATE_5M_INDEX, - IWL_RATE_11M_INDEX, - IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, - IWL_RATE_6M_INDEX, - IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, - IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX, - IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX, - IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX, - IWL_RATE_9M_INDEX, - IWL_RATE_12M_INDEX, - IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX, - IWL_RATE_18M_INDEX, - IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX, - IWL_RATE_24M_INDEX, - IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX, - IWL_RATE_36M_INDEX, - IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX, - IWL_RATE_48M_INDEX, - IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX, - IWL_RATE_54M_INDEX, - IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX, - IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, - IWL_RATE_60M_INDEX, - IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX, - IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX, - IWL_RATE_MCS_8_INDEX, - IWL_RATE_MCS_9_INDEX, - IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, - IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, - IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1, -}; - -#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) - -/* fw API values for legacy bit rates, both OFDM and CCK */ -enum { - IWL_RATE_6M_PLCP = 13, - IWL_RATE_9M_PLCP = 15, - IWL_RATE_12M_PLCP = 5, - IWL_RATE_18M_PLCP = 7, - IWL_RATE_24M_PLCP = 9, - IWL_RATE_36M_PLCP = 11, - IWL_RATE_48M_PLCP = 1, - IWL_RATE_54M_PLCP = 3, - IWL_RATE_1M_PLCP = 10, - IWL_RATE_2M_PLCP = 20, - IWL_RATE_5M_PLCP = 55, - IWL_RATE_11M_PLCP = 110, - IWL_RATE_INVM_PLCP = -1, -}; - -/* - * rate_n_flags bit fields - * - * The 32-bit value has different layouts in the low 8 bites depending on the - * format. There are three formats, HT, VHT and legacy (11abg, with subformats - * for CCK and OFDM). - * - * High-throughput (HT) rate format - * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM) - * Very High-throughput (VHT) rate format - * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM) - * Legacy OFDM rate format for bits 7:0 - * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM) - * Legacy CCK rate format for bits 7:0: - * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK) - */ - -/* Bit 8: (1) HT format, (0) legacy or VHT format */ -#define RATE_MCS_HT_POS 8 -#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS) - -/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ -#define RATE_MCS_CCK_POS 9 -#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS) - -/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ -#define RATE_MCS_VHT_POS 26 -#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS) - - -/* - * High-throughput (HT) rate format for bits 7:0 - * - * 2-0: MCS rate base - * 0) 6 Mbps - * 1) 12 Mbps - * 2) 18 Mbps - * 3) 24 Mbps - * 4) 36 Mbps - * 5) 48 Mbps - * 6) 54 Mbps - * 7) 60 Mbps - * 4-3: 0) Single stream (SISO) - * 1) Dual stream (MIMO) - * 2) Triple stream (MIMO) - * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data - * (bits 7-6 are zero) - * - * Together the low 5 bits work out to the MCS index because we don't - * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two - * streams and 16-23 have three streams. 
We could also support MCS 32 - * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) - */ -#define RATE_HT_MCS_RATE_CODE_MSK 0x7 -#define RATE_HT_MCS_NSS_POS 3 -#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS) - -/* Bit 10: (1) Use Green Field preamble */ -#define RATE_HT_MCS_GF_POS 10 -#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) - -#define RATE_HT_MCS_INDEX_MSK 0x3f - -/* - * Very High-throughput (VHT) rate format for bits 7:0 - * - * 3-0: VHT MCS (0-9) - * 5-4: number of streams - 1: - * 0) Single stream (SISO) - * 1) Dual stream (MIMO) - * 2) Triple stream (MIMO) - */ - -/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ -#define RATE_VHT_MCS_RATE_CODE_MSK 0xf -#define RATE_VHT_MCS_NSS_POS 4 -#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) - -/* - * Legacy OFDM rate format for bits 7:0 - * - * 3-0: 0xD) 6 Mbps - * 0xF) 9 Mbps - * 0x5) 12 Mbps - * 0x7) 18 Mbps - * 0x9) 24 Mbps - * 0xB) 36 Mbps - * 0x1) 48 Mbps - * 0x3) 54 Mbps - * (bits 7-4 are 0) - * - * Legacy CCK rate format for bits 7:0: - * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK): - * - * 6-0: 10) 1 Mbps - * 20) 2 Mbps - * 55) 5.5 Mbps - * 110) 11 Mbps - * (bit 7 is 0) - */ -#define RATE_LEGACY_RATE_MSK 0xff - - -/* - * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz - * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT - */ -#define RATE_MCS_CHAN_WIDTH_POS 11 -#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) - -/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ -#define RATE_MCS_SGI_POS 13 -#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) - -/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ -#define RATE_MCS_ANT_POS 14 -#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) -#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) -#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS) -#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ - RATE_MCS_ANT_B_MSK) -#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \ - RATE_MCS_ANT_C_MSK) -#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK -#define RATE_MCS_ANT_NUM 3 - -/* Bit 17-18: (0) SS, (1) SS*2 */ -#define RATE_MCS_STBC_POS 17 -#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS) -#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS) - -/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ -#define RATE_MCS_BF_POS 19 -#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) - -/* Bit 20: (0) ZLF is off, (1) ZLF is on */ -#define RATE_MCS_ZLF_POS 20 -#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS) - -/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ -#define RATE_MCS_DUP_POS 24 -#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS) - -/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ -#define RATE_MCS_LDPC_POS 27 -#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) - - -/* Link Quality definitions */ - -/* # entries in rate scale table to support Tx retries */ -#define LQ_MAX_RETRY_NUM 16 - -/* Link quality command flags bit fields */ - -/* Bit 0: (0) Don't use RTS (1) Use RTS */ -#define LQ_FLAG_USE_RTS_POS 0 -#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS) - -/* Bit 1-3: LQ command color. 
Used to match responses to LQ commands */ -#define LQ_FLAG_COLOR_POS 1 -#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) - -/* Bit 4-5: Tx RTS BW Signalling - * (0) No RTS BW signalling - * (1) Static BW signalling - * (2) Dynamic BW signalling - */ -#define LQ_FLAG_RTS_BW_SIG_POS 4 -#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS) -#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS) -#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS) - -/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection - * Dyanmic BW selection allows Tx with narrower BW then requested in rates - */ -#define LQ_FLAG_DYNAMIC_BW_POS 6 -#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS) - -/* Single Stream Tx Parameters (lq_cmd->ss_params) - * Flags to control a smart FW decision about whether BFER/STBC/SISO will be - * used for single stream Tx. - */ - -/* Bit 0-1: Max STBC streams allowed. Can be 0-3. - * (0) - No STBC allowed - * (1) - 2x1 STBC allowed (HT/VHT) - * (2) - 4x2 STBC allowed (HT/VHT) - * (3) - 3x2 STBC allowed (HT only) - * All our chips are at most 2 antennas so only (1) is valid for now. - */ -#define LQ_SS_STBC_ALLOWED_POS 0 -#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_MSK) - -/* 2x1 STBC is allowed */ -#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS) - -/* Bit 2: Beamformer (VHT only) is allowed */ -#define LQ_SS_BFER_ALLOWED_POS 2 -#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS) - -/* Bit 3: Force BFER or STBC for testing - * If this is set: - * If BFER is allowed then force the ucode to choose BFER else - * If STBC is allowed then force the ucode to choose STBC over SISO - */ -#define LQ_SS_FORCE_POS 3 -#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS) - -/* Bit 31: ss_params field is valid. Used for FW backward compatibility - * with other drivers which don't support the ss_params API yet - */ -#define LQ_SS_PARAMS_VALID_POS 31 -#define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS) - -/** - * struct iwl_lq_cmd - link quality command - * @sta_id: station to update - * @control: not used - * @flags: combination of LQ_FLAG_* - * @mimo_delim: the first SISO index in rs_table, which separates MIMO - * and SISO rates - * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD). - * Should be ANT_[ABC] - * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC] - * @initial_rate_index: first index from rs_table per AC category - * @agg_time_limit: aggregation max time threshold in usec/100, meaning - * value of 100 is one usec. Range is 100 to 8000 - * @agg_disable_start_th: try-count threshold for starting aggregation. - * If a frame has higher try-count, it should not be selected for - * starting an aggregation sequence. - * @agg_frame_cnt_limit: max frame count in an aggregation. - * 0: no limit - * 1: no aggregation (one frame per aggregation) - * 2 - 0x3f: maximal number of frames (up to 3f == 63) - * @rs_table: array of rates for each TX try, each is rate_n_flags, - * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP - * @ss_params: single stream features. declare whether STBC or BFER are allowed. 
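To make the rate_n_flags layout and the link quality command above concrete, here is a minimal sketch that builds one VHT rate value and drops it into every retry slot of struct iwl_lq_cmd. It assumes the definitions from this header are in scope; the raw antenna mask numbers and the single-rate retry table are illustrative only, not the driver's real rate-scaling logic.

#include <linux/kernel.h>
#include <linux/string.h>

/* Illustrative only: VHT MCS 7, two spatial streams, 80 MHz, antennas A+B,
 * encoded with the rate_n_flags bit fields described above.
 */
static u32 example_vht_rate_n_flags(void)
{
	u32 rate = 0;

	rate |= RATE_MCS_VHT_MSK;			/* VHT format (bit 26) */
	rate |= 7 & RATE_VHT_MCS_RATE_CODE_MSK;		/* VHT MCS 7 */
	rate |= 1 << RATE_VHT_MCS_NSS_POS;		/* NSS - 1 = 1 -> two streams */
	rate |= RATE_MCS_CHAN_WIDTH_80;			/* 80 MHz */
	rate |= RATE_MCS_ANT_AB_MSK;			/* transmit on antennas A and B */

	return rate;
}

/* Illustrative only: fill struct iwl_lq_cmd so every retry entry uses the
 * same rate.  Real rate scaling builds a descending rate ladder instead.
 */
static void example_fill_lq(struct iwl_lq_cmd *lq, u8 sta_id)
{
	u32 rate = example_vht_rate_n_flags();
	int i;

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = sta_id;
	lq->single_stream_ant_msk = 0x1;	/* ANT_A-style mask, assumption */
	lq->dual_stream_ant_msk = 0x3;		/* ANT_A | ANT_B, assumption */

	for (i = 0; i < LQ_MAX_RETRY_NUM; i++)
		lq->rs_table[i] = cpu_to_le32(rate);

	/* Tell firmware that ss_params carries valid single-stream hints. */
	lq->ss_params = cpu_to_le32(LQ_SS_PARAMS_VALID | LQ_SS_STBC_1SS_ALLOWED);
}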
- */ -struct iwl_lq_cmd { - u8 sta_id; - u8 reduced_tpc; - u16 control; - /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ - u8 flags; - u8 mimo_delim; - u8 single_stream_ant_msk; - u8 dual_stream_ant_msk; - u8 initial_rate_index[AC_NUM]; - /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */ - __le16 agg_time_limit; - u8 agg_disable_start_th; - u8 agg_frame_cnt_limit; - __le32 reserved2; - __le32 rs_table[LQ_MAX_RETRY_NUM]; - __le32 ss_params; -}; /* LINK_QUALITY_CMD_API_S_VER_1 */ -#endif /* __fw_api_rs_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h deleted file mode 100644 index 9b7e49d4620f..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h +++ /dev/null @@ -1,238 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __fw_api_rx_h__ -#define __fw_api_rx_h__ - -#define IWL_RX_INFO_PHY_CNT 8 -#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 -#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff -#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00 -#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000 -#define IWL_RX_INFO_ENERGY_ANT_A_POS 0 -#define IWL_RX_INFO_ENERGY_ANT_B_POS 8 -#define IWL_RX_INFO_ENERGY_ANT_C_POS 16 - -/** - * struct iwl_rx_phy_info - phy info - * (REPLY_RX_PHY_CMD = 0xc0) - * @non_cfg_phy_cnt: non configurable DSP phy data byte count - * @cfg_phy_cnt: configurable DSP phy data byte count - * @stat_id: configurable DSP phy data set ID - * @reserved1: - * @system_timestamp: GP2 at on air rise - * @timestamp: TSF at on air rise - * @beacon_time_stamp: beacon at on-air rise - * @phy_flags: general phy flags: band, modulation, ... - * @channel: channel number - * @non_cfg_phy_buf: for various implementations of non_cfg_phy - * @rate_n_flags: RATE_MCS_* - * @byte_count: frame's byte-count - * @frame_time: frame's time on the air, based on byte count and frame rate - * calculation - * @mac_active_msk: what MACs were active when the frame was received - * - * Before each Rx, the device sends this data. It contains PHY information - * about the reception of the packet. 
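A short sketch of how the per-antenna energy masks above can be applied to struct iwl_rx_phy_info. It assumes the energy word lives at non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX], as the index name suggests, and the conversion to a signal level is deliberately simplified.

#include <linux/kernel.h>

/* Sketch: pull the per-antenna energy fields out of the PHY info block and
 * pick the strongest chain.  The firmware reports energy as a positive
 * magnitude, so the strongest chain is the largest value.
 */
static int example_rx_signal(struct iwl_rx_phy_info *phy_info)
{
	u32 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
	u8 energy_a, energy_b, energy_c, max_energy;

	energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> IWL_RX_INFO_ENERGY_ANT_A_POS;
	energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS;
	energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> IWL_RX_INFO_ENERGY_ANT_C_POS;

	max_energy = max(energy_a, energy_b);
	max_energy = max(max_energy, energy_c);

	/* Simplified: a real driver maps this to a negative dBm-style value. */
	return -(int)max_energy;
}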
- */ -struct iwl_rx_phy_info { - u8 non_cfg_phy_cnt; - u8 cfg_phy_cnt; - u8 stat_id; - u8 reserved1; - __le32 system_timestamp; - __le64 timestamp; - __le32 beacon_time_stamp; - __le16 phy_flags; - __le16 channel; - __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; - __le32 rate_n_flags; - __le32 byte_count; - __le16 mac_active_msk; - __le16 frame_time; -} __packed; - -/* - * TCP offload Rx assist info - * - * bits 0:3 - reserved - * bits 4:7 - MIC CRC length - * bits 8:12 - MAC header length - * bit 13 - Padding indication - * bit 14 - A-AMSDU indication - * bit 15 - Offload enabled - */ -enum iwl_csum_rx_assist_info { - CSUM_RXA_RESERVED_MASK = 0x000f, - CSUM_RXA_MICSIZE_MASK = 0x00f0, - CSUM_RXA_HEADERLEN_MASK = 0x1f00, - CSUM_RXA_PADD = BIT(13), - CSUM_RXA_AMSDU = BIT(14), - CSUM_RXA_ENA = BIT(15) -}; - -/** - * struct iwl_rx_mpdu_res_start - phy info - * @assist: see CSUM_RX_ASSIST_ above - */ -struct iwl_rx_mpdu_res_start { - __le16 byte_count; - __le16 assist; -} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */ - -/** - * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags - * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band - * @RX_RES_PHY_FLAGS_MOD_CCK: - * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short - * @RX_RES_PHY_FLAGS_NARROW_BAND: - * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received - * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU - * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame - * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble - * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame - */ -enum iwl_rx_phy_flags { - RX_RES_PHY_FLAGS_BAND_24 = BIT(0), - RX_RES_PHY_FLAGS_MOD_CCK = BIT(1), - RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2), - RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3), - RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4), - RX_RES_PHY_FLAGS_ANTENNA_POS = 4, - RX_RES_PHY_FLAGS_AGG = BIT(7), - RX_RES_PHY_FLAGS_OFDM_HT = BIT(8), - RX_RES_PHY_FLAGS_OFDM_GF = BIT(9), - RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10), -}; - -/** - * enum iwl_mvm_rx_status - written by fw for each Rx packet - * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine - * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow - * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: - * @RX_MPDU_RES_STATUS_KEY_VALID: - * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: - * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed - * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked - * in the driver. - * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine - * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or - * alg = CCM only. Checks replay attack for 11w frames. Relevant only if - * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set. 
- * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted - * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP - * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM - * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP - * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC - * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted - * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm - * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted - * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP: - * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: - * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: - * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame - * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw - * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors - * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK: - * @RX_MPDU_RES_STATUS_STA_ID_MSK: - * @RX_MPDU_RES_STATUS_RRF_KILL: - * @RX_MPDU_RES_STATUS_FILTERING_MSK: - * @RX_MPDU_RES_STATUS2_FILTERING_MSK: - */ -enum iwl_mvm_rx_status { - RX_MPDU_RES_STATUS_CRC_OK = BIT(0), - RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1), - RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2), - RX_MPDU_RES_STATUS_KEY_VALID = BIT(3), - RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4), - RX_MPDU_RES_STATUS_ICV_OK = BIT(5), - RX_MPDU_RES_STATUS_MIC_OK = BIT(6), - RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), - RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7), - RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8), - RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8), - RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8), - RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8), - RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8), - RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8), - RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8), - RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8), - RX_MPDU_RES_STATUS_DEC_DONE = BIT(11), - RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12), - RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13), - RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14), - RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15), - RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), - RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), - RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000), - RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000), - RX_MPDU_RES_STATUS_RRF_KILL = BIT(29), - RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000), - RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000), -}; - -#endif /* __fw_api_rx_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h deleted file mode 100644 index 3a657e4b60ac..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ /dev/null @@ -1,730 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
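A hedged sketch of consuming the Rx status bits documented above: reject frames with CRC or overrun problems, require decryption to have completed for protected ciphers, and only trust the hardware checksum when both CSUM bits are set. The exact status-word plumbing is left out.

#include <linux/kernel.h>

/* Sketch: decide whether a received MPDU should be accepted based on the
 * status bits above.  Only a subset of the bits is checked.
 */
static bool example_rx_frame_ok(u32 status)
{
	/* Drop frames with a bad CRC or an Rx FIFO overrun. */
	if (!(status & RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(status & RX_MPDU_RES_STATUS_OVERRUN_OK))
		return false;

	/* For protected frames, require the decryption to have completed. */
	switch (status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
	case RX_MPDU_RES_STATUS_SEC_NO_ENC:
		return true;
	case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
	case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
	case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
		return !!(status & RX_MPDU_RES_STATUS_DEC_DONE);
	default:
		return false;	/* unknown cipher or decryption error */
	}
}

/* Sketch: the hardware checksum may only be trusted when both bits are set
 * (a driver would then mark the skb as CHECKSUM_UNNECESSARY).
 */
static bool example_rx_csum_ok(u32 status)
{
	return (status & RX_MPDU_RES_STATUS_CSUM_DONE) &&
	       (status & RX_MPDU_RES_STATUS_CSUM_OK);
}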
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __fw_api_scan_h__ -#define __fw_api_scan_h__ - -#include "fw-api.h" - -/* Scan Commands, Responses, Notifications */ - -/* Max number of IEs for direct SSID scans in a command */ -#define PROBE_OPTION_MAX 20 - -/** - * struct iwl_ssid_ie - directed scan network information element - * - * Up to 20 of these may appear in REPLY_SCAN_CMD, - * selected by "type" bit field in struct iwl_scan_channel; - * each channel may select different ssids from among the 20 entries. - * SSID IEs get transmitted in reverse order of entry. 
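A sketch of filling the direct_scan array described above from a caller-supplied SSID list; the local example_ssid type and the zero-length-entry convention for unused slots are assumptions for illustration.

#include <linux/ieee80211.h>
#include <linux/string.h>

/* Caller-side SSID representation; a hypothetical stand-in for whatever the
 * scan request carries (e.g. cfg80211's SSID list).
 */
struct example_ssid {
	u8 len;
	u8 ssid[IEEE80211_MAX_SSID_LEN];
};

/* Sketch: copy up to PROBE_OPTION_MAX SSIDs into the direct-scan array of a
 * scan command.  Unused slots are left with a zero length, which is assumed
 * here to mean "no SSID" for the firmware.
 */
static void example_fill_direct_scan(struct iwl_ssid_ie *direct_scan,
				     const struct example_ssid *ssids,
				     int n_ssids)
{
	int i;

	for (i = 0; i < PROBE_OPTION_MAX; i++) {
		direct_scan[i].id = WLAN_EID_SSID;
		if (i < n_ssids) {
			direct_scan[i].len = ssids[i].len;
			memcpy(direct_scan[i].ssid, ssids[i].ssid, ssids[i].len);
		} else {
			direct_scan[i].len = 0;
		}
	}
}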
- */ -struct iwl_ssid_ie { - u8 id; - u8 len; - u8 ssid[IEEE80211_MAX_SSID_LEN]; -} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ - -/* scan offload */ -#define IWL_SCAN_MAX_BLACKLIST_LEN 64 -#define IWL_SCAN_SHORT_BLACKLIST_LEN 16 -#define IWL_SCAN_MAX_PROFILES 11 -#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 - -/* Default watchdog (in MS) for scheduled scan iteration */ -#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000) - -#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) -#define CAN_ABORT_STATUS 1 - -#define IWL_FULL_SCAN_MULTIPLIER 5 -#define IWL_FAST_SCHED_SCAN_ITERATIONS 3 -#define IWL_MAX_SCHED_SCAN_PLANS 2 - -enum scan_framework_client { - SCAN_CLIENT_SCHED_SCAN = BIT(0), - SCAN_CLIENT_NETDETECT = BIT(1), - SCAN_CLIENT_ASSET_TRACKING = BIT(2), -}; - -/** - * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S - * @ssid: MAC address to filter out - * @reported_rssi: AP rssi reported to the host - * @client_bitmap: clients ignore this entry - enum scan_framework_client - */ -struct iwl_scan_offload_blacklist { - u8 ssid[ETH_ALEN]; - u8 reported_rssi; - u8 client_bitmap; -} __packed; - -enum iwl_scan_offload_network_type { - IWL_NETWORK_TYPE_BSS = 1, - IWL_NETWORK_TYPE_IBSS = 2, - IWL_NETWORK_TYPE_ANY = 3, -}; - -enum iwl_scan_offload_band_selection { - IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4, - IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8, - IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, -}; - -/** - * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S - * @ssid_index: index to ssid list in fixed part - * @unicast_cipher: encryption algorithm to match - bitmap - * @aut_alg: authentication algorithm to match - bitmap - * @network_type: enum iwl_scan_offload_network_type - * @band_selection: enum iwl_scan_offload_band_selection - * @client_bitmap: clients waiting for match - enum scan_framework_client - */ -struct iwl_scan_offload_profile { - u8 ssid_index; - u8 unicast_cipher; - u8 auth_alg; - u8 network_type; - u8 band_selection; - u8 client_bitmap; - u8 reserved[2]; -} __packed; - -/** - * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1 - * @blaclist: AP list to filter off from scan results - * @profiles: profiles to search for match - * @blacklist_len: length of blacklist - * @num_profiles: num of profiles in the list - * @match_notify: clients waiting for match found notification - * @pass_match: clients waiting for the results - * @active_clients: active clients bitmap - enum scan_framework_client - * @any_beacon_notify: clients waiting for match notification without match - */ -struct iwl_scan_offload_profile_cfg { - struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; - u8 blacklist_len; - u8 num_profiles; - u8 match_notify; - u8 pass_match; - u8 active_clients; - u8 any_beacon_notify; - u8 reserved[2]; -} __packed; - -/** - * iwl_scan_schedule_lmac - schedule of scan offload - * @delay: delay between iterations, in seconds. 
- * @iterations: num of scan iterations - * @full_scan_mul: number of partial scans before each full scan - */ -struct iwl_scan_schedule_lmac { - __le16 delay; - u8 iterations; - u8 full_scan_mul; -} __packed; /* SCAN_SCHEDULE_API_S */ - -enum iwl_scan_offload_complete_status { - IWL_SCAN_OFFLOAD_COMPLETED = 1, - IWL_SCAN_OFFLOAD_ABORTED = 2, -}; - -enum iwl_scan_ebs_status { - IWL_SCAN_EBS_SUCCESS, - IWL_SCAN_EBS_FAILED, - IWL_SCAN_EBS_CHAN_NOT_FOUND, - IWL_SCAN_EBS_INACTIVE, -}; - -/** - * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S - * @tx_flags: combination of TX_CMD_FLG_* - * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is - * cleared. Combination of RATE_MCS_* - * @sta_id: index of destination station in FW station table - * @reserved: for alignment and future use - */ -struct iwl_scan_req_tx_cmd { - __le32 tx_flags; - __le32 rate_n_flags; - u8 sta_id; - u8 reserved[3]; -} __packed; - -enum iwl_scan_channel_flags_lmac { - IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27), - IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28), -}; - -/** - * iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2 - * @flags: bits 1-20: directed scan to i'th ssid - * other bits &enum iwl_scan_channel_flags_lmac - * @channel_number: channel number 1-13 etc - * @iter_count: scan iteration on this channel - * @iter_interval: interval in seconds between iterations on one channel - */ -struct iwl_scan_channel_cfg_lmac { - __le32 flags; - __le16 channel_num; - __le16 iter_count; - __le32 iter_interval; -} __packed; - -/* - * iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1 - * @offset: offset in the data block - * @len: length of the segment - */ -struct iwl_scan_probe_segment { - __le16 offset; - __le16 len; -} __packed; - -/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2 - * @mac_header: first (and common) part of the probe - * @band_data: band specific data - * @common_data: last (and common) part of the probe - * @buf: raw data block - */ -struct iwl_scan_probe_req { - struct iwl_scan_probe_segment mac_header; - struct iwl_scan_probe_segment band_data[2]; - struct iwl_scan_probe_segment common_data; - u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; -} __packed; - -enum iwl_scan_channel_flags { - IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0), - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1), - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2), -}; - -/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S - * @flags: enum iwl_scan_channel_flags - * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is - * involved. - * 1 - EBS is disabled. - * 2 - every second scan will be full scan(and so on). - */ -struct iwl_scan_channel_opt { - __le16 flags; - __le16 non_ebs_ratio; -} __packed; - -/** - * iwl_mvm_lmac_scan_flags - * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses - * without filtering. - * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels - * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan - * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification - * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS multiple SSID matching - * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented - * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report - * and DS parameter set IEs into probe requests. 
- * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches - */ -enum iwl_mvm_lmac_scan_flags { - IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), - IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1), - IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2), - IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3), - IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), - IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), - IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), - IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), -}; - -enum iwl_scan_priority { - IWL_SCAN_PRIORITY_LOW, - IWL_SCAN_PRIORITY_MEDIUM, - IWL_SCAN_PRIORITY_HIGH, -}; - -enum iwl_scan_priority_ext { - IWL_SCAN_PRIORITY_EXT_0_LOWEST, - IWL_SCAN_PRIORITY_EXT_1, - IWL_SCAN_PRIORITY_EXT_2, - IWL_SCAN_PRIORITY_EXT_3, - IWL_SCAN_PRIORITY_EXT_4, - IWL_SCAN_PRIORITY_EXT_5, - IWL_SCAN_PRIORITY_EXT_6, - IWL_SCAN_PRIORITY_EXT_7_HIGHEST, -}; - -/** - * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1 - * @reserved1: for alignment and future use - * @channel_num: num of channels to scan - * @active-dwell: dwell time for active channels - * @passive-dwell: dwell time for passive channels - * @fragmented-dwell: dwell time for fragmented passive scan - * @reserved2: for alignment and future use - * @rx_chain_selct: PHY_RX_CHAIN_* flags - * @scan_flags: &enum iwl_mvm_lmac_scan_flags - * @max_out_time: max time (in TU) to be out of associated channel - * @suspend_time: pause scan this long (TUs) when returning to service channel - * @flags: RXON flags - * @filter_flags: RXON filter - * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz - * @direct_scan: list of SSIDs for directed active scan - * @scan_prio: enum iwl_scan_priority - * @iter_num: number of scan iterations - * @delay: delay in seconds before first iteration - * @schedule: two scheduling plans. The first one is finite, the second one can - * be infinite. - * @channel_opt: channel optimization options, for full and partial scan - * @data: channel configuration and probe request packet. 
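Because struct iwl_scan_req_lmac ends in a flexible data[] area carrying the per-channel configuration followed by the probe request template, the command must be sized accordingly. A sketch under that assumption:

#include <linux/slab.h>

/* Sketch: allocate an LMAC scan request whose trailing data[] holds
 * n_channels channel entries followed by the probe request template.
 */
static struct iwl_scan_req_lmac *example_alloc_lmac_scan(u8 n_channels)
{
	size_t size = sizeof(struct iwl_scan_req_lmac) +
		      n_channels * sizeof(struct iwl_scan_channel_cfg_lmac) +
		      sizeof(struct iwl_scan_probe_req);
	struct iwl_scan_req_lmac *cmd = kzalloc(size, GFP_KERNEL);

	if (!cmd)
		return NULL;

	cmd->n_channels = n_channels;
	/* Channel entries start at cmd->data; the probe request follows them. */
	return cmd;
}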
- */ -struct iwl_scan_req_lmac { - /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */ - __le32 reserved1; - u8 n_channels; - u8 active_dwell; - u8 passive_dwell; - u8 fragmented_dwell; - __le16 reserved2; - __le16 rx_chain_select; - __le32 scan_flags; - __le32 max_out_time; - __le32 suspend_time; - /* RX_ON_FLAGS_API_S_VER_1 */ - __le32 flags; - __le32 filter_flags; - struct iwl_scan_req_tx_cmd tx_cmd[2]; - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; - __le32 scan_prio; - /* SCAN_REQ_PERIODIC_PARAMS_API_S */ - __le32 iter_num; - __le32 delay; - struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS]; - struct iwl_scan_channel_opt channel_opt[2]; - u8 data[]; -} __packed; - -/** - * struct iwl_scan_results_notif - scan results for one channel - - * SCAN_RESULT_NTF_API_S_VER_3 - * @channel: which channel the results are from - * @band: 0 for 5.2 GHz, 1 for 2.4 GHz - * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request - * @num_probe_not_sent: # of request that weren't sent due to not enough time - * @duration: duration spent in channel, in usecs - */ -struct iwl_scan_results_notif { - u8 channel; - u8 band; - u8 probe_status; - u8 num_probe_not_sent; - __le32 duration; -} __packed; - -/** - * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels) - * SCAN_COMPLETE_NTF_API_S_VER_3 - * @scanned_channels: number of channels scanned (and number of valid results) - * @status: one of SCAN_COMP_STATUS_* - * @bt_status: BT on/off status - * @last_channel: last channel that was scanned - * @tsf_low: TSF timer (lower half) in usecs - * @tsf_high: TSF timer (higher half) in usecs - * @results: an array of scan results, only "scanned_channels" of them are valid - */ -struct iwl_lmac_scan_complete_notif { - u8 scanned_channels; - u8 status; - u8 bt_status; - u8 last_channel; - __le32 tsf_low; - __le32 tsf_high; - struct iwl_scan_results_notif results[]; -} __packed; - -/** - * iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 - * @last_schedule_line: last schedule line executed (fast or regular) - * @last_schedule_iteration: last scan iteration executed before scan abort - * @status: enum iwl_scan_offload_complete_status - * @ebs_status: EBS success status &enum iwl_scan_ebs_status - * @time_after_last_iter; time in seconds elapsed after last iteration - */ -struct iwl_periodic_scan_complete { - u8 last_schedule_line; - u8 last_schedule_iteration; - u8 status; - u8 ebs_status; - __le32 time_after_last_iter; - __le32 reserved; -} __packed; - -/* UMAC Scan API */ - -/* The maximum of either of these cannot exceed 8, because we use an - * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). 
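The scan complete notification above ends in a flexible results[] array of which only scanned_channels entries are valid; a sketch of walking it (the debug print is illustrative):

#include <linux/kernel.h>

/* Sketch: walk the per-channel results attached to a scan complete
 * notification.  Only the first scanned_channels entries are valid.
 */
static void example_dump_scan_results(struct iwl_lmac_scan_complete_notif *notif)
{
	int i;

	for (i = 0; i < notif->scanned_channels; i++) {
		struct iwl_scan_results_notif *res = &notif->results[i];

		pr_debug("channel %u band %u: probe_status %u, %u usec in channel\n",
			 res->channel, res->band, res->probe_status,
			 le32_to_cpu(res->duration));
	}
}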
- */ -#define IWL_MVM_MAX_UMAC_SCANS 8 -#define IWL_MVM_MAX_LMAC_SCANS 1 - -enum scan_config_flags { - SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), - SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1), - SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2), - SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3), - SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8), - SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9), - SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10), - SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11), - SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12), - SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13), - SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14), - SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15), - SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16), - SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17), - SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18), - SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19), - SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), - SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), - - /* Bits 26-31 are for num of channels in channel_array */ -#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) -}; - -enum scan_config_rates { - /* OFDM basic rates */ - SCAN_CONFIG_RATE_6M = BIT(0), - SCAN_CONFIG_RATE_9M = BIT(1), - SCAN_CONFIG_RATE_12M = BIT(2), - SCAN_CONFIG_RATE_18M = BIT(3), - SCAN_CONFIG_RATE_24M = BIT(4), - SCAN_CONFIG_RATE_36M = BIT(5), - SCAN_CONFIG_RATE_48M = BIT(6), - SCAN_CONFIG_RATE_54M = BIT(7), - /* CCK basic rates */ - SCAN_CONFIG_RATE_1M = BIT(8), - SCAN_CONFIG_RATE_2M = BIT(9), - SCAN_CONFIG_RATE_5M = BIT(10), - SCAN_CONFIG_RATE_11M = BIT(11), - - /* Bits 16-27 are for supported rates */ -#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16) -}; - -enum iwl_channel_flags { - IWL_CHANNEL_FLAG_EBS = BIT(0), - IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1), - IWL_CHANNEL_FLAG_EBS_ADD = BIT(2), - IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), -}; - -/** - * struct iwl_scan_config - * @flags: enum scan_config_flags - * @tx_chains: valid_tx antenna - ANT_* definitions - * @rx_chains: valid_rx antenna - ANT_* definitions - * @legacy_rates: default legacy rates - enum scan_config_rates - * @out_of_channel_time: default max out of serving channel time - * @suspend_time: default max suspend time - * @dwell_active: default dwell time for active scan - * @dwell_passive: default dwell time for passive scan - * @dwell_fragmented: default dwell time for fragmented scan - * @reserved: for future use and alignment - * @mac_addr: default mac address to be used in probes - * @bcast_sta_id: the index of the station in the fw - * @channel_flags: default channel flags - enum iwl_channel_flags - * scan_config_channel_flag - * @channel_array: default supported channels - */ -struct iwl_scan_config { - __le32 flags; - __le32 tx_chains; - __le32 rx_chains; - __le32 legacy_rates; - __le32 out_of_channel_time; - __le32 suspend_time; - u8 dwell_active; - u8 dwell_passive; - u8 dwell_fragmented; - u8 reserved; - u8 mac_addr[ETH_ALEN]; - u8 bcast_sta_id; - u8 channel_flags; - u8 channel_array[]; -} __packed; /* SCAN_CONFIG_DB_CMD_API_S */ - -/** - * iwl_umac_scan_flags - *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request - * can be preempted by other scan requests with higher priority. - * The low priority scan will be resumed when the higher proirity scan is - * completed. - *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver - * when scan starts. 
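The SCAN_CONFIG_N_CHANNELS() and SCAN_CONFIG_SUPPORTED_RATE() helpers above pack counts and rate bitmaps into the same words as the flag bits, and struct iwl_scan_config again ends in a flexible channel_array[]. A sketch of assembling the flags and sizing the command, with an arbitrary 6M-only rate choice; the caller is assumed to have allocated the returned number of bytes:

#include <linux/kernel.h>

/* Sketch: fill the flags and legacy rates of a scan config command and
 * return the total command length (one u8 channel number per entry in
 * channel_array[]).
 */
static size_t example_scan_config(struct iwl_scan_config *cfg, u8 n_channels)
{
	u32 flags = SCAN_CONFIG_FLAG_ACTIVATE |
		    SCAN_CONFIG_FLAG_SET_TX_CHAINS |
		    SCAN_CONFIG_FLAG_SET_RX_CHAINS |
		    SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
		    SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
		    SCAN_CONFIG_N_CHANNELS(n_channels);
	u32 rates = SCAN_CONFIG_RATE_6M |
		    SCAN_CONFIG_SUPPORTED_RATE(SCAN_CONFIG_RATE_6M);

	cfg->flags = cpu_to_le32(flags);
	cfg->legacy_rates = cpu_to_le32(rates);
	/* channel_array[0..n_channels-1] still has to be filled by the caller. */

	return sizeof(*cfg) + n_channels;
}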
- */ -enum iwl_umac_scan_flags { - IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0), - IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1), -}; - -enum iwl_umac_scan_uid_offsets { - IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0, - IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8, -}; - -enum iwl_umac_scan_general_flags { - IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), - IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), - IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), - IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), - IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), - IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), - IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), - IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), - IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), - IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9) -}; - -/** - * struct iwl_scan_channel_cfg_umac - * @flags: bitmap - 0-19: directed scan to i'th ssid. - * @channel_num: channel number 1-13 etc. - * @iter_count: repetition count for the channel. - * @iter_interval: interval between two scan iterations on one channel. - */ -struct iwl_scan_channel_cfg_umac { - __le32 flags; - u8 channel_num; - u8 iter_count; - __le16 iter_interval; -} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */ - -/** - * struct iwl_scan_umac_schedule - * @interval: interval in seconds between scan iterations - * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop - * @reserved: for alignment and future use - */ -struct iwl_scan_umac_schedule { - __le16 interval; - u8 iter_count; - u8 reserved; -} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */ - -/** - * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command - * parameters following channels configuration array. - * @schedule: two scheduling plans. - * @delay: delay in TUs before starting the first scan iteration - * @reserved: for future use and alignment - * @preq: probe request with IEs blocks - * @direct_scan: list of SSIDs for directed active scan - */ -struct iwl_scan_req_umac_tail { - /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ - struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; - __le16 delay; - __le16 reserved; - /* SCAN_PROBE_PARAMS_API_S_VER_1 */ - struct iwl_scan_probe_req preq; - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; -} __packed; - -/** - * struct iwl_scan_req_umac - * @flags: &enum iwl_umac_scan_flags - * @uid: scan id, &enum iwl_umac_scan_uid_offsets - * @ooc_priority: out of channel priority - &enum iwl_scan_priority - * @general_flags: &enum iwl_umac_scan_general_flags - * @reserved1: for future use and alignment - * @active_dwell: dwell time for active scan - * @passive_dwell: dwell time for passive scan - * @fragmented_dwell: dwell time for fragmented passive scan - * @max_out_time: max out of serving channel time - * @suspend_time: max suspend time - * @scan_priority: scan internal prioritization &enum iwl_scan_priority - * @channel_flags: &enum iwl_scan_channel_flags - * @n_channels: num of channels in scan request - * @reserved2: for future use and alignment - * @data: &struct iwl_scan_channel_cfg_umac and - * &struct iwl_scan_req_umac_tail - */ -struct iwl_scan_req_umac { - __le32 flags; - __le32 uid; - __le32 ooc_priority; - /* SCAN_GENERAL_PARAMS_API_S_VER_1 */ - __le32 general_flags; - u8 reserved1; - u8 active_dwell; - u8 passive_dwell; - u8 fragmented_dwell; - __le32 max_out_time; - __le32 suspend_time; - __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ - u8 channel_flags; - u8 n_channels; - __le16 reserved2; - u8 data[]; -} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ - -/** - * struct 
iwl_umac_scan_abort - * @uid: scan id, &enum iwl_umac_scan_uid_offsets - * @flags: reserved - */ -struct iwl_umac_scan_abort { - __le32 uid; - __le32 flags; -} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ - -/** - * struct iwl_umac_scan_complete - * @uid: scan id, &enum iwl_umac_scan_uid_offsets - * @last_schedule: last scheduling line - * @last_iter: last scan iteration number - * @scan status: &enum iwl_scan_offload_complete_status - * @ebs_status: &enum iwl_scan_ebs_status - * @time_from_last_iter: time elapsed from last iteration - * @reserved: for future use - */ -struct iwl_umac_scan_complete { - __le32 uid; - u8 last_schedule; - u8 last_iter; - u8 status; - u8 ebs_status; - __le32 time_from_last_iter; - __le32 reserved; -} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */ - -#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5 -/** - * struct iwl_scan_offload_profile_match - match information - * @bssid: matched bssid - * @channel: channel where the match occurred - * @energy: - * @matching_feature: - * @matching_channels: bitmap of channels that matched, referencing - * the channels passed in tue scan offload request - */ -struct iwl_scan_offload_profile_match { - u8 bssid[ETH_ALEN]; - __le16 reserved; - u8 channel; - u8 energy; - u8 matching_feature; - u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN]; -} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ - -/** - * struct iwl_scan_offload_profiles_query - match results query response - * @matched_profiles: bitmap of matched profiles, referencing the - * matches passed in the scan offload request - * @last_scan_age: age of the last offloaded scan - * @n_scans_done: number of offloaded scans done - * @gp2_d0u: GP2 when D0U occurred - * @gp2_invoked: GP2 when scan offload was invoked - * @resume_while_scanning: not used - * @self_recovery: obsolete - * @reserved: reserved - * @matches: array of match information, one for each match - */ -struct iwl_scan_offload_profiles_query { - __le32 matched_profiles; - __le32 last_scan_age; - __le32 n_scans_done; - __le32 gp2_d0u; - __le32 gp2_invoked; - u8 resume_while_scanning; - u8 self_recovery; - __le16 reserved; - struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; -} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ - -/** - * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration - * @uid: scan id, &enum iwl_umac_scan_uid_offsets - * @scanned_channels: number of channels scanned and number of valid elements in - * results array - * @status: one of SCAN_COMP_STATUS_* - * @bt_status: BT on/off status - * @last_channel: last channel that was scanned - * @tsf_low: TSF timer (lower half) in usecs - * @tsf_high: TSF timer (higher half) in usecs - * @results: array of scan results, only "scanned_channels" of them are valid - */ -struct iwl_umac_scan_iter_complete_notif { - __le32 uid; - u8 scanned_channels; - u8 status; - u8 bt_status; - u8 last_channel; - __le32 tsf_low; - __le32 tsf_high; - struct iwl_scan_results_notif results[]; -} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */ - -#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h deleted file mode 100644 index 493a8bdfbc9e..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h +++ /dev/null @@ -1,414 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
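struct iwl_scan_offload_profiles_query above pairs a matched_profiles bitmap with one match entry per profile; a sketch of walking it using the standard kernel bitmap iterator:

#include <linux/bitops.h>
#include <linux/kernel.h>

/* Sketch: report which scheduled-scan profiles matched.  matched_profiles is
 * a bitmap indexed like the profiles array of the original request.
 */
static void example_report_matches(struct iwl_scan_offload_profiles_query *query)
{
	unsigned long matches = le32_to_cpu(query->matched_profiles);
	unsigned int i;

	for_each_set_bit(i, &matches, IWL_SCAN_MAX_PROFILES) {
		struct iwl_scan_offload_profile_match *m = &query->matches[i];

		pr_debug("profile %u matched %pM on channel %u\n",
			 i, m->bssid, m->channel);
	}
}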
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ - -#ifndef __fw_api_sta_h__ -#define __fw_api_sta_h__ - -/** - * enum iwl_sta_flags - flags for the ADD_STA host command - * @STA_FLG_REDUCED_TX_PWR_CTRL: - * @STA_FLG_REDUCED_TX_PWR_DATA: - * @STA_FLG_DISABLE_TX: set if TX should be disabled - * @STA_FLG_PS: set if STA is in Power Save - * @STA_FLG_INVALID: set if STA is invalid - * @STA_FLG_DLP_EN: Direct Link Protocol is enabled - * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs - * @STA_FLG_DRAIN_FLOW: drain flow - * @STA_FLG_PAN: STA is for PAN interface - * @STA_FLG_CLASS_AUTH: - * @STA_FLG_CLASS_ASSOC: - * @STA_FLG_CLASS_MIMO_PROT: - * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU - * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation - * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is - * initialised by driver and can be updated by fw upon reception of - * action frames that can change the channel width. When cleared the fw - * will send all the frames in 20MHz even when FAT channel is requested. - * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the - * driver and can be updated by fw upon reception of action frames. - * @STA_FLG_MFP_EN: Management Frame Protection - */ -enum iwl_sta_flags { - STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3), - STA_FLG_REDUCED_TX_PWR_DATA = BIT(6), - - STA_FLG_DISABLE_TX = BIT(4), - - STA_FLG_PS = BIT(8), - STA_FLG_DRAIN_FLOW = BIT(12), - STA_FLG_PAN = BIT(13), - STA_FLG_CLASS_AUTH = BIT(14), - STA_FLG_CLASS_ASSOC = BIT(15), - STA_FLG_RTS_MIMO_PROT = BIT(17), - - STA_FLG_MAX_AGG_SIZE_SHIFT = 19, - STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), - - STA_FLG_AGG_MPDU_DENS_SHIFT = 23, - STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT), - STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT), - STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT), - STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), - STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), - - STA_FLG_FAT_EN_20MHZ = (0 << 26), - STA_FLG_FAT_EN_40MHZ = (1 << 26), - STA_FLG_FAT_EN_80MHZ = (2 << 26), - STA_FLG_FAT_EN_160MHZ = (3 << 26), - STA_FLG_FAT_EN_MSK = (3 << 26), - - STA_FLG_MIMO_EN_SISO = (0 << 28), - STA_FLG_MIMO_EN_MIMO2 = (1 << 28), - STA_FLG_MIMO_EN_MIMO3 = (2 << 28), - STA_FLG_MIMO_EN_MSK = (3 << 28), -}; - -/** - * enum iwl_sta_key_flag - key flags for the ADD_STA host command - * @STA_KEY_FLG_NO_ENC: no encryption - * @STA_KEY_FLG_WEP: WEP encryption algorithm - * @STA_KEY_FLG_CCM: CCMP encryption algorithm - * @STA_KEY_FLG_TKIP: TKIP encryption algorithm - * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support) - * @STA_KEY_FLG_CMAC: CMAC encryption algorithm - * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm - * @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value - * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from - * station info array (1 - 
n 1X mode) - * @STA_KEY_FLG_KEYID_MSK: the index of the key - * @STA_KEY_NOT_VALID: key is invalid - * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key - * @STA_KEY_MULTICAST: set for multical key - * @STA_KEY_MFP: key is used for Management Frame Protection - */ -enum iwl_sta_key_flag { - STA_KEY_FLG_NO_ENC = (0 << 0), - STA_KEY_FLG_WEP = (1 << 0), - STA_KEY_FLG_CCM = (2 << 0), - STA_KEY_FLG_TKIP = (3 << 0), - STA_KEY_FLG_EXT = (4 << 0), - STA_KEY_FLG_CMAC = (6 << 0), - STA_KEY_FLG_ENC_UNKNOWN = (7 << 0), - STA_KEY_FLG_EN_MSK = (7 << 0), - - STA_KEY_FLG_WEP_KEY_MAP = BIT(3), - STA_KEY_FLG_KEYID_POS = 8, - STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS), - STA_KEY_NOT_VALID = BIT(11), - STA_KEY_FLG_WEP_13BYTES = BIT(12), - STA_KEY_MULTICAST = BIT(14), - STA_KEY_MFP = BIT(15), -}; - -/** - * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed - * @STA_MODIFY_KEY: this command modifies %key - * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx - * @STA_MODIFY_TX_RATE: unused - * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid - * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid - * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count - * @STA_MODIFY_PROT_TH: - * @STA_MODIFY_QUEUES: modify the queues used by this station - */ -enum iwl_sta_modify_flag { - STA_MODIFY_KEY = BIT(0), - STA_MODIFY_TID_DISABLE_TX = BIT(1), - STA_MODIFY_TX_RATE = BIT(2), - STA_MODIFY_ADD_BA_TID = BIT(3), - STA_MODIFY_REMOVE_BA_TID = BIT(4), - STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5), - STA_MODIFY_PROT_TH = BIT(6), - STA_MODIFY_QUEUES = BIT(7), -}; - -#define STA_MODE_MODIFY 1 - -/** - * enum iwl_sta_sleep_flag - type of sleep of the station - * @STA_SLEEP_STATE_AWAKE: - * @STA_SLEEP_STATE_PS_POLL: - * @STA_SLEEP_STATE_UAPSD: - * @STA_SLEEP_STATE_MOREDATA: set more-data bit on - * (last) released frame - */ -enum iwl_sta_sleep_flag { - STA_SLEEP_STATE_AWAKE = 0, - STA_SLEEP_STATE_PS_POLL = BIT(0), - STA_SLEEP_STATE_UAPSD = BIT(1), - STA_SLEEP_STATE_MOREDATA = BIT(2), -}; - -/* STA ID and color bits definitions */ -#define STA_ID_SEED (0x0f) -#define STA_ID_POS (0) -#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS) - -#define STA_COLOR_SEED (0x7) -#define STA_COLOR_POS (4) -#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS) - -#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \ - (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS) -#define STA_ID_N_COLOR_GET_ID(id_n_color) \ - (((id_n_color) & STA_ID_MSK) >> STA_ID_POS) - -#define STA_KEY_MAX_NUM (16) -#define STA_KEY_IDX_INVALID (0xff) -#define STA_KEY_MAX_DATA_KEY_NUM (4) -#define IWL_MAX_GLOBAL_KEYS (4) -#define STA_KEY_LEN_WEP40 (5) -#define STA_KEY_LEN_WEP104 (13) - -/** - * struct iwl_mvm_keyinfo - key information - * @key_flags: type %iwl_sta_key_flag - * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection - * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx - * @key_offset: key offset in the fw's key table - * @key: 16-byte unicast decryption key - * @tx_secur_seq_cnt: initial RSC / PN needed for replay check - * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only - * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only - */ -struct iwl_mvm_keyinfo { - __le16 key_flags; - u8 tkip_rx_tsc_byte2; - u8 reserved1; - __le16 tkip_rx_ttak[5]; - u8 key_offset; - u8 reserved2; - u8 key[16]; - __le64 tx_secur_seq_cnt; - __le64 hw_tkip_mic_rx_key; - __le64 hw_tkip_mic_tx_key; -} __packed; - -/** - * struct iwl_mvm_add_sta_cmd - Add/modify a station in the 
fw's sta table. - * ( REPLY_ADD_STA = 0x18 ) - * @add_modify: 1: modify existing, 0: add new station - * @awake_acs: - * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable - * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. - * @mac_id_n_color: the Mac context this station belongs to - * @addr[ETH_ALEN]: station's MAC address - * @sta_id: index of station in uCode's station table - * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave - * alone. 1 - modify, 0 - don't change. - * @station_flags: look at %iwl_sta_flags - * @station_flags_msk: what of %station_flags have changed - * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) - * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set - * add_immediate_ba_ssn. - * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) - * Set %STA_MODIFY_REMOVE_BA_TID to use this field - * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with - * add_immediate_ba_tid. - * @sleep_tx_count: number of packets to transmit to station even though it is - * asleep. Used to synchronise PS-poll and u-APSD responses while ucode - * keeps track of STA sleep state. - * @sleep_state_flags: Look at %iwl_sta_sleep_flag. - * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP - * mac-addr. - * @beamform_flags: beam forming controls - * @tfd_queue_msk: tfd queues used by this station - * - * The device contains an internal table of per-station information, with info - * on security keys, aggregation parameters, and Tx rates for initial Tx - * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). - * - * ADD_STA sets up the table entry for one station, either creating a new - * entry, or modifying a pre-existing one. - */ -struct iwl_mvm_add_sta_cmd { - u8 add_modify; - u8 awake_acs; - __le16 tid_disable_tx; - __le32 mac_id_n_color; - u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ - __le16 reserved2; - u8 sta_id; - u8 modify_mask; - __le16 reserved3; - __le32 station_flags; - __le32 station_flags_msk; - u8 add_immediate_ba_tid; - u8 remove_immediate_ba_tid; - __le16 add_immediate_ba_ssn; - __le16 sleep_tx_count; - __le16 sleep_state_flags; - __le16 assoc_id; - __le16 beamform_flags; - __le32 tfd_queue_msk; -} __packed; /* ADD_STA_CMD_API_S_VER_7 */ - -/** - * struct iwl_mvm_add_sta_key_cmd - add/modify sta key - * ( REPLY_ADD_STA_KEY = 0x17 ) - * @sta_id: index of station in uCode's station table - * @key_offset: key offset in key storage - * @key_flags: type %iwl_sta_key_flag - * @key: key material data - * @key2: key material data - * @rx_secur_seq_cnt: RX security sequence counter for the key - * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection - * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx - */ -struct iwl_mvm_add_sta_key_cmd { - u8 sta_id; - u8 key_offset; - __le16 key_flags; - u8 key[16]; - u8 key2[16]; - u8 rx_secur_seq_cnt[16]; - u8 tkip_rx_tsc_byte2; - u8 reserved; - __le16 tkip_rx_ttak[5]; -} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */ - -/** - * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command - * @ADD_STA_SUCCESS: operation was executed successfully - * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table - * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session - * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that - * doesn't exist. 
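Tying the ADD_STA pieces above together: a sketch that prepares struct iwl_mvm_add_sta_cmd for a brand-new station and classifies the status the firmware returns. The send path itself (REPLY_ADD_STA and extraction of the status byte from the response) is driver plumbing outside this header and is only indicated in comments; the tid_disable_tx choice is illustrative.

#include <linux/etherdevice.h>
#include <linux/string.h>

/* Sketch: prepare an ADD_STA command for a brand-new station entry. */
static void example_prepare_add_sta(struct iwl_mvm_add_sta_cmd *cmd,
				    const u8 *addr, u8 sta_id,
				    u32 mac_id_n_color, u32 tfd_queue_msk)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->add_modify = 0;				/* 0 = add a new station */
	cmd->sta_id = sta_id;
	cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
	cmd->tfd_queue_msk = cpu_to_le32(tfd_queue_msk);
	/* Illustrative: disable AMPDU on all TIDs until block-ack is set up. */
	cmd->tid_disable_tx = cpu_to_le16(0xffff);
	memcpy(cmd->addr, addr, ETH_ALEN);

	/* The command would now be sent as REPLY_ADD_STA (0x18); the firmware
	 * answers with a status word, interpreted below.
	 */
}

/* Sketch: classify the ADD_STA status (real code first masks the status byte
 * out of the full response word).
 */
static bool example_add_sta_ok(u32 status)
{
	switch (status) {
	case ADD_STA_SUCCESS:
		return true;
	case ADD_STA_STATIONS_OVERLOAD:
	case ADD_STA_IMMEDIATE_BA_FAILURE:
	case ADD_STA_MODIFY_NON_EXISTING_STA:
	default:
		return false;
	}
}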
- */ -enum iwl_mvm_add_sta_rsp_status { - ADD_STA_SUCCESS = 0x1, - ADD_STA_STATIONS_OVERLOAD = 0x2, - ADD_STA_IMMEDIATE_BA_FAILURE = 0x4, - ADD_STA_MODIFY_NON_EXISTING_STA = 0x8, -}; - -/** - * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table - * ( REMOVE_STA = 0x19 ) - * @sta_id: the station id of the station to be removed - */ -struct iwl_mvm_rm_sta_cmd { - u8 sta_id; - u8 reserved[3]; -} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */ - -/** - * struct iwl_mvm_mgmt_mcast_key_cmd - * ( MGMT_MCAST_KEY = 0x1f ) - * @ctrl_flags: %iwl_sta_key_flag - * @IGTK: - * @K1: unused - * @K2: unused - * @sta_id: station ID that support IGTK - * @key_id: - * @receive_seq_cnt: initial RSC/PN needed for replay check - */ -struct iwl_mvm_mgmt_mcast_key_cmd { - __le32 ctrl_flags; - u8 IGTK[16]; - u8 K1[16]; - u8 K2[16]; - __le32 key_id; - __le32 sta_id; - __le64 receive_seq_cnt; -} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */ - -struct iwl_mvm_wep_key { - u8 key_index; - u8 key_offset; - __le16 reserved1; - u8 key_size; - u8 reserved2[3]; - u8 key[16]; -} __packed; - -struct iwl_mvm_wep_key_cmd { - __le32 mac_id_n_color; - u8 num_keys; - u8 decryption_type; - u8 flags; - u8 reserved; - struct iwl_mvm_wep_key wep_key[0]; -} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ - -/** - * struct iwl_mvm_eosp_notification - EOSP notification from firmware - * @remain_frame_count: # of frames remaining, non-zero if SP was cut - * short by GO absence - * @sta_id: station ID - */ -struct iwl_mvm_eosp_notification { - __le32 remain_frame_count; - __le32 sta_id; -} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */ - -#endif /* __fw_api_sta_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h deleted file mode 100644 index 0c321f63ee42..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h +++ /dev/null @@ -1,284 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. 
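struct iwl_mvm_wep_key_cmd above uses a zero-length wep_key[0] array, so the command length is the fixed header plus num_keys entries; a sketch of the sizing, with the key material handling omitted:

#include <linux/slab.h>

/* Sketch: allocate a WEP key command with room for num_keys entries after
 * the fixed header (wep_key[0] is a zero-length array).
 */
static struct iwl_mvm_wep_key_cmd *example_alloc_wep_cmd(u8 num_keys)
{
	size_t size = sizeof(struct iwl_mvm_wep_key_cmd) +
		      num_keys * sizeof(struct iwl_mvm_wep_key);
	struct iwl_mvm_wep_key_cmd *cmd = kzalloc(size, GFP_KERNEL);

	if (!cmd)
		return NULL;

	cmd->num_keys = num_keys;
	/* Each cmd->wep_key[i] is then filled with key_index, key_size
	 * (STA_KEY_LEN_WEP40 or STA_KEY_LEN_WEP104) and the key bytes.
	 */
	return cmd;
}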
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __fw_api_stats_h__ -#define __fw_api_stats_h__ -#include "fw-api-mac.h" - -struct mvm_statistics_dbg { - __le32 burst_check; - __le32 burst_count; - __le32 wait_for_silence_timeout_cnt; - __le32 reserved[3]; -} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */ - -struct mvm_statistics_div { - __le32 tx_on_a; - __le32 tx_on_b; - __le32 exec_time; - __le32 probe_time; - __le32 rssi_ant; - __le32 reserved2; -} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */ - -struct mvm_statistics_rx_non_phy { - __le32 bogus_cts; /* CTS received when not expecting CTS */ - __le32 bogus_ack; /* ACK received when not expecting ACK */ - __le32 non_bssid_frames; /* number of frames with BSSID that - * doesn't belong to the STA BSSID */ - __le32 filtered_frames; /* count frames that were dumped in the - * filtering process */ - __le32 non_channel_beacons; /* beacons with our bss id but not on - * our serving channel */ - __le32 channel_beacons; /* beacons with our bss id and in our - * serving channel */ - __le32 num_missed_bcon; /* number of missed beacons */ - __le32 adc_rx_saturation_time; /* count in 0.8us units the time the - * ADC was in saturation */ - __le32 ina_detection_search_time;/* total time (in 0.8us) searched - * for INA */ - __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ - __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ - __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ - __le32 interference_data_flag; /* flag for interference data - * availability. 1 when data is - * available. 
*/ - __le32 channel_load; /* counts RX Enable time in uSec */ - __le32 dsp_false_alarms; /* DSP false alarm (both OFDM - * and CCK) counter */ - __le32 beacon_rssi_a; - __le32 beacon_rssi_b; - __le32 beacon_rssi_c; - __le32 beacon_energy_a; - __le32 beacon_energy_b; - __le32 beacon_energy_c; - __le32 num_bt_kills; - __le32 mac_id; - __le32 directed_data_mpdu; -} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */ - -struct mvm_statistics_rx_phy { - __le32 ina_cnt; - __le32 fina_cnt; - __le32 plcp_err; - __le32 crc32_err; - __le32 overrun_err; - __le32 early_overrun_err; - __le32 crc32_good; - __le32 false_alarm_cnt; - __le32 fina_sync_err_cnt; - __le32 sfd_timeout; - __le32 fina_timeout; - __le32 unresponded_rts; - __le32 rxe_frame_lmt_overrun; - __le32 sent_ack_cnt; - __le32 sent_cts_cnt; - __le32 sent_ba_rsp_cnt; - __le32 dsp_self_kill; - __le32 mh_format_err; - __le32 re_acq_main_rssi_sum; - __le32 reserved; -} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */ - -struct mvm_statistics_rx_ht_phy { - __le32 plcp_err; - __le32 overrun_err; - __le32 early_overrun_err; - __le32 crc32_good; - __le32 crc32_err; - __le32 mh_format_err; - __le32 agg_crc32_good; - __le32 agg_mpdu_cnt; - __le32 agg_cnt; - __le32 unsupport_mcs; -} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */ - -struct mvm_statistics_tx_non_phy { - __le32 preamble_cnt; - __le32 rx_detected_cnt; - __le32 bt_prio_defer_cnt; - __le32 bt_prio_kill_cnt; - __le32 few_bytes_cnt; - __le32 cts_timeout; - __le32 ack_timeout; - __le32 expected_ack_cnt; - __le32 actual_ack_cnt; - __le32 dump_msdu_cnt; - __le32 burst_abort_next_frame_mismatch_cnt; - __le32 burst_abort_missing_next_frame_cnt; - __le32 cts_timeout_collision; - __le32 ack_or_ba_timeout_collision; -} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */ - -#define MAX_CHAINS 3 - -struct mvm_statistics_tx_non_phy_agg { - __le32 ba_timeout; - __le32 ba_reschedule_frames; - __le32 scd_query_agg_frame_cnt; - __le32 scd_query_no_agg; - __le32 scd_query_agg; - __le32 scd_query_mismatch; - __le32 frame_not_ready; - __le32 underrun; - __le32 bt_prio_kill; - __le32 rx_ba_rsp_cnt; - __s8 txpower[MAX_CHAINS]; - __s8 reserved; - __le32 reserved2; -} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */ - -struct mvm_statistics_tx_channel_width { - __le32 ext_cca_narrow_ch20[1]; - __le32 ext_cca_narrow_ch40[2]; - __le32 ext_cca_narrow_ch80[3]; - __le32 ext_cca_narrow_ch160[4]; - __le32 last_tx_ch_width_indx; - __le32 rx_detected_per_ch_width[4]; - __le32 success_per_ch_width[4]; - __le32 fail_per_ch_width[4]; -}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */ - -struct mvm_statistics_tx { - struct mvm_statistics_tx_non_phy general; - struct mvm_statistics_tx_non_phy_agg agg; - struct mvm_statistics_tx_channel_width channel_width; -} __packed; /* STATISTICS_TX_API_S_VER_4 */ - - -struct mvm_statistics_bt_activity { - __le32 hi_priority_tx_req_cnt; - __le32 hi_priority_tx_denied_cnt; - __le32 lo_priority_tx_req_cnt; - __le32 lo_priority_tx_denied_cnt; - __le32 hi_priority_rx_req_cnt; - __le32 hi_priority_rx_denied_cnt; - __le32 lo_priority_rx_req_cnt; - __le32 lo_priority_rx_denied_cnt; -} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ - -struct mvm_statistics_general_v8 { - __le32 radio_temperature; - __le32 radio_voltage; - struct mvm_statistics_dbg dbg; - __le32 sleep_time; - __le32 slots_out; - __le32 slots_idle; - __le32 ttl_timestamp; - struct mvm_statistics_div slow_div; - __le32 rx_enable_counter; - /* - * num_of_sos_states: - * count the number of times we have to re-tune - * in order to get 
out of bad PHY status - */ - __le32 num_of_sos_states; - __le32 beacon_filtered; - __le32 missed_beacons; - u8 beacon_filter_average_energy; - u8 beacon_filter_reason; - u8 beacon_filter_current_energy; - u8 beacon_filter_reserved; - __le32 beacon_filter_delta_time; - struct mvm_statistics_bt_activity bt_activity; - __le64 rx_time; - __le64 on_time_rf; - __le64 on_time_scan; - __le64 tx_time; - __le32 beacon_counter[NUM_MAC_INDEX]; - u8 beacon_average_energy[NUM_MAC_INDEX]; - u8 reserved[4 - (NUM_MAC_INDEX % 4)]; -} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ - -struct mvm_statistics_rx { - struct mvm_statistics_rx_phy ofdm; - struct mvm_statistics_rx_phy cck; - struct mvm_statistics_rx_non_phy general; - struct mvm_statistics_rx_ht_phy ofdm_ht; -} __packed; /* STATISTICS_RX_API_S_VER_3 */ - -/* - * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) - * - * By default, uCode issues this notification after receiving a beacon - * while associated. To disable this behavior, set DISABLE_NOTIF flag in the - * STATISTICS_CMD (0x9c), below. - */ - -struct iwl_notif_statistics_v10 { - __le32 flag; - struct mvm_statistics_rx rx; - struct mvm_statistics_tx tx; - struct mvm_statistics_general_v8 general; -} __packed; /* STATISTICS_NTFY_API_S_VER_10 */ - -#define IWL_STATISTICS_FLG_CLEAR 0x1 -#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2 - -struct iwl_statistics_cmd { - __le32 flags; -} __packed; /* STATISTICS_CMD_API_S_VER_1 */ - -#endif /* __fw_api_stats_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h deleted file mode 100644 index eed6271d01a3..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h +++ /dev/null @@ -1,386 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __fw_api_tof_h__ -#define __fw_api_tof_h__ - -#include "fw-api.h" - -/* ToF sub-group command IDs */ -enum iwl_mvm_tof_sub_grp_ids { - TOF_RANGE_REQ_CMD = 0x1, - TOF_CONFIG_CMD = 0x2, - TOF_RANGE_ABORT_CMD = 0x3, - TOF_RANGE_REQ_EXT_CMD = 0x4, - TOF_RESPONDER_CONFIG_CMD = 0x5, - TOF_NW_INITIATED_RES_SEND_CMD = 0x6, - TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7, - TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC, - TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD, - TOF_RANGE_RESPONSE_NOTIF = 0xFE, - TOF_MCSI_DEBUG_NOTIF = 0xFB, -}; - -/** - * struct iwl_tof_config_cmd - ToF configuration - * @tof_disabled: 0 enabled, 1 - disabled - * @one_sided_disabled: 0 enabled, 1 - disabled - * @is_debug_mode: 1 debug mode, 0 - otherwise - * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise - */ -struct iwl_tof_config_cmd { - __le32 sub_grp_cmd_id; - u8 tof_disabled; - u8 one_sided_disabled; - u8 is_debug_mode; - u8 is_buf_required; -} __packed; - -/** - * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) - * @burst_period: future use: (currently hard coded in the LMAC) - * The interval between two sequential bursts. - * @min_delta_ftm: future use: (currently hard coded in the LMAC) - * The minimum delay between two sequential FTM Responses - * in the same burst. - * @burst_duration: future use: (currently hard coded in the LMAC) - * The total time for all FTMs handshake in the same burst. - * Affect the time events duration in the LMAC. - * @num_of_burst_exp: future use: (currently hard coded in the LMAC) - * The number of bursts for the current ToF request. Affect - * the number of events allocations in the current iteration. - * @get_ch_est: for xVT only, NA for driver - * @abort_responder: when set to '1' - Responder will terminate its activity - * (all other fields in the command are ignored) - * @recv_sta_req_params: 1 - Responder will ignore the other Responder's - * params and use the recomended Initiator params. - * 0 - otherwise - * @channel_num: current AP Channel - * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @rate: current AP rate - * @ctrl_ch_position: coding of the control channel position relative to - * the center frequency. 
- * 40MHz 0 below center, 1 above center - * 80MHz bits [0..1]: 0 the near 20MHz to the center, - * 1 the far 20MHz to the center - * bit[2] as above 40MHz - * @ftm_per_burst: FTMs per Burst - * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response, - * '1' - we measure over the Initial FTM Response - * @asap_mode: ASAP / Non ASAP mode for the current WLS station - * @sta_id: index of the AP STA when in AP mode - * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF - * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug - * purposes, simulating station movement by adding various values - * to this field - * @bssid: Current AP BSSID - */ -struct iwl_tof_responder_config_cmd { - __le32 sub_grp_cmd_id; - __le16 burst_period; - u8 min_delta_ftm; - u8 burst_duration; - u8 num_of_burst_exp; - u8 get_ch_est; - u8 abort_responder; - u8 recv_sta_req_params; - u8 channel_num; - u8 bandwidth; - u8 rate; - u8 ctrl_ch_position; - u8 ftm_per_burst; - u8 ftm_resp_ts_avail; - u8 asap_mode; - u8 sta_id; - __le16 tsf_timer_offset_msecs; - __le16 toa_offset; - u8 bssid[ETH_ALEN]; -} __packed; - -/** - * struct iwl_tof_range_request_ext_cmd - extended range req for WLS - * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF - * @min_delta_ftm: Minimal time between two consecutive measurements, - * in units of 100us. 0 means no preference by station - * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended - * value be sent to the AP - * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended - * value to be sent to the AP - * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended - * value to be sent to the AP - */ -struct iwl_tof_range_req_ext_cmd { - __le32 sub_grp_cmd_id; - __le16 tsf_timer_offset_msec; - __le16 reserved; - u8 min_delta_ftm; - u8 ftm_format_and_bw20M; - u8 ftm_format_and_bw40M; - u8 ftm_format_and_bw80M; -} __packed; - -#define IWL_MVM_TOF_MAX_APS 21 - -/** - * struct iwl_tof_range_req_ap_entry - AP configuration parameters - * @channel_num: Current AP Channel - * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @tsf_delta_direction: TSF relatively to the subject AP - * @ctrl_ch_position: Coding of the control channel position relative to the - * center frequency. - * 40MHz 0 below center, 1 above center - * 80MHz bits [0..1]: 0 the near 20MHz to the center, - * 1 the far 20MHz to the center - * bit[2] as above 40MHz - * @bssid: AP's bss id - * @measure_type: Measurement type: 0 - two sided, 1 - One sided - * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the - * number of measurement iterations (min 2^0 = 1, max 2^14) - * @burst_period: Recommended value to be sent to the AP. Measurement - * periodicity In units of 100ms. ignored if num_of_bursts = 0 - * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31) - * 1-sided: how many rts/cts pairs should be used per burst. - * @retries_per_sample: Max number of retries that the LMAC should send - * in case of no replies by the AP. - * @tsf_delta: TSF Delta in units of microseconds. - * The difference between the AP TSF and the device local clock. - * @location_req: Location Request Bit[0] LCI should be sent in the FTMR - * Bit[1] Civic should be sent in the FTMR - * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) - * @enable_dyn_ack: Enable Dynamic ACK BW. 
- * 0 Initiator interact with regular AP - * 1 Initiator interact with Responder machine: need to send the - * Initiator Acks with HT 40MHz / 80MHz, since the Responder should - * use it for its ch est measurement (this flag will be set when we - * configure the opposite machine to be Responder). - * @rssi: Last received value - * leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value. - */ -struct iwl_tof_range_req_ap_entry { - u8 channel_num; - u8 bandwidth; - u8 tsf_delta_direction; - u8 ctrl_ch_position; - u8 bssid[ETH_ALEN]; - u8 measure_type; - u8 num_of_bursts; - __le16 burst_period; - u8 samples_per_burst; - u8 retries_per_sample; - __le32 tsf_delta; - u8 location_req; - u8 asap_mode; - u8 enable_dyn_ack; - s8 rssi; -} __packed; - -/** - * enum iwl_tof_response_mode - * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as - * possible (not supported for this release) - * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon - * timeout expiration - * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the - * earlier of: measurements completion / timeout - * expiration. - */ -enum iwl_tof_response_mode { - IWL_MVM_TOF_RESPOSE_ASAP = 1, - IWL_MVM_TOF_RESPOSE_TIMEOUT, - IWL_MVM_TOF_RESPOSE_COMPLETE, -}; - -/** - * struct iwl_tof_range_req_cmd - start measurement cmd - * @request_id: A Token incremented per request. The same Token will be - * sent back in the range response - * @initiator: 0- NW initiated, 1 - Client Initiated - * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, - * '1' - run ML-Algo for ToF only - * @req_timeout: Requested timeout of the response in units of 100ms. - * This is equivalent to the session time configured to the - * LMAC in Initiator Request - * @report_policy: Supported partially for this release: For current release - - * the range report will be uploaded as a batch when ready or - * when the session is done (successfully / partially). - * one of iwl_tof_response_mode. - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) - * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), - * '1' Use MAC Address randomization according to the below - * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. 
- * Bits set to 1 shall be randomized by the UMAC - */ -struct iwl_tof_range_req_cmd { - __le32 sub_grp_cmd_id; - u8 request_id; - u8 initiator; - u8 one_sided_los_disable; - u8 req_timeout; - u8 report_policy; - u8 los_det_disable; - u8 num_of_ap; - u8 macaddr_random; - u8 macaddr_template[ETH_ALEN]; - u8 macaddr_mask[ETH_ALEN]; - struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; -} __packed; - -/** - * struct iwl_tof_gen_resp_cmd - generic ToF response - */ -struct iwl_tof_gen_resp_cmd { - __le32 sub_grp_cmd_id; - u8 data[]; -} __packed; - -/** - * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) - * @measure_status: current APs measurement status - * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @rtt: The Round Trip Time that took for the last measurement for - * current AP [nSec] - * @rtt_variance: The Variance of the RTT values measured for current AP - * @rtt_spread: The Difference between the maximum and the minimum RTT - * values measured for current AP in the current session [nsec] - * @rssi: RSSI as uploaded in the Channel Estimation notification - * @rssi_spread: The Difference between the maximum and the minimum RSSI values - * measured for current AP in the current session - * @range: Measured range [cm] - * @range_variance: Measured range variance [cm] - * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was - * uploaded by the LMAC - */ -struct iwl_tof_range_rsp_ap_entry_ntfy { - u8 bssid[ETH_ALEN]; - u8 measure_status; - u8 measure_bw; - __le32 rtt; - __le32 rtt_variance; - __le32 rtt_spread; - s8 rssi; - u8 rssi_spread; - __le16 reserved; - __le32 range; - __le32 range_variance; - __le32 timestamp; -} __packed; - -/** - * struct iwl_tof_range_rsp_ntfy - - * @request_id: A Token ID of the corresponding Range request - * @request_status: status of current measurement session - * @last_in_batch: reprot policy (when not all responses are uploaded at once) - * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) - */ -struct iwl_tof_range_rsp_ntfy { - u8 request_id; - u8 request_status; - u8 last_in_batch; - u8 num_of_aps; - struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; -} __packed; - -#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) -/** - * struct iwl_tof_mcsi_notif - used for debug - * @token: token ID for the current session - * @role: '0' - initiator, '1' - responder - * @initiator_bssid: initiator machine - * @responder_bssid: responder machine - * @mcsi_buffer: debug data - */ -struct iwl_tof_mcsi_notif { - u8 token; - u8 role; - __le16 reserved; - u8 initiator_bssid[ETH_ALEN]; - u8 responder_bssid[ETH_ALEN]; - u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; -} __packed; - -/** - * struct iwl_tof_neighbor_report_notif - * @bssid: BSSID of the AP which sent the report - * @request_token: same token as the corresponding request - * @status: - * @report_ie_len: the length of the response frame starting from the Element ID - * @data: the IEs - */ -struct iwl_tof_neighbor_report { - u8 bssid[ETH_ALEN]; - u8 request_token; - u8 status; - __le16 report_ie_len; - u8 data[]; -} __packed; - -/** - * struct iwl_tof_range_abort_cmd - * @request_id: corresponds to a range request - */ -struct iwl_tof_range_abort_cmd { - __le32 sub_grp_cmd_id; - u8 request_id; - u8 reserved[3]; -} __packed; - -#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h deleted file mode 100644 index 853698ab8b05..000000000000 --- 
a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h +++ /dev/null @@ -1,646 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __fw_api_tx_h__ -#define __fw_api_tx_h__ - -/** - * enum iwl_tx_flags - bitmasks for tx_flags in TX command - * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame - * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame - * @TX_CMD_FLG_ACK: expect ACK from receiving station - * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. 
- * Otherwise, use rate_n_flags from the TX command - * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected - * Must set TX_CMD_FLG_ACK with this flag. - * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence - * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence - * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC) - * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored - * on old firmwares). - * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame - * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control. - * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command - * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU - * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame - * Should be set for beacons and probe responses - * @TX_CMD_FLG_CALIB: activate PA TX power calibrations - * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count - * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header. - * Should be set for 26/30 length MAC headers - * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW - * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration - * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation - * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id - * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped - * @TX_CMD_FLG_EXEC_PAPD: execute PAPD - * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power - * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk - */ -enum iwl_tx_flags { - TX_CMD_FLG_PROT_REQUIRE = BIT(0), - TX_CMD_FLG_WRITE_TX_POWER = BIT(1), - TX_CMD_FLG_ACK = BIT(3), - TX_CMD_FLG_STA_RATE = BIT(4), - TX_CMD_FLG_BAR = BIT(6), - TX_CMD_FLG_TXOP_PROT = BIT(7), - TX_CMD_FLG_VHT_NDPA = BIT(8), - TX_CMD_FLG_HT_NDPA = BIT(9), - TX_CMD_FLG_CSI_FDBK2HOST = BIT(10), - TX_CMD_FLG_BT_PRIO_POS = 11, - TX_CMD_FLG_BT_DIS = BIT(12), - TX_CMD_FLG_SEQ_CTL = BIT(13), - TX_CMD_FLG_MORE_FRAG = BIT(14), - TX_CMD_FLG_TSF = BIT(16), - TX_CMD_FLG_CALIB = BIT(17), - TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18), - TX_CMD_FLG_MH_PAD = BIT(20), - TX_CMD_FLG_RESP_TO_DRV = BIT(21), - TX_CMD_FLG_CCMP_AGG = BIT(22), - TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), - TX_CMD_FLG_DUR = BIT(25), - TX_CMD_FLG_FW_DROP = BIT(26), - TX_CMD_FLG_EXEC_PAPD = BIT(27), - TX_CMD_FLG_PAPD_TYPE = BIT(28), - TX_CMD_FLG_HCCA_CHUNK = BIT(31) -}; /* TX_FLAGS_BITS_API_S_VER_1 */ - -/** - * enum iwl_tx_pm_timeouts - pm timeout values in TX command - * @PM_FRAME_NONE: no need to suspend sleep mode - * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU - * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec - */ -enum iwl_tx_pm_timeouts { - PM_FRAME_NONE = 0, - PM_FRAME_MGMT = 2, - PM_FRAME_ASSOC = 3, -}; - -/* - * TX command security control - */ -#define TX_CMD_SEC_WEP 0x01 -#define TX_CMD_SEC_CCM 0x02 -#define TX_CMD_SEC_TKIP 0x03 -#define TX_CMD_SEC_EXT 0x04 -#define TX_CMD_SEC_MSK 0x07 -#define TX_CMD_SEC_WEP_KEY_IDX_POS 6 -#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 -#define TX_CMD_SEC_KEY128 0x08 - -/* TODO: how does these values are OK with only 16 bit variable??? 
*/ -/* - * TX command next frame info - * - * bits 0:2 - security control (TX_CMD_SEC_*) - * bit 3 - immediate ACK required - * bit 4 - rate is taken from STA table - * bit 5 - frame belongs to BA stream - * bit 6 - immediate BA response expected - * bit 7 - unused - * bits 8:15 - Station ID - * bits 16:31 - rate - */ -#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8) -#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10) -#define TX_CMD_NEXT_FRAME_BA_MSK (0x20) -#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40) -#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8) -#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00) -#define TX_CMD_NEXT_FRAME_STA_ID_POS (8) -#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000) -#define TX_CMD_NEXT_FRAME_RATE_POS (16) - -/* - * TX command Frame life time in us - to be written in pm_frame_timeout - */ -#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF -#define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/ -#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */ -#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0 - -/* - * TID for non QoS frames - to be written in tid_tspec - */ -#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT - -/* - * Limits on the retransmissions - to be written in {data,rts}_retry_limit - */ -#define IWL_DEFAULT_TX_RETRY 15 -#define IWL_MGMT_DFAULT_RETRY_LIMIT 3 -#define IWL_RTS_DFAULT_RETRY_LIMIT 60 -#define IWL_BAR_DFAULT_RETRY_LIMIT 60 -#define IWL_LOW_RETRY_LIMIT 7 - -/* TODO: complete documentation for try_cnt and btkill_cnt */ -/** - * struct iwl_tx_cmd - TX command struct to FW - * ( TX_CMD = 0x1c ) - * @len: in bytes of the payload, see below for details - * @tx_flags: combination of TX_CMD_FLG_* - * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is - * cleared. Combination of RATE_MCS_* - * @sta_id: index of destination station in FW station table - * @sec_ctl: security control, TX_CMD_SEC_* - * @initial_rate_index: index into the the rate table for initial TX attempt. - * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames. - * @key: security key - * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_* - * @life_time: frame life time (usecs??) - * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt + - * btkill_cnd + reserved), first 32 bits. "0" disables usage. - * @dram_msb_ptr: upper bits of the scratch physical address - * @rts_retry_limit: max attempts for RTS - * @data_retry_limit: max attempts to send the data packet - * @tid_spec: TID/tspec - * @pm_frame_timeout: PM TX frame timeout - * - * The byte count (both len and next_frame_len) includes MAC header - * (24/26/30/32 bytes) - * + 2 bytes pad if 26/30 header size - * + 8 byte IV for CCM or TKIP (not used for WEP) - * + Data payload - * + 8-byte MIC (not used for CCM/WEP) - * It does not include post-MAC padding, i.e., - * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes. - * Range of len: 14-2342 bytes. - * - * After the struct fields the MAC header is placed, plus any padding, - * and then the actial payload. 
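As an aside, the byte-count rule spelled out above lends itself to a tiny worked example. The following user-space sketch is not part of the patch: the helper name, the example frame and the printf are invented here, and it only models the header/pad/IV accounting the comment describes (post-MAC MIC/ICV/CRC are left out, exactly as stated above).

#include <stdint.h>
#include <stdio.h>

/* rough model of the "len" bookkeeping described in the comment above */
static uint16_t sketch_tx_cmd_len(uint16_t mac_hdr_len,  /* 24/26/30/32 */
                                  uint16_t payload_len,
                                  int uses_ccm_or_tkip)  /* adds 8-byte IV */
{
        uint16_t len = mac_hdr_len + payload_len;

        /* 2 bytes of pad are counted for the 26/30 byte header sizes */
        if (mac_hdr_len == 26 || mac_hdr_len == 30)
                len += 2;

        /* the 8-byte IV is counted for CCM or TKIP, but not for WEP */
        if (uses_ccm_or_tkip)
                len += 8;

        return len;
}

int main(void)
{
        /* QoS data frame (26-byte header), CCMP, 1000-byte payload */
        printf("len = %u\n", sketch_tx_cmd_len(26, 1000, 1));
        return 0;
}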
- */ -struct iwl_tx_cmd { - __le16 len; - __le16 next_frame_len; - __le32 tx_flags; - struct { - u8 try_cnt; - u8 btkill_cnt; - __le16 reserved; - } scratch; /* DRAM_SCRATCH_API_U_VER_1 */ - __le32 rate_n_flags; - u8 sta_id; - u8 sec_ctl; - u8 initial_rate_index; - u8 reserved2; - u8 key[16]; - __le32 reserved3; - __le32 life_time; - __le32 dram_lsb_ptr; - u8 dram_msb_ptr; - u8 rts_retry_limit; - u8 data_retry_limit; - u8 tid_tspec; - __le16 pm_frame_timeout; - __le16 reserved4; - u8 payload[0]; - struct ieee80211_hdr hdr[0]; -} __packed; /* TX_CMD_API_S_VER_3 */ - -/* - * TX response related data - */ - -/* - * enum iwl_tx_status - status that is returned by the fw after attempts to Tx - * @TX_STATUS_SUCCESS: - * @TX_STATUS_DIRECT_DONE: - * @TX_STATUS_POSTPONE_DELAY: - * @TX_STATUS_POSTPONE_FEW_BYTES: - * @TX_STATUS_POSTPONE_BT_PRIO: - * @TX_STATUS_POSTPONE_QUIET_PERIOD: - * @TX_STATUS_POSTPONE_CALC_TTAK: - * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: - * @TX_STATUS_FAIL_SHORT_LIMIT: - * @TX_STATUS_FAIL_LONG_LIMIT: - * @TX_STATUS_FAIL_UNDERRUN: - * @TX_STATUS_FAIL_DRAIN_FLOW: - * @TX_STATUS_FAIL_RFKILL_FLUSH: - * @TX_STATUS_FAIL_LIFE_EXPIRE: - * @TX_STATUS_FAIL_DEST_PS: - * @TX_STATUS_FAIL_HOST_ABORTED: - * @TX_STATUS_FAIL_BT_RETRY: - * @TX_STATUS_FAIL_STA_INVALID: - * @TX_TATUS_FAIL_FRAG_DROPPED: - * @TX_STATUS_FAIL_TID_DISABLE: - * @TX_STATUS_FAIL_FIFO_FLUSHED: - * @TX_STATUS_FAIL_SMALL_CF_POLL: - * @TX_STATUS_FAIL_FW_DROP: - * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and - * STA table - * @TX_FRAME_STATUS_INTERNAL_ABORT: - * @TX_MODE_MSK: - * @TX_MODE_NO_BURST: - * @TX_MODE_IN_BURST_SEQ: - * @TX_MODE_FIRST_IN_BURST: - * @TX_QUEUE_NUM_MSK: - * - * Valid only if frame_count =1 - * TODO: complete documentation - */ -enum iwl_tx_status { - TX_STATUS_MSK = 0x000000ff, - TX_STATUS_SUCCESS = 0x01, - TX_STATUS_DIRECT_DONE = 0x02, - /* postpone TX */ - TX_STATUS_POSTPONE_DELAY = 0x40, - TX_STATUS_POSTPONE_FEW_BYTES = 0x41, - TX_STATUS_POSTPONE_BT_PRIO = 0x42, - TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, - TX_STATUS_POSTPONE_CALC_TTAK = 0x44, - /* abort TX */ - TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, - TX_STATUS_FAIL_SHORT_LIMIT = 0x82, - TX_STATUS_FAIL_LONG_LIMIT = 0x83, - TX_STATUS_FAIL_UNDERRUN = 0x84, - TX_STATUS_FAIL_DRAIN_FLOW = 0x85, - TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, - TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, - TX_STATUS_FAIL_DEST_PS = 0x88, - TX_STATUS_FAIL_HOST_ABORTED = 0x89, - TX_STATUS_FAIL_BT_RETRY = 0x8a, - TX_STATUS_FAIL_STA_INVALID = 0x8b, - TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, - TX_STATUS_FAIL_TID_DISABLE = 0x8d, - TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, - TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f, - TX_STATUS_FAIL_FW_DROP = 0x90, - TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91, - TX_STATUS_INTERNAL_ABORT = 0x92, - TX_MODE_MSK = 0x00000f00, - TX_MODE_NO_BURST = 0x00000000, - TX_MODE_IN_BURST_SEQ = 0x00000100, - TX_MODE_FIRST_IN_BURST = 0x00000200, - TX_QUEUE_NUM_MSK = 0x0001f000, - TX_NARROW_BW_MSK = 0x00060000, - TX_NARROW_BW_1DIV2 = 0x00020000, - TX_NARROW_BW_1DIV4 = 0x00040000, - TX_NARROW_BW_1DIV8 = 0x00060000, -}; - -/* - * enum iwl_tx_agg_status - TX aggregation status - * @AGG_TX_STATE_STATUS_MSK: - * @AGG_TX_STATE_TRANSMITTED: - * @AGG_TX_STATE_UNDERRUN: - * @AGG_TX_STATE_BT_PRIO: - * @AGG_TX_STATE_FEW_BYTES: - * @AGG_TX_STATE_ABORT: - * @AGG_TX_STATE_LAST_SENT_TTL: - * @AGG_TX_STATE_LAST_SENT_TRY_CNT: - * @AGG_TX_STATE_LAST_SENT_BT_KILL: - * @AGG_TX_STATE_SCD_QUERY: - * @AGG_TX_STATE_TEST_BAD_CRC32: - * @AGG_TX_STATE_RESPONSE: - * @AGG_TX_STATE_DUMP_TX: - * 
@AGG_TX_STATE_DELAY_TX: - * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries - * occur if tx failed for this frame when it was a member of a previous - * aggregation block). If rate scaling is used, retry count indicates the - * rate table entry used for all frames in the new agg. - *@ AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for - * this frame - * - * TODO: complete documentation - */ -enum iwl_tx_agg_status { - AGG_TX_STATE_STATUS_MSK = 0x00fff, - AGG_TX_STATE_TRANSMITTED = 0x000, - AGG_TX_STATE_UNDERRUN = 0x001, - AGG_TX_STATE_BT_PRIO = 0x002, - AGG_TX_STATE_FEW_BYTES = 0x004, - AGG_TX_STATE_ABORT = 0x008, - AGG_TX_STATE_LAST_SENT_TTL = 0x010, - AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, - AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, - AGG_TX_STATE_SCD_QUERY = 0x080, - AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100, - AGG_TX_STATE_RESPONSE = 0x1ff, - AGG_TX_STATE_DUMP_TX = 0x200, - AGG_TX_STATE_DELAY_TX = 0x400, - AGG_TX_STATE_TRY_CNT_POS = 12, - AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, -}; - -#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \ - AGG_TX_STATE_LAST_SENT_TRY_CNT| \ - AGG_TX_STATE_LAST_SENT_BT_KILL) - -/* - * The mask below describes a status where we are absolutely sure that the MPDU - * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've - * written the bytes to the TXE, but we know nothing about what the DSP did. - */ -#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \ - AGG_TX_STATE_ABORT | \ - AGG_TX_STATE_SCD_QUERY) - -/* - * REPLY_TX = 0x1c (response) - * - * This response may be in one of two slightly different formats, indicated - * by the frame_count field: - * - * 1) No aggregation (frame_count == 1). This reports Tx results for a single - * frame. Multiple attempts, at various bit rates, may have been made for - * this frame. - * - * 2) Aggregation (frame_count > 1). This reports Tx results for two or more - * frames that used block-acknowledge. All frames were transmitted at - * same rate. Rate scaling may have been used if first frame in this new - * agg block failed in previous agg block(s). - * - * Note that, for aggregation, ACK (block-ack) status is not delivered - * here; block-ack has not been received by the time the device records - * this status. - * This status relates to reasons the tx might have been blocked or aborted - * within the device, rather than whether it was received successfully by - * the destination station. - */ - -/** - * struct agg_tx_status - per packet TX aggregation status - * @status: enum iwl_tx_agg_status - * @sequence: Sequence # for this frame's Tx cmd (not SSN!) 
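Before the structures themselves, a short illustration of the variable-length aggregated response described above may help. This user-space sketch is not part of the patch: the stand-in structure and helper are invented, only the 4-byte per-frame status entries and the trailing 12-bit SCD SSN follow the agg_tx_status and iwl_mvm_get_scd_ssn definitions given below, and the __le16/__le32 conversions are omitted (a little-endian host is assumed).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for struct agg_tx_status (two __le16 fields, 4 bytes) */
struct sketch_agg_tx_status {
        uint16_t status;
        uint16_t sequence;
};

/* resp_tail points at the first status entry of an aggregated response */
static void sketch_walk_agg_tail(const uint8_t *resp_tail, uint8_t frame_count)
{
        struct sketch_agg_tx_status st;
        uint32_t scd_ssn;
        int i;

        /* one status/sequence pair per frame in the batch */
        for (i = 0; i < frame_count; i++) {
                memcpy(&st, resp_tail + i * sizeof(st), sizeof(st));
                printf("frame %d: status 0x%03x seq 0x%04x\n",
                       i, st.status & 0xfff, st.sequence);
        }

        /* the SCD SSN sits right after the last entry (masked to 12 bits) */
        memcpy(&scd_ssn, resp_tail + frame_count * sizeof(st), sizeof(scd_ssn));
        printf("scd ssn: 0x%03x\n", (unsigned)(scd_ssn & 0xfff));
}

int main(void)
{
        /* two made-up status entries followed by an SSN of 0x234 */
        const uint8_t tail[12] = {
                0x00, 0x00, 0x10, 0x00,        /* frame 0 */
                0x01, 0x00, 0x11, 0x00,        /* frame 1 */
                0x34, 0x02, 0x00, 0x00,        /* SCD SSN  */
        };

        sketch_walk_agg_tail(tail, 2);
        return 0;
}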
- */ -struct agg_tx_status { - __le16 status; - __le16 sequence; -} __packed; - -/* - * definitions for initial rate index field - * bits [3:0] initial rate index - * bits [6:4] rate table color, used for the initial rate - * bit-7 invalid rate indication - */ -#define TX_RES_INIT_RATE_INDEX_MSK 0x0f -#define TX_RES_RATE_TABLE_COLOR_MSK 0x70 -#define TX_RES_INV_RATE_INDEX_MSK 0x80 - -#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) -#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) - -/** - * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet - * ( REPLY_TX = 0x1c ) - * @frame_count: 1 no aggregation, >1 aggregation - * @bt_kill_count: num of times blocked by bluetooth (unused for agg) - * @failure_rts: num of failures due to unsuccessful RTS - * @failure_frame: num failures due to no ACK (unused for agg) - * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the - * Tx of all the batch. RATE_MCS_* - * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. - * for agg: RTS + CTS + aggregation tx time + block-ack time. - * in usec. - * @pa_status: tx power info - * @pa_integ_res_a: tx power info - * @pa_integ_res_b: tx power info - * @pa_integ_res_c: tx power info - * @measurement_req_id: tx power info - * @tfd_info: TFD information set by the FH - * @seq_ctl: sequence control from the Tx cmd - * @byte_cnt: byte count from the Tx cmd - * @tlc_info: TLC rate info - * @ra_tid: bits [3:0] = ra, bits [7:4] = tid - * @frame_ctrl: frame control - * @status: for non-agg: frame status TX_STATUS_* - * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields - * follow this one, up to frame_count. - * - * After the array of statuses comes the SSN of the SCD. Look at - * %iwl_mvm_get_scd_ssn for more details. - */ -struct iwl_mvm_tx_resp { - u8 frame_count; - u8 bt_kill_count; - u8 failure_rts; - u8 failure_frame; - __le32 initial_rate; - __le16 wireless_media_time; - - u8 pa_status; - u8 pa_integ_res_a[3]; - u8 pa_integ_res_b[3]; - u8 pa_integ_res_c[3]; - __le16 measurement_req_id; - u8 reduced_tpc; - u8 reserved; - - __le32 tfd_info; - __le16 seq_ctl; - __le16 byte_cnt; - u8 tlc_info; - u8 ra_tid; - __le16 frame_ctrl; - - struct agg_tx_status status; -} __packed; /* TX_RSP_API_S_VER_3 */ - -/** - * struct iwl_mvm_ba_notif - notifies about reception of BA - * ( BA_NOTIF = 0xc5 ) - * @sta_addr_lo32: lower 32 bits of the MAC address - * @sta_addr_hi16: upper 16 bits of the MAC address - * @sta_id: Index of recipient (BA-sending) station in fw's station table - * @tid: tid of the session - * @seq_ctl: - * @bitmap: the bitmap of the BA notification as seen in the air - * @scd_flow: the tx queue this BA relates to - * @scd_ssn: the index of the last contiguously sent packet - * @txed: number of Txed frames in this batch - * @txed_2_done: number of Acked frames in this batch - */ -struct iwl_mvm_ba_notif { - __le32 sta_addr_lo32; - __le16 sta_addr_hi16; - __le16 reserved; - - u8 sta_id; - u8 tid; - __le16 seq_ctl; - __le64 bitmap; - __le16 scd_flow; - __le16 scd_ssn; - u8 txed; - u8 txed_2_done; - __le16 reserved1; -} __packed; - -/* - * struct iwl_mac_beacon_cmd - beacon template command - * @tx: the tx commands associated with the beacon frame - * @template_id: currently equal to the mac context id of the coresponding - * mac. 
- * @tim_idx: the offset of the tim IE in the beacon - * @tim_size: the length of the tim IE - * @frame: the template of the beacon frame - */ -struct iwl_mac_beacon_cmd { - struct iwl_tx_cmd tx; - __le32 template_id; - __le32 tim_idx; - __le32 tim_size; - struct ieee80211_hdr frame[0]; -} __packed; - -struct iwl_beacon_notif { - struct iwl_mvm_tx_resp beacon_notify_hdr; - __le64 tsf; - __le32 ibss_mgr_status; -} __packed; - -/** - * struct iwl_extended_beacon_notif - notifies about beacon transmission - * @beacon_notify_hdr: tx response command associated with the beacon - * @tsf: last beacon tsf - * @ibss_mgr_status: whether IBSS is manager - * @gp2: last beacon time in gp2 - */ -struct iwl_extended_beacon_notif { - struct iwl_mvm_tx_resp beacon_notify_hdr; - __le64 tsf; - __le32 ibss_mgr_status; - __le32 gp2; -} __packed; /* BEACON_NTFY_API_S_VER_5 */ - -/** - * enum iwl_dump_control - dump (flush) control flags - * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty - * and the TFD queues are empty. - */ -enum iwl_dump_control { - DUMP_TX_FIFO_FLUSH = BIT(1), -}; - -/** - * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command - * @queues_ctl: bitmap of queues to flush - * @flush_ctl: control flags - * @reserved: reserved - */ -struct iwl_tx_path_flush_cmd { - __le32 queues_ctl; - __le16 flush_ctl; - __le16 reserved; -} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */ - -/** - * iwl_mvm_get_scd_ssn - returns the SSN of the SCD - * @tx_resp: the Tx response from the fw (agg or non-agg) - * - * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since - * it can't know that everything will go well until the end of the AMPDU, it - * can't know in advance the number of MPDUs that will be sent in the current - * batch. This is why it writes the agg Tx response while it fetches the MPDUs. - * Hence, it can't know in advance what the SSN of the SCD will be at the end - * of the batch. This is why the SSN of the SCD is written at the end of the - * whole struct at a variable offset. This function knows how to cope with the - * variable offset and returns the SSN of the SCD. 
- */ -static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp) -{ - return le32_to_cpup((__le32 *)&tx_resp->status + - tx_resp->frame_count) & 0xfff; -} - -/** - * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command - * @token: - * @sta_id: station id - * @tid: - * @scd_queue: scheduler queue to confiug - * @enable: 1 queue enable, 0 queue disable - * @aggregate: 1 aggregated queue, 0 otherwise - * @tx_fifo: %enum iwl_mvm_tx_fifo - * @window: BA window size - * @ssn: SSN for the BA agreement - */ -struct iwl_scd_txq_cfg_cmd { - u8 token; - u8 sta_id; - u8 tid; - u8 scd_queue; - u8 enable; - u8 aggregate; - u8 tx_fifo; - u8 window; - __le16 ssn; - __le16 reserved; -} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */ - -/** - * struct iwl_scd_txq_cfg_rsp - * @token: taken from the command - * @sta_id: station id from the command - * @tid: tid from the command - * @scd_queue: scd_queue from the command - */ -struct iwl_scd_txq_cfg_rsp { - u8 token; - u8 sta_id; - u8 tid; - u8 scd_queue; -} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */ - -#endif /* __fw_api_tx_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h deleted file mode 100644 index 181590fbd3b3..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ /dev/null @@ -1,1773 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __fw_api_h__ -#define __fw_api_h__ - -#include "fw-api-rs.h" -#include "fw-api-rx.h" -#include "fw-api-tx.h" -#include "fw-api-sta.h" -#include "fw-api-mac.h" -#include "fw-api-power.h" -#include "fw-api-d3.h" -#include "fw-api-coex.h" -#include "fw-api-scan.h" -#include "fw-api-stats.h" -#include "fw-api-tof.h" - -/* Tx queue numbers */ -enum { - IWL_MVM_OFFCHANNEL_QUEUE = 8, - IWL_MVM_CMD_QUEUE = 9, -}; - -enum iwl_mvm_tx_fifo { - IWL_MVM_TX_FIFO_BK = 0, - IWL_MVM_TX_FIFO_BE, - IWL_MVM_TX_FIFO_VI, - IWL_MVM_TX_FIFO_VO, - IWL_MVM_TX_FIFO_MCAST = 5, - IWL_MVM_TX_FIFO_CMD = 7, -}; - -#define IWL_MVM_STATION_COUNT 16 - -#define IWL_MVM_TDLS_STA_COUNT 4 - -/* commands */ -enum { - MVM_ALIVE = 0x1, - REPLY_ERROR = 0x2, - ECHO_CMD = 0x3, - - INIT_COMPLETE_NOTIF = 0x4, - - /* PHY context commands */ - PHY_CONTEXT_CMD = 0x8, - DBG_CFG = 0x9, - ANTENNA_COUPLING_NOTIFICATION = 0xa, - - /* UMAC scan commands */ - SCAN_ITERATION_COMPLETE_UMAC = 0xb5, - SCAN_CFG_CMD = 0xc, - SCAN_REQ_UMAC = 0xd, - SCAN_ABORT_UMAC = 0xe, - SCAN_COMPLETE_UMAC = 0xf, - - /* station table */ - ADD_STA_KEY = 0x17, - ADD_STA = 0x18, - REMOVE_STA = 0x19, - - /* paging get item */ - FW_GET_ITEM_CMD = 0x1a, - - /* TX */ - TX_CMD = 0x1c, - TXPATH_FLUSH = 0x1e, - MGMT_MCAST_KEY = 0x1f, - - /* scheduler config */ - SCD_QUEUE_CFG = 0x1d, - - /* global key */ - WEP_KEY = 0x20, - - /* Memory */ - SHARED_MEM_CFG = 0x25, - - /* TDLS */ - TDLS_CHANNEL_SWITCH_CMD = 0x27, - TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, - TDLS_CONFIG_CMD = 0xa7, - - /* MAC and Binding commands */ - MAC_CONTEXT_CMD = 0x28, - TIME_EVENT_CMD = 0x29, /* both CMD and response */ - TIME_EVENT_NOTIFICATION = 0x2a, - BINDING_CONTEXT_CMD = 0x2b, - TIME_QUOTA_CMD = 0x2c, - NON_QOS_TX_COUNTER_CMD = 0x2d, - - LQ_CMD = 0x4e, - - /* paging block to FW cpu2 */ - FW_PAGING_BLOCK_CMD = 0x4f, - - /* Scan offload */ - SCAN_OFFLOAD_REQUEST_CMD = 0x51, - SCAN_OFFLOAD_ABORT_CMD = 0x52, - HOT_SPOT_CMD = 0x53, - SCAN_OFFLOAD_COMPLETE = 0x6D, - SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, - SCAN_OFFLOAD_CONFIG_CMD = 0x6f, - MATCH_FOUND_NOTIFICATION = 0xd9, - SCAN_ITERATION_COMPLETE = 0xe7, - - /* Phy */ - PHY_CONFIGURATION_CMD = 0x6a, - CALIB_RES_NOTIF_PHY_DB = 0x6b, - /* PHY_DB_CMD = 0x6c, */ - - /* ToF - 802.11mc FTM */ - TOF_CMD = 0x10, - TOF_NOTIFICATION = 0x11, - - /* Power - legacy power table command */ - POWER_TABLE_CMD = 0x77, - PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, - LTR_CONFIG = 0xee, - - /* Thermal Throttling*/ - REPLY_THERMAL_MNG_BACKOFF = 0x7e, - - /* Set/Get DC2DC frequency tune */ - DC2DC_CONFIG_CMD = 0x83, - - /* NVM */ - NVM_ACCESS_CMD = 0x88, - - SET_CALIB_DEFAULT_CMD = 0x8e, - - 
BEACON_NOTIFICATION = 0x90, - BEACON_TEMPLATE_CMD = 0x91, - TX_ANT_CONFIGURATION_CMD = 0x98, - STATISTICS_CMD = 0x9c, - STATISTICS_NOTIFICATION = 0x9d, - EOSP_NOTIFICATION = 0x9e, - REDUCE_TX_POWER_CMD = 0x9f, - - /* RF-KILL commands and notifications */ - CARD_STATE_CMD = 0xa0, - CARD_STATE_NOTIFICATION = 0xa1, - - MISSED_BEACONS_NOTIFICATION = 0xa2, - - /* Power - new power table command */ - MAC_PM_POWER_TABLE = 0xa9, - - MFUART_LOAD_NOTIFICATION = 0xb1, - - REPLY_RX_PHY_CMD = 0xc0, - REPLY_RX_MPDU_CMD = 0xc1, - BA_NOTIF = 0xc5, - - /* Location Aware Regulatory */ - MCC_UPDATE_CMD = 0xc8, - MCC_CHUB_UPDATE_CMD = 0xc9, - - MARKER_CMD = 0xcb, - - /* BT Coex */ - BT_COEX_PRIO_TABLE = 0xcc, - BT_COEX_PROT_ENV = 0xcd, - BT_PROFILE_NOTIFICATION = 0xce, - BT_CONFIG = 0x9b, - BT_COEX_UPDATE_SW_BOOST = 0x5a, - BT_COEX_UPDATE_CORUN_LUT = 0x5b, - BT_COEX_UPDATE_REDUCED_TXP = 0x5c, - BT_COEX_CI = 0x5d, - - REPLY_SF_CFG_CMD = 0xd1, - REPLY_BEACON_FILTERING_CMD = 0xd2, - - /* DTS measurements */ - CMD_DTS_MEASUREMENT_TRIGGER = 0xdc, - DTS_MEASUREMENT_NOTIFICATION = 0xdd, - - REPLY_DEBUG_CMD = 0xf0, - DEBUG_LOG_MSG = 0xf7, - - BCAST_FILTER_CMD = 0xcf, - MCAST_FILTER_CMD = 0xd0, - - /* D3 commands/notifications */ - D3_CONFIG_CMD = 0xd3, - PROT_OFFLOAD_CONFIG_CMD = 0xd4, - OFFLOADS_QUERY_CMD = 0xd5, - REMOTE_WAKE_CONFIG_CMD = 0xd6, - D0I3_END_CMD = 0xed, - - /* for WoWLAN in particular */ - WOWLAN_PATTERNS = 0xe0, - WOWLAN_CONFIGURATION = 0xe1, - WOWLAN_TSC_RSC_PARAM = 0xe2, - WOWLAN_TKIP_PARAM = 0xe3, - WOWLAN_KEK_KCK_MATERIAL = 0xe4, - WOWLAN_GET_STATUSES = 0xe5, - WOWLAN_TX_POWER_PER_DB = 0xe6, - - /* and for NetDetect */ - SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, - SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58, - SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59, - - REPLY_MAX = 0xff, -}; - -enum iwl_phy_ops_subcmd_ids { - CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, - DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, -}; - -/* command groups */ -enum { - PHY_OPS_GROUP = 0x4, -}; - -/** - * struct iwl_cmd_response - generic response struct for most commands - * @status: status of the command asked, changes for each one - */ -struct iwl_cmd_response { - __le32 status; -}; - -/* - * struct iwl_tx_ant_cfg_cmd - * @valid: valid antenna configuration - */ -struct iwl_tx_ant_cfg_cmd { - __le32 valid; -} __packed; - -/* - * Calibration control struct. - * Sent as part of the phy configuration command. - * @flow_trigger: bitmap for which calibrations to perform according to - * flow triggers. - * @event_trigger: bitmap for which calibrations to perform according to - * event triggers. - */ -struct iwl_calib_ctrl { - __le32 flow_trigger; - __le32 event_trigger; -} __packed; - -/* This enum defines the bitmap of various calibrations to enable in both - * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
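A small aside on how this calibration bitmap is meant to be consumed: the sketch below (not part of the patch) shows bits from the iwl_calib_cfg enum that follows being combined into the two trigger masks of iwl_calib_ctrl defined above. The stand-in names and the particular bit choices are invented for illustration; which calibrations a real driver actually enables is policy decided elsewhere and is not shown here.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for a few IWL_CALIB_CFG_* bits (see the enum below) */
#define SKETCH_CALIB_CFG_XTAL_IDX        (1u << 0)
#define SKETCH_CALIB_CFG_TX_PWR_IDX      (1u << 4)
#define SKETCH_CALIB_CFG_SENSITIVITY_IDX (1u << 12)

/* stand-in for struct iwl_calib_ctrl (the real fields are __le32) */
struct sketch_calib_ctrl {
        uint32_t flow_trigger;
        uint32_t event_trigger;
};

int main(void)
{
        struct sketch_calib_ctrl ctrl = {
                /* run these calibrations on flow triggers */
                .flow_trigger = SKETCH_CALIB_CFG_XTAL_IDX |
                                SKETCH_CALIB_CFG_TX_PWR_IDX,
                /* and this one on event triggers */
                .event_trigger = SKETCH_CALIB_CFG_SENSITIVITY_IDX,
        };

        printf("flow 0x%08x event 0x%08x\n",
               (unsigned)ctrl.flow_trigger, (unsigned)ctrl.event_trigger);
        return 0;
}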
- */ -enum iwl_calib_cfg { - IWL_CALIB_CFG_XTAL_IDX = BIT(0), - IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), - IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), - IWL_CALIB_CFG_PAPD_IDX = BIT(3), - IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), - IWL_CALIB_CFG_DC_IDX = BIT(5), - IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), - IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), - IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), - IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), - IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), - IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), - IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), - IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), - IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), - IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), - IWL_CALIB_CFG_DAC_IDX = BIT(16), - IWL_CALIB_CFG_ABS_IDX = BIT(17), - IWL_CALIB_CFG_AGC_IDX = BIT(18), -}; - -/* - * Phy configuration command. - */ -struct iwl_phy_cfg_cmd { - __le32 phy_cfg; - struct iwl_calib_ctrl calib_control; -} __packed; - -#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) -#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) -#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) -#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) -#define PHY_CFG_TX_CHAIN_A BIT(8) -#define PHY_CFG_TX_CHAIN_B BIT(9) -#define PHY_CFG_TX_CHAIN_C BIT(10) -#define PHY_CFG_RX_CHAIN_A BIT(12) -#define PHY_CFG_RX_CHAIN_B BIT(13) -#define PHY_CFG_RX_CHAIN_C BIT(14) - - -/* Target of the NVM_ACCESS_CMD */ -enum { - NVM_ACCESS_TARGET_CACHE = 0, - NVM_ACCESS_TARGET_OTP = 1, - NVM_ACCESS_TARGET_EEPROM = 2, -}; - -/* Section types for NVM_ACCESS_CMD */ -enum { - NVM_SECTION_TYPE_SW = 1, - NVM_SECTION_TYPE_REGULATORY = 3, - NVM_SECTION_TYPE_CALIBRATION = 4, - NVM_SECTION_TYPE_PRODUCTION = 5, - NVM_SECTION_TYPE_MAC_OVERRIDE = 11, - NVM_SECTION_TYPE_PHY_SKU = 12, - NVM_MAX_NUM_SECTIONS = 13, -}; - -/** - * struct iwl_nvm_access_cmd_ver2 - Request the device to send an NVM section - * @op_code: 0 - read, 1 - write - * @target: NVM_ACCESS_TARGET_* - * @type: NVM_SECTION_TYPE_* - * @offset: offset in bytes into the section - * @length: in bytes, to read/write - * @data: if write operation, the data to write. On read its empty - */ -struct iwl_nvm_access_cmd { - u8 op_code; - u8 target; - __le16 type; - __le16 offset; - __le16 length; - u8 data[]; -} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ - -#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ - -/* - * struct iwl_fw_paging_cmd - paging layout - * - * (FW_PAGING_BLOCK_CMD = 0x4f) - * - * Send to FW the paging layout in the driver. - * - * @flags: various flags for the command - * @block_size: the block size in powers of 2 - * @block_num: number of blocks specified in the command. 
- * @device_phy_addr: virtual addresses from device side -*/ -struct iwl_fw_paging_cmd { - __le32 flags; - __le32 block_size; - __le32 block_num; - __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; -} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ - -/* - * Fw items ID's - * - * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload - * download - */ -enum iwl_fw_item_id { - IWL_FW_ITEM_ID_PAGING = 3, -}; - -/* - * struct iwl_fw_get_item_cmd - get an item from the fw - */ -struct iwl_fw_get_item_cmd { - __le32 item_id; -} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ - -struct iwl_fw_get_item_resp { - __le32 item_id; - __le32 item_byte_cnt; - __le32 item_val; -} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ - -/** - * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD - * @offset: offset in bytes into the section - * @length: in bytes, either how much was written or read - * @type: NVM_SECTION_TYPE_* - * @status: 0 for success, fail otherwise - * @data: if read operation, the data returned. Empty on write. - */ -struct iwl_nvm_access_resp { - __le16 offset; - __le16 length; - __le16 type; - __le16 status; - u8 data[]; -} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ - -/* MVM_ALIVE 0x1 */ - -/* alive response is_valid values */ -#define ALIVE_RESP_UCODE_OK BIT(0) -#define ALIVE_RESP_RFKILL BIT(1) - -/* alive response ver_type values */ -enum { - FW_TYPE_HW = 0, - FW_TYPE_PROT = 1, - FW_TYPE_AP = 2, - FW_TYPE_WOWLAN = 3, - FW_TYPE_TIMING = 4, - FW_TYPE_WIPAN = 5 -}; - -/* alive response ver_subtype values */ -enum { - FW_SUBTYPE_FULL_FEATURE = 0, - FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ - FW_SUBTYPE_REDUCED = 2, - FW_SUBTYPE_ALIVE_ONLY = 3, - FW_SUBTYPE_WOWLAN = 4, - FW_SUBTYPE_AP_SUBTYPE = 5, - FW_SUBTYPE_WIPAN = 6, - FW_SUBTYPE_INITIALIZE = 9 -}; - -#define IWL_ALIVE_STATUS_ERR 0xDEAD -#define IWL_ALIVE_STATUS_OK 0xCAFE - -#define IWL_ALIVE_FLG_RFKILL BIT(0) - -struct mvm_alive_resp_ver1 { - __le16 status; - __le16 flags; - u8 ucode_minor; - u8 ucode_major; - __le16 id; - u8 api_minor; - u8 api_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le16 reserved2; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ -} __packed; /* ALIVE_RES_API_S_VER_1 */ - -struct mvm_alive_resp_ver2 { - __le16 status; - __le16 flags; - u8 ucode_minor; - u8 ucode_major; - __le16 id; - u8 api_minor; - u8 api_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le16 reserved2; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ - __le32 st_fwrd_addr; /* pointer to Store and forward */ - __le32 st_fwrd_size; - u8 umac_minor; /* UMAC version: minor */ - u8 umac_major; /* UMAC version: major */ - __le16 umac_id; /* UMAC version: id */ - __le32 error_info_addr; /* SRAM address for UMAC error log */ - __le32 dbg_print_buff_addr; -} __packed; /* ALIVE_RES_API_S_VER_2 */ - -struct mvm_alive_resp { - __le16 status; - __le16 flags; - __le32 ucode_minor; - __le32 ucode_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 
log_event_table_ptr; /* SRAM address for LMAC event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ - __le32 st_fwrd_addr; /* pointer to Store and forward */ - __le32 st_fwrd_size; - __le32 umac_minor; /* UMAC version: minor */ - __le32 umac_major; /* UMAC version: major */ - __le32 error_info_addr; /* SRAM address for UMAC error log */ - __le32 dbg_print_buff_addr; -} __packed; /* ALIVE_RES_API_S_VER_3 */ - -/* Error response/notification */ -enum { - FW_ERR_UNKNOWN_CMD = 0x0, - FW_ERR_INVALID_CMD_PARAM = 0x1, - FW_ERR_SERVICE = 0x2, - FW_ERR_ARC_MEMORY = 0x3, - FW_ERR_ARC_CODE = 0x4, - FW_ERR_WATCH_DOG = 0x5, - FW_ERR_WEP_GRP_KEY_INDX = 0x10, - FW_ERR_WEP_KEY_SIZE = 0x11, - FW_ERR_OBSOLETE_FUNC = 0x12, - FW_ERR_UNEXPECTED = 0xFE, - FW_ERR_FATAL = 0xFF -}; - -/** - * struct iwl_error_resp - FW error indication - * ( REPLY_ERROR = 0x2 ) - * @error_type: one of FW_ERR_* - * @cmd_id: the command ID for which the error occured - * @bad_cmd_seq_num: sequence number of the erroneous command - * @error_service: which service created the error, applicable only if - * error_type = 2, otherwise 0 - * @timestamp: TSF in usecs. - */ -struct iwl_error_resp { - __le32 error_type; - u8 cmd_id; - u8 reserved1; - __le16 bad_cmd_seq_num; - __le32 error_service; - __le64 timestamp; -} __packed; - - -/* Common PHY, MAC and Bindings definitions */ - -#define MAX_MACS_IN_BINDING (3) -#define MAX_BINDINGS (4) -#define AUX_BINDING_INDEX (3) -#define MAX_PHYS (4) - -/* Used to extract ID and color from the context dword */ -#define FW_CTXT_ID_POS (0) -#define FW_CTXT_ID_MSK (0xff << FW_CTXT_ID_POS) -#define FW_CTXT_COLOR_POS (8) -#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS) -#define FW_CTXT_INVALID (0xffffffff) - -#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\ - (_color << FW_CTXT_COLOR_POS)) - -/* Possible actions on PHYs, MACs and Bindings */ -enum { - FW_CTXT_ACTION_STUB = 0, - FW_CTXT_ACTION_ADD, - FW_CTXT_ACTION_MODIFY, - FW_CTXT_ACTION_REMOVE, - FW_CTXT_ACTION_NUM -}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ - -/* Time Events */ - -/* Time Event types, according to MAC type */ -enum iwl_time_event_type { - /* BSS Station Events */ - TE_BSS_STA_AGGRESSIVE_ASSOC, - TE_BSS_STA_ASSOC, - TE_BSS_EAP_DHCP_PROT, - TE_BSS_QUIET_PERIOD, - - /* P2P Device Events */ - TE_P2P_DEVICE_DISCOVERABLE, - TE_P2P_DEVICE_LISTEN, - TE_P2P_DEVICE_ACTION_SCAN, - TE_P2P_DEVICE_FULL_SCAN, - - /* P2P Client Events */ - TE_P2P_CLIENT_AGGRESSIVE_ASSOC, - TE_P2P_CLIENT_ASSOC, - TE_P2P_CLIENT_QUIET_PERIOD, - - /* P2P GO Events */ - TE_P2P_GO_ASSOC_PROT, - TE_P2P_GO_REPETITIVE_NOA, - TE_P2P_GO_CT_WINDOW, - - /* WiDi Sync Events */ - TE_WIDI_TX_SYNC, - - /* Channel Switch NoA */ - TE_CHANNEL_SWITCH_PERIOD, - - TE_MAX -}; /* MAC_EVENT_TYPE_API_E_VER_1 */ - - - -/* Time event - defines for command API v1 */ - -/* - * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only - * the first fragment is scheduled. - * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only - * the first 2 fragments are scheduled. - * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any - * number of fragments are valid. - * - * Other than the constant defined above, specifying a fragmentation value 'x' - * means that the event can be fragmented but only the first 'x' will be - * scheduled. 
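Many commands in this file address a MAC, PHY or binding through a single id_and_color dword built with FW_CMD_ID_AND_COLOR(). A small self-contained sketch of the packing and unpacking, with the masks re-declared locally so the snippet compiles on its own:

#include <stdint.h>
#include <stdio.h>

/* local copies of the FW_CTXT_* layout above */
#define CTXT_ID_POS     0
#define CTXT_ID_MSK     (0xff << CTXT_ID_POS)
#define CTXT_COLOR_POS  8
#define CTXT_COLOR_MSK  (0xff << CTXT_COLOR_POS)
#define ID_AND_COLOR(id, color) \
        (((id) << CTXT_ID_POS) | ((color) << CTXT_COLOR_POS))

int main(void)
{
        uint32_t id_and_color = ID_AND_COLOR(2, 1);     /* e.g. MAC id 2, color 1 */

        printf("dword 0x%08x -> id %u, color %u\n", id_and_color,
               (id_and_color & CTXT_ID_MSK) >> CTXT_ID_POS,
               (id_and_color & CTXT_COLOR_MSK) >> CTXT_COLOR_POS);
        return 0;
}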
- */ -enum { - TE_V1_FRAG_NONE = 0, - TE_V1_FRAG_SINGLE = 1, - TE_V1_FRAG_DUAL = 2, - TE_V1_FRAG_ENDLESS = 0xffffffff -}; - -/* If a Time Event can be fragmented, this is the max number of fragments */ -#define TE_V1_FRAG_MAX_MSK 0x0fffffff -/* Repeat the time event endlessly (until removed) */ -#define TE_V1_REPEAT_ENDLESS 0xffffffff -/* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff - -/* Time Event dependencies: none, on another TE, or in a specific time */ -enum { - TE_V1_INDEPENDENT = 0, - TE_V1_DEP_OTHER = BIT(0), - TE_V1_DEP_TSF = BIT(1), - TE_V1_EVENT_SOCIOPATHIC = BIT(2), -}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ - -/* - * @TE_V1_NOTIF_NONE: no notifications - * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start - * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end - * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use - * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. - * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start - * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end - * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. - * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. - * - * Supported Time event notifications configuration. - * A notification (both event and fragment) includes a status indicating weather - * the FW was able to schedule the event or not. For fragment start/end - * notification the status is always success. There is no start/end fragment - * notification for monolithic events. - */ -enum { - TE_V1_NOTIF_NONE = 0, - TE_V1_NOTIF_HOST_EVENT_START = BIT(0), - TE_V1_NOTIF_HOST_EVENT_END = BIT(1), - TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), - TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), - TE_V1_NOTIF_HOST_FRAG_START = BIT(4), - TE_V1_NOTIF_HOST_FRAG_END = BIT(5), - TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), - TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), -}; /* MAC_EVENT_ACTION_API_E_VER_2 */ - -/* Time event - defines for command API */ - -/* - * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only - * the first fragment is scheduled. - * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only - * the first 2 fragments are scheduled. - * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any - * number of fragments are valid. - * - * Other than the constant defined above, specifying a fragmentation value 'x' - * means that the event can be fragmented but only the first 'x' will be - * scheduled. - */ -enum { - TE_V2_FRAG_NONE = 0, - TE_V2_FRAG_SINGLE = 1, - TE_V2_FRAG_DUAL = 2, - TE_V2_FRAG_MAX = 0xfe, - TE_V2_FRAG_ENDLESS = 0xff -}; - -/* Repeat the time event endlessly (until removed) */ -#define TE_V2_REPEAT_ENDLESS 0xff -/* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_V2_REPEAT_MAX 0xfe - -#define TE_V2_PLACEMENT_POS 12 -#define TE_V2_ABSENCE_POS 15 - -/* Time event policy values - * A notification (both event and fragment) includes a status indicating weather - * the FW was able to schedule the event or not. For fragment start/end - * notification the status is always success. There is no start/end fragment - * notification for monolithic events. 
- * - * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable - * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start - * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end - * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use - * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. - * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start - * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end - * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. - * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. - * @TE_V2_DEP_OTHER: depends on another time event - * @TE_V2_DEP_TSF: depends on a specific time - * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC - * @TE_V2_ABSENCE: are we present or absent during the Time Event. - */ -enum { - TE_V2_DEFAULT_POLICY = 0x0, - - /* notifications (event start/stop, fragment start/stop) */ - TE_V2_NOTIF_HOST_EVENT_START = BIT(0), - TE_V2_NOTIF_HOST_EVENT_END = BIT(1), - TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), - TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), - - TE_V2_NOTIF_HOST_FRAG_START = BIT(4), - TE_V2_NOTIF_HOST_FRAG_END = BIT(5), - TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), - TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), - T2_V2_START_IMMEDIATELY = BIT(11), - - TE_V2_NOTIF_MSK = 0xff, - - /* placement characteristics */ - TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), - TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), - TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), - - /* are we present or absent during the Time Event. */ - TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), -}; - -/** - * struct iwl_time_event_cmd_api - configuring Time Events - * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also - * with version 1. determined by IWL_UCODE_TLV_FLAGS) - * ( TIME_EVENT_CMD = 0x29 ) - * @id_and_color: ID and color of the relevant MAC - * @action: action to perform, one of FW_CTXT_ACTION_* - * @id: this field has two meanings, depending on the action: - * If the action is ADD, then it means the type of event to add. - * For all other actions it is the unique event ID assigned when the - * event was added by the FW. 
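The policy bits above are simply OR'ed together into the 16-bit policy field of the time event command documented below. A minimal sketch, with a few of the bits copied locally purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* local copies of a few TE_V2_* policy bits from the enum above */
#define NOTIF_HOST_EVENT_START  (1u << 0)
#define NOTIF_HOST_EVENT_END    (1u << 1)
#define DEP_TSF                 (1u << 13)

int main(void)
{
        /* request host notifications on start and end, anchored to a TSF time */
        uint16_t policy = NOTIF_HOST_EVENT_START | NOTIF_HOST_EVENT_END | DEP_TSF;

        printf("policy = 0x%04x\n", policy);
        return 0;
}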
- * @apply_time: When to start the Time Event (in GP2) - * @max_delay: maximum delay to event's start (apply time), in TU - * @depends_on: the unique ID of the event we depend on (if any) - * @interval: interval between repetitions, in TU - * @duration: duration of event in TU - * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS - * @max_frags: maximal number of fragments the Time Event can be divided to - * @policy: defines whether uCode shall notify the host or other uCode modules - * on event and/or fragment start and/or end - * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF - * TE_EVENT_SOCIOPATHIC - * using TE_ABSENCE and using TE_NOTIF_* - */ -struct iwl_time_event_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - __le32 id; - /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ - __le32 apply_time; - __le32 max_delay; - __le32 depends_on; - __le32 interval; - __le32 duration; - u8 repeat; - u8 max_frags; - __le16 policy; -} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ - -/** - * struct iwl_time_event_resp - response structure to iwl_time_event_cmd - * @status: bit 0 indicates success, all others specify errors - * @id: the Time Event type - * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE - * @id_and_color: ID and color of the relevant MAC - */ -struct iwl_time_event_resp { - __le32 status; - __le32 id; - __le32 unique_id; - __le32 id_and_color; -} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ - -/** - * struct iwl_time_event_notif - notifications of time event start/stop - * ( TIME_EVENT_NOTIFICATION = 0x2a ) - * @timestamp: action timestamp in GP2 - * @session_id: session's unique id - * @unique_id: unique id of the Time Event itself - * @id_and_color: ID and color of the relevant MAC - * @action: one of TE_NOTIF_START or TE_NOTIF_END - * @status: true if scheduled, false otherwise (not executed) - */ -struct iwl_time_event_notif { - __le32 timestamp; - __le32 session_id; - __le32 unique_id; - __le32 id_and_color; - __le32 action; - __le32 status; -} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ - - -/* Bindings and Time Quota */ - -/** - * struct iwl_binding_cmd - configuring bindings - * ( BINDING_CONTEXT_CMD = 0x2b ) - * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* - * @macs: array of MAC id and colors which belong to the binding - * @phy: PHY id and color which belongs to the binding - */ -struct iwl_binding_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* BINDING_DATA_API_S_VER_1 */ - __le32 macs[MAX_MACS_IN_BINDING]; - __le32 phy; -} __packed; /* BINDING_CMD_API_S_VER_1 */ - -/* The maximal number of fragments in the FW's schedule session */ -#define IWL_MVM_MAX_QUOTA 128 - -/** - * struct iwl_time_quota_data - configuration of time quota per binding - * @id_and_color: ID and color of the relevant Binding - * @quota: absolute time quota in TU. The scheduler will try to divide the - * remainig quota (after Time Events) according to this quota. 
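To make the quota layout concrete, here is a stand-alone sketch that splits IWL_MVM_MAX_QUOTA across bindings in proportion to a hypothetical per-binding interface count. This only illustrates how the quotas array could be filled; it is not necessarily the policy this driver implements.

#include <stdint.h>
#include <stdio.h>

#define MAX_BINDINGS_EX   4     /* mirrors MAX_BINDINGS above */
#define MAX_QUOTA_EX      128   /* mirrors IWL_MVM_MAX_QUOTA above */

int main(void)
{
        /* hypothetical: interfaces active on each binding */
        unsigned int n_ifs[MAX_BINDINGS_EX] = { 2, 1, 0, 0 };
        unsigned int total = 3, i;

        for (i = 0; i < MAX_BINDINGS_EX; i++) {
                unsigned int quota = total ? MAX_QUOTA_EX * n_ifs[i] / total : 0;

                printf("binding %u: quota %u\n", i, quota);
        }
        return 0;
}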
- * @max_duration: max uninterrupted context duration in TU - */ -struct iwl_time_quota_data { - __le32 id_and_color; - __le32 quota; - __le32 max_duration; -} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ - -/** - * struct iwl_time_quota_cmd - configuration of time quota between bindings - * ( TIME_QUOTA_CMD = 0x2c ) - * @quotas: allocations per binding - */ -struct iwl_time_quota_cmd { - struct iwl_time_quota_data quotas[MAX_BINDINGS]; -} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ - - -/* PHY context */ - -/* Supported bands */ -#define PHY_BAND_5 (0) -#define PHY_BAND_24 (1) - -/* Supported channel width, vary if there is VHT support */ -#define PHY_VHT_CHANNEL_MODE20 (0x0) -#define PHY_VHT_CHANNEL_MODE40 (0x1) -#define PHY_VHT_CHANNEL_MODE80 (0x2) -#define PHY_VHT_CHANNEL_MODE160 (0x3) - -/* - * Control channel position: - * For legacy set bit means upper channel, otherwise lower. - * For VHT - bit-2 marks if the control is lower/upper relative to center-freq - * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. - * center_freq - * | - * 40Mhz |_______|_______| - * 80Mhz |_______|_______|_______|_______| - * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| - * code 011 010 001 000 | 100 101 110 111 - */ -#define PHY_VHT_CTRL_POS_1_BELOW (0x0) -#define PHY_VHT_CTRL_POS_2_BELOW (0x1) -#define PHY_VHT_CTRL_POS_3_BELOW (0x2) -#define PHY_VHT_CTRL_POS_4_BELOW (0x3) -#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) -#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) -#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) -#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) - -/* - * @band: PHY_BAND_* - * @channel: channel number - * @width: PHY_[VHT|LEGACY]_CHANNEL_* - * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* - */ -struct iwl_fw_channel_info { - u8 band; - u8 channel; - u8 width; - u8 ctrl_pos; -} __packed; - -#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) -#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ - (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) -#define PHY_RX_CHAIN_VALID_POS (1) -#define PHY_RX_CHAIN_VALID_MSK \ - (0x7 << PHY_RX_CHAIN_VALID_POS) -#define PHY_RX_CHAIN_FORCE_SEL_POS (4) -#define PHY_RX_CHAIN_FORCE_SEL_MSK \ - (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) -#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) -#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ - (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) -#define PHY_RX_CHAIN_CNT_POS (10) -#define PHY_RX_CHAIN_CNT_MSK \ - (0x3 << PHY_RX_CHAIN_CNT_POS) -#define PHY_RX_CHAIN_MIMO_CNT_POS (12) -#define PHY_RX_CHAIN_MIMO_CNT_MSK \ - (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) -#define PHY_RX_CHAIN_MIMO_FORCE_POS (14) -#define PHY_RX_CHAIN_MIMO_FORCE_MSK \ - (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) - -/* TODO: fix the value, make it depend on firmware at runtime? */ -#define NUM_PHY_CTX 3 - -/* TODO: complete missing documentation */ -/** - * struct iwl_phy_context_cmd - config of the PHY context - * ( PHY_CONTEXT_CMD = 0x8 ) - * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* - * @apply_time: 0 means immediate apply and context switch. - * other value means apply new params after X usecs - * @tx_param_color: ??? - * @channel_info: - * @txchain_info: ??? - * @rxchain_info: ??? - * @acquisition_data: ??? 
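The 3-bit control-channel position code can be derived from the offset of the 20 MHz control channel relative to the wide channel's centre frequency, as the diagram above suggests (bit 2 selects above/below, bits 1:0 the distance in 20 MHz steps). A small sketch with purely local names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* map the control-channel offset (in MHz) from the wide channel centre
 * to the 3-bit ctrl_pos code described above */
static uint8_t ctrl_pos(int offset_mhz)
{
        uint8_t code = (abs(offset_mhz) - 10) / 20;     /* 10, 30, 50, 70 MHz -> 0..3 */

        return offset_mhz > 0 ? (uint8_t)(0x4 | code) : code;
}

int main(void)
{
        /* 80 MHz channel centred on 5210 MHz, control channel at 5180 MHz */
        printf("ctrl_pos = 0x%x\n", ctrl_pos(5180 - 5210));     /* -30 MHz -> 0x1, i.e. 2_BELOW */
        return 0;
}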
- * @dsp_cfg_flags: set to 0 - */ -struct iwl_phy_context_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* PHY_CONTEXT_DATA_API_S_VER_1 */ - __le32 apply_time; - __le32 tx_param_color; - struct iwl_fw_channel_info ci; - __le32 txchain_info; - __le32 rxchain_info; - __le32 acquisition_data; - __le32 dsp_cfg_flags; -} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ - -/* - * Aux ROC command - * - * Command requests the firmware to create a time event for a certain duration - * and remain on the given channel. This is done by using the Aux framework in - * the FW. - * The command was first used for Hot Spot issues - but can be used regardless - * to Hot Spot. - * - * ( HOT_SPOT_CMD 0x53 ) - * - * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* - * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the - * event_unique_id should be the id of the time event assigned by ucode. - * Otherwise ignore the event_unique_id. - * @sta_id_and_color: station id and color, resumed during "Remain On Channel" - * activity. - * @channel_info: channel info - * @node_addr: Our MAC Address - * @reserved: reserved for alignment - * @apply_time: GP2 value to start (should always be the current GP2 value) - * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max - * time by which start of the event is allowed to be postponed. - * @duration: event duration in TU To calculate event duration: - * timeEventDuration = min(duration, remainingQuota) - */ -struct iwl_hs20_roc_req { - /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ - __le32 id_and_color; - __le32 action; - __le32 event_unique_id; - __le32 sta_id_and_color; - struct iwl_fw_channel_info channel_info; - u8 node_addr[ETH_ALEN]; - __le16 reserved; - __le32 apply_time; - __le32 apply_time_max_delay; - __le32 duration; -} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ - -/* - * values for AUX ROC result values - */ -enum iwl_mvm_hot_spot { - HOT_SPOT_RSP_STATUS_OK, - HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, - HOT_SPOT_MAX_NUM_OF_SESSIONS, -}; - -/* - * Aux ROC command response - * - * In response to iwl_hs20_roc_req the FW sends this command to notify the - * driver the uid of the timevent. 
- * - * ( HOT_SPOT_CMD 0x53 ) - * - * @event_unique_id: Unique ID of time event assigned by ucode - * @status: Return status 0 is success, all the rest used for specific errors - */ -struct iwl_hs20_roc_res { - __le32 event_unique_id; - __le32 status; -} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ - -/** - * struct iwl_radio_version_notif - information on the radio version - * ( RADIO_VERSION_NOTIFICATION = 0x68 ) - * @radio_flavor: - * @radio_step: - * @radio_dash: - */ -struct iwl_radio_version_notif { - __le32 radio_flavor; - __le32 radio_step; - __le32 radio_dash; -} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */ - -enum iwl_card_state_flags { - CARD_ENABLED = 0x00, - HW_CARD_DISABLED = 0x01, - SW_CARD_DISABLED = 0x02, - CT_KILL_CARD_DISABLED = 0x04, - HALT_CARD_DISABLED = 0x08, - CARD_DISABLED_MSK = 0x0f, - CARD_IS_RX_ON = 0x10, -}; - -/** - * struct iwl_radio_version_notif - information on the radio version - * ( CARD_STATE_NOTIFICATION = 0xa1 ) - * @flags: %iwl_card_state_flags - */ -struct iwl_card_state_notif { - __le32 flags; -} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ - -/** - * struct iwl_missed_beacons_notif - information on missed beacons - * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) - * @mac_id: interface ID - * @consec_missed_beacons_since_last_rx: number of consecutive missed - * beacons since last RX. - * @consec_missed_beacons: number of consecutive missed beacons - * @num_expected_beacons: - * @num_recvd_beacons: - */ -struct iwl_missed_beacons_notif { - __le32 mac_id; - __le32 consec_missed_beacons_since_last_rx; - __le32 consec_missed_beacons; - __le32 num_expected_beacons; - __le32 num_recvd_beacons; -} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ - -/** - * struct iwl_mfuart_load_notif - mfuart image version & status - * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) - * @installed_ver: installed image version - * @external_ver: external image version - * @status: MFUART loading status - * @duration: MFUART loading time -*/ -struct iwl_mfuart_load_notif { - __le32 installed_ver; - __le32 external_ver; - __le32 status; - __le32 duration; -} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/ - -/** - * struct iwl_set_calib_default_cmd - set default value for calibration. - * ( SET_CALIB_DEFAULT_CMD = 0x8e ) - * @calib_index: the calibration to set value for - * @length: of data - * @data: the value to set for the calibration result - */ -struct iwl_set_calib_default_cmd { - __le16 calib_index; - __le16 length; - u8 data[0]; -} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ - -#define MAX_PORT_ID_NUM 2 -#define MAX_MCAST_FILTERING_ADDRESSES 256 - -/** - * struct iwl_mcast_filter_cmd - configure multicast filter. - * @filter_own: Set 1 to filter out multicast packets sent by station itself - * @port_id: Multicast MAC addresses array specifier. This is a strange way - * to identify network interface adopted in host-device IF. - * It is used by FW as index in array of addresses. This array has - * MAX_PORT_ID_NUM members. - * @count: Number of MAC addresses in the array - * @pass_all: Set 1 to pass all multicast packets. - * @bssid: current association BSSID. - * @addr_list: Place holder for array of MAC addresses. - * IMPORTANT: add padding if necessary to ensure DWORD alignment. 
- */ -struct iwl_mcast_filter_cmd { - u8 filter_own; - u8 port_id; - u8 count; - u8 pass_all; - u8 bssid[6]; - u8 reserved[2]; - u8 addr_list[0]; -} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ - -#define MAX_BCAST_FILTERS 8 -#define MAX_BCAST_FILTER_ATTRS 2 - -/** - * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet - * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. - * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. - * start of ip payload). - */ -enum iwl_mvm_bcast_filter_attr_offset { - BCAST_FILTER_OFFSET_PAYLOAD_START = 0, - BCAST_FILTER_OFFSET_IP_END = 1, -}; - -/** - * struct iwl_fw_bcast_filter_attr - broadcast filter attribute - * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. - * @offset: starting offset of this pattern. - * @val: value to match - big endian (MSB is the first - * byte to match from offset pos). - * @mask: mask to match (big endian). - */ -struct iwl_fw_bcast_filter_attr { - u8 offset_type; - u8 offset; - __le16 reserved1; - __be32 val; - __be32 mask; -} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ - -/** - * enum iwl_mvm_bcast_filter_frame_type - filter frame type - * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. - * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames - */ -enum iwl_mvm_bcast_filter_frame_type { - BCAST_FILTER_FRAME_TYPE_ALL = 0, - BCAST_FILTER_FRAME_TYPE_IPV4 = 1, -}; - -/** - * struct iwl_fw_bcast_filter - broadcast filter - * @discard: discard frame (1) or let it pass (0). - * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. - * @num_attrs: number of valid attributes in this filter. - * @attrs: attributes of this filter. a filter is considered matched - * only when all its attributes are matched (i.e. AND relationship) - */ -struct iwl_fw_bcast_filter { - u8 discard; - u8 frame_type; - u8 num_attrs; - u8 reserved1; - struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; -} __packed; /* BCAST_FILTER_S_VER_1 */ - -/** - * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. - * @default_discard: default action for this mac (discard (1) / pass (0)). - * @attached_filters: bitmap of relevant filters for this mac. - */ -struct iwl_fw_bcast_mac { - u8 default_discard; - u8 reserved1; - __le16 attached_filters; -} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ - -/** - * struct iwl_bcast_filter_cmd - broadcast filtering configuration - * @disable: enable (0) / disable (1) - * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) - * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) - * @filters: broadcast filters - * @macs: broadcast filtering configuration per-mac - */ -struct iwl_bcast_filter_cmd { - u8 disable; - u8 max_bcast_filters; - u8 max_macs; - u8 reserved1; - struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; - struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; -} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ - -/* - * enum iwl_mvm_marker_id - maker ids - * - * The ids for different type of markers to insert into the usniffer logs - */ -enum iwl_mvm_marker_id { - MARKER_ID_TX_FRAME_LATENCY = 1, -}; /* MARKER_ID_API_E_VER_1 */ - -/** - * struct iwl_mvm_marker - mark info into the usniffer logs - * - * (MARKER_CMD = 0xcb) - * - * Mark the UTC time stamp into the usniffer logs together with additional - * metadata, so the usniffer output can be parsed. - * In the command response the ucode will return the GP2 time. - * - * @dw_len: The amount of dwords following this byte including this byte. 
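A broadcast-filter attribute match is a masked 32-bit compare at a byte offset, with value and mask stored big endian (offset_type selects the base of that offset; it is ignored here for brevity). A stand-alone sketch of the same check done host-side on a hypothetical payload:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* masked compare of the 4 payload bytes at @offset against @val
 * (payload bytes MSB first, exactly as the val/mask fields are stored) */
static int attr_matches(const uint8_t *payload, size_t len,
                        uint8_t offset, uint32_t val, uint32_t mask)
{
        uint32_t word;

        if (offset + 4u > len)
                return 0;
        word = (uint32_t)payload[offset] << 24 | (uint32_t)payload[offset + 1] << 16 |
               (uint32_t)payload[offset + 2] << 8 | payload[offset + 3];
        return (word & mask) == val;
}

int main(void)
{
        /* hypothetical ARP-like payload: match the first four bytes exactly */
        const uint8_t payload[] = { 0x08, 0x06, 0x00, 0x01, 0x08, 0x00 };

        printf("%d\n", attr_matches(payload, sizeof(payload), 0,
                                    0x08060001, 0xffffffff));
        return 0;
}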
- * @marker_id: A unique marker id (iwl_mvm_marker_id). - * @reserved: reserved. - * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC - * @metadata: additional meta data that will be written to the unsiffer log - */ -struct iwl_mvm_marker { - u8 dwLen; - u8 markerId; - __le16 reserved; - __le64 timestamp; - __le32 metadata[0]; -} __packed; /* MARKER_API_S_VER_1 */ - -/* - * enum iwl_dc2dc_config_id - flag ids - * - * Ids of dc2dc configuration flags - */ -enum iwl_dc2dc_config_id { - DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ - DCDC_FREQ_TUNE_SET = 0x2, -}; /* MARKER_ID_API_E_VER_1 */ - -/** - * struct iwl_dc2dc_config_cmd - configure dc2dc values - * - * (DC2DC_CONFIG_CMD = 0x83) - * - * Set/Get & configure dc2dc values. - * The command always returns the current dc2dc values. - * - * @flags: set/get dc2dc - * @enable_low_power_mode: not used. - * @dc2dc_freq_tune0: frequency divider - digital domain - * @dc2dc_freq_tune1: frequency divider - analog domain - */ -struct iwl_dc2dc_config_cmd { - __le32 flags; - __le32 enable_low_power_mode; /* not used */ - __le32 dc2dc_freq_tune0; - __le32 dc2dc_freq_tune1; -} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ - -/** - * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd - * - * Current dc2dc values returned by the FW. - * - * @dc2dc_freq_tune0: frequency divider - digital domain - * @dc2dc_freq_tune1: frequency divider - analog domain - */ -struct iwl_dc2dc_config_resp { - __le32 dc2dc_freq_tune0; - __le32 dc2dc_freq_tune1; -} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ - -/*********************************** - * Smart Fifo API - ***********************************/ -/* Smart Fifo state */ -enum iwl_sf_state { - SF_LONG_DELAY_ON = 0, /* should never be called by driver */ - SF_FULL_ON, - SF_UNINIT, - SF_INIT_OFF, - SF_HW_NUM_STATES -}; - -/* Smart Fifo possible scenario */ -enum iwl_sf_scenario { - SF_SCENARIO_SINGLE_UNICAST, - SF_SCENARIO_AGG_UNICAST, - SF_SCENARIO_MULTICAST, - SF_SCENARIO_BA_RESP, - SF_SCENARIO_TX_RESP, - SF_NUM_SCENARIO -}; - -#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ -#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ - -/* smart FIFO default values */ -#define SF_W_MARK_SISO 6144 -#define SF_W_MARK_MIMO2 8192 -#define SF_W_MARK_MIMO3 6144 -#define SF_W_MARK_LEGACY 4096 -#define SF_W_MARK_SCAN 4096 - -/* SF Scenarios timers for default configuration (aligned to 32 uSec) */ -#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ -#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ - -/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ -#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ -#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ -#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ -#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ -#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ -#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ -#define SF_BA_IDLE_TIMER 320 /* 300 uSec */ -#define SF_BA_AGING_TIMER 2016 /* 2 mSec */ -#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ -#define 
SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ - -#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ - -#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) - -/** - * Smart Fifo configuration command. - * @state: smart fifo state, types listed in enum %iwl_sf_sate. - * @watermark: Minimum allowed availabe free space in RXF for transient state. - * @long_delay_timeouts: aging and idle timer values for each scenario - * in long delay state. - * @full_on_timeouts: timer values for each scenario in full on state. - */ -struct iwl_sf_cfg_cmd { - __le32 state; - __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; - __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; - __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; -} __packed; /* SF_CFG_API_S_VER_2 */ - -/*********************************** - * Location Aware Regulatory (LAR) API - MCC updates - ***********************************/ - -/** - * struct iwl_mcc_update_cmd - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. - * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - */ -struct iwl_mcc_update_cmd { - __le16 mcc; - u8 source_id; - u8 reserved; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S */ - -/** - * iwl_mcc_update_resp - response to MCC_UPDATE_CMD. - * Contains the new channel control profile map, if changed, and the new MCC - * (mobile country code). - * The new MCC may be different than what was requested in MCC_UPDATE_CMD. - * @status: see &enum iwl_mcc_update_status - * @mcc: the new applied MCC - * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) - * @channels: channel control data map, DWORD for each channel. Only the first - * 16bits are used. - */ -struct iwl_mcc_update_resp { - __le32 status; - __le16 mcc; - u8 cap; - u8 source_id; - __le32 n_channels; - __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */ - -/** - * struct iwl_mcc_chub_notif - chub notifies of mcc change - * (MCC_CHUB_UPDATE_CMD = 0xc9) - * The Chub (Communication Hub, CommsHUB) is a HW component that connects to - * the cellular and connectivity cores that gets updates of the mcc, and - * notifies the ucode directly of any mcc change. - * The ucode requests the driver to request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. 
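The two ASCII letters of the country code are packed into the 16-bit mcc field; the sketch below assumes the first letter sits in the high byte. All names are local to the example.

#include <stdint.h>
#include <stdio.h>

/* pack a two-letter country code into the 16-bit mcc field
 * (first letter assumed to occupy the high byte) */
static uint16_t mcc_pack(const char alpha2[2])
{
        return (uint16_t)((uint8_t)alpha2[0] << 8 | (uint8_t)alpha2[1]);
}

int main(void)
{
        uint16_t mcc = mcc_pack("US");

        printf("mcc 0x%04x -> '%c%c'\n", mcc, mcc >> 8, mcc & 0xff);
        return 0;
}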
- * @mcc: given mobile country code - * @source_id: identity of the change originator, see iwl_mcc_source - * @reserved1: reserved for alignment - */ -struct iwl_mcc_chub_notif { - u16 mcc; - u8 source_id; - u8 reserved1; -} __packed; /* LAR_MCC_NOTIFY_S */ - -enum iwl_mcc_update_status { - MCC_RESP_NEW_CHAN_PROFILE, - MCC_RESP_SAME_CHAN_PROFILE, - MCC_RESP_INVALID, - MCC_RESP_NVM_DISABLED, - MCC_RESP_ILLEGAL, - MCC_RESP_LOW_PRIORITY, -}; - -enum iwl_mcc_source { - MCC_SOURCE_OLD_FW = 0, - MCC_SOURCE_ME = 1, - MCC_SOURCE_BIOS = 2, - MCC_SOURCE_3G_LTE_HOST = 3, - MCC_SOURCE_3G_LTE_DEVICE = 4, - MCC_SOURCE_WIFI = 5, - MCC_SOURCE_RESERVED = 6, - MCC_SOURCE_DEFAULT = 7, - MCC_SOURCE_UNINITIALIZED = 8, - MCC_SOURCE_GET_CURRENT = 0x10 -}; - -/* DTS measurements */ - -enum iwl_dts_measurement_flags { - DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), - DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), -}; - -/** - * iwl_dts_measurement_cmd - request DTS temperature and/or voltage measurements - * - * @flags: indicates which measurements we want as specified in &enum - * iwl_dts_measurement_flags - */ -struct iwl_dts_measurement_cmd { - __le32 flags; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ - -/** -* enum iwl_dts_control_measurement_mode - DTS measurement type -* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read -* back (latest value. Not waiting for new value). Use automatic -* SW DTS configuration. -* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, -* trigger DTS reading and provide read back temperature read -* when available. -* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read -* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, -* without measurement trigger. -*/ -enum iwl_dts_control_measurement_mode { - DTS_AUTOMATIC = 0, - DTS_REQUEST_READ = 1, - DTS_OVER_WRITE = 2, - DTS_DIRECT_WITHOUT_MEASURE = 3, -}; - -/** -* enum iwl_dts_used - DTS to use or used for measurement in the DTS request -* @DTS_USE_TOP: Top -* @DTS_USE_CHAIN_A: chain A -* @DTS_USE_CHAIN_B: chain B -* @DTS_USE_CHAIN_C: chain C -* @XTAL_TEMPERATURE - read temperature from xtal -*/ -enum iwl_dts_used { - DTS_USE_TOP = 0, - DTS_USE_CHAIN_A = 1, - DTS_USE_CHAIN_B = 2, - DTS_USE_CHAIN_C = 3, - XTAL_TEMPERATURE = 4, -}; - -/** -* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode -* @DTS_BIT6_MODE: bit 6 mode -* @DTS_BIT8_MODE: bit 8 mode -*/ -enum iwl_dts_bit_mode { - DTS_BIT6_MODE = 0, - DTS_BIT8_MODE = 1, -}; - -/** - * iwl_ext_dts_measurement_cmd - request extended DTS temperature measurements - * @control_mode: see &enum iwl_dts_control_measurement_mode - * @temperature: used when over write DTS mode is selected - * @sensor: set temperature sensor to use. See &enum iwl_dts_used - * @avg_factor: average factor to DTS in request DTS read mode - * @bit_mode: value defines the DTS bit mode to use. 
See &enum iwl_dts_bit_mode - * @step_duration: step duration for the DTS - */ -struct iwl_ext_dts_measurement_cmd { - __le32 control_mode; - __le32 temperature; - __le32 sensor; - __le32 avg_factor; - __le32 bit_mode; - __le32 step_duration; -} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ - -/** - * iwl_dts_measurement_notif - notification received with the measurements - * - * @temp: the measured temperature - * @voltage: the measured voltage - */ -struct iwl_dts_measurement_notif { - __le32 temp; - __le32 voltage; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ - -/*********************************** - * TDLS API - ***********************************/ - -/* Type of TDLS request */ -enum iwl_tdls_channel_switch_type { - TDLS_SEND_CHAN_SW_REQ = 0, - TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, - TDLS_MOVE_CH, -}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ - -/** - * Switch timing sub-element in a TDLS channel-switch command - * @frame_timestamp: GP2 timestamp of channel-switch request/response packet - * received from peer - * @max_offchan_duration: What amount of microseconds out of a DTIM is given - * to the TDLS off-channel communication. For instance if the DTIM is - * 200TU and the TDLS peer is to be given 25% of the time, the value - * given will be 50TU, or 50 * 1024 if translated into microseconds. - * @switch_time: switch time the peer sent in its channel switch timing IE - * @switch_timout: switch timeout the peer sent in its channel switch timing IE - */ -struct iwl_tdls_channel_switch_timing { - __le32 frame_timestamp; /* GP2 time of peer packet Rx */ - __le32 max_offchan_duration; /* given in micro-seconds */ - __le32 switch_time; /* given in micro-seconds */ - __le32 switch_timeout; /* given in micro-seconds */ -} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */ - -#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200 - -/** - * TDLS channel switch frame template - * - * A template representing a TDLS channel-switch request or response frame - * - * @switch_time_offset: offset to the channel switch timing IE in the template - * @tx_cmd: Tx parameters for the frame - * @data: frame data - */ -struct iwl_tdls_channel_switch_frame { - __le32 switch_time_offset; - struct iwl_tx_cmd tx_cmd; - u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ - -/** - * TDLS channel switch command - * - * The command is sent to initiate a channel switch and also in response to - * incoming TDLS channel-switch request/response packets from remote peers. 
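The 200 TU / 25% example in the switch-timing description above works out as follows; a tiny stand-alone sketch with the 1024 microseconds-per-TU conversion made explicit:

#include <stdint.h>
#include <stdio.h>

#define TU_TO_US  1024u         /* one beacon time unit in microseconds */

/* off-channel share of one DTIM interval, in microseconds */
static uint32_t offchan_duration_us(uint32_t dtim_tu, uint32_t percent)
{
        return dtim_tu * percent / 100 * TU_TO_US;
}

int main(void)
{
        printf("%u us\n", offchan_duration_us(200, 25));        /* 50 TU -> 51200 us */
        return 0;
}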
- * - * @switch_type: see &enum iwl_tdls_channel_switch_type - * @peer_sta_id: station id of TDLS peer - * @ci: channel we switch to - * @timing: timing related data for command - * @frame: channel-switch request/response template, depending to switch_type - */ -struct iwl_tdls_channel_switch_cmd { - u8 switch_type; - __le32 peer_sta_id; - struct iwl_fw_channel_info ci; - struct iwl_tdls_channel_switch_timing timing; - struct iwl_tdls_channel_switch_frame frame; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ - -/** - * TDLS channel switch start notification - * - * @status: non-zero on success - * @offchannel_duration: duration given in microseconds - * @sta_id: peer currently performing the channel-switch with - */ -struct iwl_tdls_channel_switch_notif { - __le32 status; - __le32 offchannel_duration; - __le32 sta_id; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */ - -/** - * TDLS station info - * - * @sta_id: station id of the TDLS peer - * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx - * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer - * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise - */ -struct iwl_tdls_sta_info { - u8 sta_id; - u8 tx_to_peer_tid; - __le16 tx_to_peer_ssn; - __le32 is_initiator; -} __packed; /* TDLS_STA_INFO_VER_1 */ - -/** - * TDLS basic config command - * - * @id_and_color: MAC id and color being configured - * @tdls_peer_count: amount of currently connected TDLS peers - * @tx_to_ap_tid: TID reverved vs. the AP for FW based Tx - * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP - * @sta_info: per-station info. Only the first tdls_peer_count entries are set - * @pti_req_data_offset: offset of network-level data for the PTI template - * @pti_req_tx_cmd: Tx parameters for PTI request template - * @pti_req_template: PTI request template data - */ -struct iwl_tdls_config_cmd { - __le32 id_and_color; /* mac id and color */ - u8 tdls_peer_count; - u8 tx_to_ap_tid; - __le16 tx_to_ap_ssn; - struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; - - __le32 pti_req_data_offset; - struct iwl_tx_cmd pti_req_tx_cmd; - u8 pti_req_template[0]; -} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ - -/** - * TDLS per-station config information from FW - * - * @sta_id: station id of the TDLS peer - * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to - * the peer - */ -struct iwl_tdls_config_sta_info_res { - __le16 sta_id; - __le16 tx_to_peer_last_seq; -} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ - -/** - * TDLS config information from FW - * - * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP - * @sta_info: per-station TDLS config information - */ -struct iwl_tdls_config_res { - __le32 tx_to_ap_last_seq; - struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; -} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ - -#define TX_FIFO_MAX_NUM 8 -#define RX_FIFO_MAX_NUM 2 - -/** - * Shared memory configuration information from the FW - * - * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not - * accessible) - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to - * 0x0 as accessible only via DBGM RDAT) - * @sample_buff_size: internal sample buff size - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre - * 8000 HW set to 0x0 as not accessible) - * @txfifo_size: size of TXF0 ... 
TXF7 - * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - */ -struct iwl_shared_mem_cfg { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM]; - __le32 rxfifo_size[RX_FIFO_MAX_NUM]; - __le32 page_buff_addr; - __le32 page_buff_size; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ - -#endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c deleted file mode 100644 index d906fa13ba97..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ /dev/null @@ -1,1166 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include - -#include "iwl-trans.h" -#include "iwl-op-mode.h" -#include "iwl-fw.h" -#include "iwl-debug.h" -#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */ -#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */ -#include "iwl-prph.h" -#include "iwl-eeprom-parse.h" - -#include "mvm.h" -#include "iwl-phy-db.h" - -#define MVM_UCODE_ALIVE_TIMEOUT HZ -#define MVM_UCODE_CALIB_TIMEOUT (2*HZ) - -#define UCODE_VALID_OK cpu_to_le32(0x1) - -struct iwl_mvm_alive_data { - bool valid; - u32 scd_base_addr; -}; - -static inline const struct fw_img * -iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) -{ - if (ucode_type >= IWL_UCODE_TYPE_MAX) - return NULL; - - return &mvm->fw->img[ucode_type]; -} - -static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) -{ - struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { - .valid = cpu_to_le32(valid_tx_ant), - }; - - IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); - return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0, - sizeof(tx_ant_cmd), &tx_ant_cmd); -} - -static void iwl_free_fw_paging(struct iwl_mvm *mvm) -{ - int i; - - if (!mvm->fw_paging_db[0].fw_paging_block) - return; - - for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { - if (!mvm->fw_paging_db[i].fw_paging_block) { - IWL_DEBUG_FW(mvm, - "Paging: block %d already freed, continue to next page\n", - i); - - continue; - } - - __free_pages(mvm->fw_paging_db[i].fw_paging_block, - get_order(mvm->fw_paging_db[i].fw_paging_size)); - } - kfree(mvm->trans->paging_download_buf); - memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); -} - -static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) -{ - int sec_idx, idx; - u32 offset = 0; - - /* - * find where is the paging image start point: - * if CPU2 exist and it's in paging format, then the image looks like: - * CPU1 sections (2 or more) - * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2 - * CPU2 sections (not paged) - * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2 - * non paged to CPU2 paging sec - * CPU2 paging CSS - * CPU2 paging image (including instruction and data) - */ - for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) { - if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { - sec_idx++; - break; - } - } - - if (sec_idx >= IWL_UCODE_SECTION_MAX) { - IWL_ERR(mvm, "driver didn't find paging image\n"); - iwl_free_fw_paging(mvm); - return -EINVAL; - } - - /* copy the CSS block to the dram */ - IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n", - sec_idx); - - memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), - image->sec[sec_idx].data, - mvm->fw_paging_db[0].fw_paging_size); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d CSS bytes to first block\n", - mvm->fw_paging_db[0].fw_paging_size); - - sec_idx++; - - /* - * copy the paging blocks to the dram - * loop index start from 1 since that 
CSS block already copied to dram - * and CSS index is 0. - * loop stop at num_of_paging_blk since that last block is not full. - */ - for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { - memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), - image->sec[sec_idx].data + offset, - mvm->fw_paging_db[idx].fw_paging_size); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d paging bytes to block %d\n", - mvm->fw_paging_db[idx].fw_paging_size, - idx); - - offset += mvm->fw_paging_db[idx].fw_paging_size; - } - - /* copy the last paging block */ - if (mvm->num_of_pages_in_last_blk > 0) { - memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), - image->sec[sec_idx].data + offset, - FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d pages in the last block %d\n", - mvm->num_of_pages_in_last_blk, idx); - } - - return 0; -} - -static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, - const struct fw_img *image) -{ - struct page *block; - dma_addr_t phys = 0; - int blk_idx = 0; - int order, num_of_pages; - int dma_enabled; - - if (mvm->fw_paging_db[0].fw_paging_block) - return 0; - - dma_enabled = is_device_dma_capable(mvm->trans->dev); - - /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ - BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); - - num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; - mvm->num_of_paging_blk = ((num_of_pages - 1) / - NUM_OF_PAGE_PER_GROUP) + 1; - - mvm->num_of_pages_in_last_blk = - num_of_pages - - NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); - - IWL_DEBUG_FW(mvm, - "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", - mvm->num_of_paging_blk, - mvm->num_of_pages_in_last_blk); - - /* allocate block of 4Kbytes for paging CSS */ - order = get_order(FW_PAGING_SIZE); - block = alloc_pages(GFP_KERNEL, order); - if (!block) { - /* free all the previous pages since we failed */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - - mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE; - - if (dma_enabled) { - phys = dma_map_page(mvm->trans->dev, block, 0, - PAGE_SIZE << order, DMA_BIDIRECTIONAL); - if (dma_mapping_error(mvm->trans->dev, phys)) { - /* - * free the previous pages and the current one since - * we failed to map_page. - */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; - } else { - mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG | - blk_idx << BLOCK_2_EXP_SIZE; - } - - IWL_DEBUG_FW(mvm, - "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", - order); - - /* - * allocate blocks in dram. - * since that CSS allocated in fw_paging_db[0] loop start from index 1 - */ - for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - /* allocate block of PAGING_BLOCK_SIZE (32K) */ - order = get_order(PAGING_BLOCK_SIZE); - block = alloc_pages(GFP_KERNEL, order); - if (!block) { - /* free all the previous pages since we failed */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - - mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE; - - if (dma_enabled) { - phys = dma_map_page(mvm->trans->dev, block, 0, - PAGE_SIZE << order, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(mvm->trans->dev, phys)) { - /* - * free the previous pages and the current one - * since we failed to map_page. 
- */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; - } else { - mvm->fw_paging_db[blk_idx].fw_paging_phys = - PAGING_ADDR_SIG | - blk_idx << BLOCK_2_EXP_SIZE; - } - - IWL_DEBUG_FW(mvm, - "Paging: allocated 32K bytes (order %d) for firmware paging.\n", - order); - } - - return 0; -} - -static int iwl_save_fw_paging(struct iwl_mvm *mvm, - const struct fw_img *fw) -{ - int ret; - - ret = iwl_alloc_fw_paging_mem(mvm, fw); - if (ret) - return ret; - - return iwl_fill_paging_mem(mvm, fw); -} - -/* send paging cmd to FW in case CPU2 has paging image */ -static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) -{ - int blk_idx; - __le32 dev_phy_addr; - struct iwl_fw_paging_cmd fw_paging_cmd = { - .flags = - cpu_to_le32(PAGING_CMD_IS_SECURED | - PAGING_CMD_IS_ENABLED | - (mvm->num_of_pages_in_last_blk << - PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), - .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), - .block_num = cpu_to_le32(mvm->num_of_paging_blk), - }; - - /* loop for for all paging blocks + CSS block */ - for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - dev_phy_addr = - cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >> - PAGE_2_EXP_SIZE); - fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr; - } - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(fw_paging_cmd), &fw_paging_cmd); -} - -/* - * Send paging item cmd to FW in case CPU2 has paging image - */ -static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) -{ - int ret; - struct iwl_fw_get_item_cmd fw_get_item_cmd = { - .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), - }; - - struct iwl_fw_get_item_resp *item_resp; - struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &fw_get_item_cmd, }, - }; - - cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) { - IWL_ERR(mvm, - "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", - ret); - return ret; - } - - item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; - if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { - IWL_ERR(mvm, - "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", - le32_to_cpu(item_resp->item_id)); - ret = -EIO; - goto exit; - } - - mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE, - GFP_KERNEL); - if (!mvm->trans->paging_download_buf) { - ret = -ENOMEM; - goto exit; - } - mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); - mvm->trans->paging_db = mvm->fw_paging_db; - IWL_DEBUG_FW(mvm, - "Paging: got paging request address (paging_req_addr 0x%08x)\n", - mvm->trans->paging_req_addr); - -exit: - iwl_free_resp(&cmd); - - return ret; -} - -static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_mvm *mvm = - container_of(notif_wait, struct iwl_mvm, notif_wait); - struct iwl_mvm_alive_data *alive_data = data; - struct mvm_alive_resp_ver1 *palive1; - struct mvm_alive_resp_ver2 *palive2; - struct mvm_alive_resp *palive; - - if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) { - palive1 = (void *)pkt->data; - - mvm->support_umac_log = false; - mvm->error_event_table = - le32_to_cpu(palive1->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive1->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr); - - 
alive_data->valid = le16_to_cpu(palive1->status) == - IWL_ALIVE_STATUS_OK; - IWL_DEBUG_FW(mvm, - "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive1->status), palive1->ver_type, - palive1->ver_subtype, palive1->flags); - } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) { - palive2 = (void *)pkt->data; - - mvm->error_event_table = - le32_to_cpu(palive2->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive2->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr); - mvm->umac_error_event_table = - le32_to_cpu(palive2->error_info_addr); - mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr); - mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size); - - alive_data->valid = le16_to_cpu(palive2->status) == - IWL_ALIVE_STATUS_OK; - if (mvm->umac_error_event_table) - mvm->support_umac_log = true; - - IWL_DEBUG_FW(mvm, - "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive2->status), palive2->ver_type, - palive2->ver_subtype, palive2->flags); - - IWL_DEBUG_FW(mvm, - "UMAC version: Major - 0x%x, Minor - 0x%x\n", - palive2->umac_major, palive2->umac_minor); - } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { - palive = (void *)pkt->data; - - mvm->error_event_table = - le32_to_cpu(palive->error_event_table_ptr); - mvm->log_event_table = - le32_to_cpu(palive->log_event_table_ptr); - alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); - mvm->umac_error_event_table = - le32_to_cpu(palive->error_info_addr); - mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr); - mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size); - - alive_data->valid = le16_to_cpu(palive->status) == - IWL_ALIVE_STATUS_OK; - if (mvm->umac_error_event_table) - mvm->support_umac_log = true; - - IWL_DEBUG_FW(mvm, - "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", - le16_to_cpu(palive->status), palive->ver_type, - palive->ver_subtype, palive->flags); - - IWL_DEBUG_FW(mvm, - "UMAC version: Major - 0x%x, Minor - 0x%x\n", - le32_to_cpu(palive->umac_major), - le32_to_cpu(palive->umac_minor)); - } - - return true; -} - -static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_phy_db *phy_db = data; - - if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { - WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); - return true; - } - - WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC)); - - return false; -} - -static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, - enum iwl_ucode_type ucode_type) -{ - struct iwl_notification_wait alive_wait; - struct iwl_mvm_alive_data alive_data; - const struct fw_img *fw; - int ret, i; - enum iwl_ucode_type old_type = mvm->cur_ucode; - static const u16 alive_cmd[] = { MVM_ALIVE }; - struct iwl_sf_region st_fwrd_space; - - if (ucode_type == IWL_UCODE_REGULAR && - iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE)) - fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER); - else - fw = iwl_get_ucode_image(mvm, ucode_type); - if (WARN_ON(!fw)) - return -EINVAL; - mvm->cur_ucode = ucode_type; - mvm->ucode_loaded = false; - - iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, - alive_cmd, ARRAY_SIZE(alive_cmd), - iwl_alive_fn, &alive_data); - - ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT); - if (ret) { - mvm->cur_ucode = old_type; - iwl_remove_notification(&mvm->notif_wait, &alive_wait); - return ret; - } 
- - /* - * Some things may run in the background now, but we - * just wait for the ALIVE notification here. - */ - ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, - MVM_UCODE_ALIVE_TIMEOUT); - if (ret) { - if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - IWL_ERR(mvm, - "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", - iwl_read_prph(mvm->trans, SB_CPU_1_STATUS), - iwl_read_prph(mvm->trans, SB_CPU_2_STATUS)); - mvm->cur_ucode = old_type; - return ret; - } - - if (!alive_data.valid) { - IWL_ERR(mvm, "Loaded ucode is not valid!\n"); - mvm->cur_ucode = old_type; - return -EIO; - } - - /* - * update the sdio allocation according to the pointer we get in the - * alive notification. - */ - st_fwrd_space.addr = mvm->sf_space.addr; - st_fwrd_space.size = mvm->sf_space.size; - ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space); - if (ret) { - IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret); - return ret; - } - - iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); - - /* - * configure and operate fw paging mechanism. - * driver configures the paging flow only once, CPU2 paging image - * included in the IWL_UCODE_INIT image. - */ - if (fw->paging_mem_size) { - /* - * When dma is not enabled, the driver needs to copy / write - * the downloaded / uploaded page to / from the smem. - * This gets the location of the place were the pages are - * stored. - */ - if (!is_device_dma_capable(mvm->trans->dev)) { - ret = iwl_trans_get_paging_item(mvm); - if (ret) { - IWL_ERR(mvm, "failed to get FW paging item\n"); - return ret; - } - } - - ret = iwl_save_fw_paging(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to save the FW paging image\n"); - return ret; - } - - ret = iwl_send_paging_cmd(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to send the paging cmd\n"); - iwl_free_fw_paging(mvm); - return ret; - } - } - - /* - * Note: all the queues are enabled as part of the interface - * initialization, but in firmware restart scenarios they - * could be stopped, so wake them up. In firmware restart, - * mac80211 will have the queues stopped as well until the - * reconfiguration completes. During normal startup, they - * will be empty. 
- */ - - memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); - mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; - - for (i = 0; i < IEEE80211_MAX_QUEUES; i++) - atomic_set(&mvm->mac80211_queue_stop_count[i], 0); - - mvm->ucode_loaded = true; - - return 0; -} - -static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) -{ - struct iwl_phy_cfg_cmd phy_cfg_cmd; - enum iwl_ucode_type ucode_type = mvm->cur_ucode; - - /* Set parameters */ - phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); - phy_cfg_cmd.calib_control.event_trigger = - mvm->fw->default_calib[ucode_type].event_trigger; - phy_cfg_cmd.calib_control.flow_trigger = - mvm->fw->default_calib[ucode_type].flow_trigger; - - IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", - phy_cfg_cmd.phy_cfg); - - return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0, - sizeof(phy_cfg_cmd), &phy_cfg_cmd); -} - -int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) -{ - struct iwl_notification_wait calib_wait; - static const u16 init_complete[] = { - INIT_COMPLETE_NOTIF, - CALIB_RES_NOTIF_PHY_DB - }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - if (WARN_ON_ONCE(mvm->calibrating)) - return 0; - - iwl_init_notification_wait(&mvm->notif_wait, - &calib_wait, - init_complete, - ARRAY_SIZE(init_complete), - iwl_wait_phy_db_entry, - mvm->phy_db); - - /* Will also start the device */ - ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); - if (ret) { - IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); - goto error; - } - - ret = iwl_send_bt_init_conf(mvm); - if (ret) - goto error; - - /* Read the NVM only at driver load time, no need to do this twice */ - if (read_nvm) { - /* Read nvm */ - ret = iwl_nvm_init(mvm, true); - if (ret) { - IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); - goto error; - } - } - - /* In case we read the NVM from external file, load it to the NIC */ - if (mvm->nvm_file_name) - iwl_mvm_load_nvm_to_nic(mvm); - - ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); - WARN_ON(ret); - - /* - * abort after reading the nvm in case RF Kill is on, we will complete - * the init seq later when RF kill will switch to off - */ - if (iwl_mvm_is_radio_hw_killed(mvm)) { - IWL_DEBUG_RF_KILL(mvm, - "jump over all phy activities due to RF kill\n"); - iwl_remove_notification(&mvm->notif_wait, &calib_wait); - ret = 1; - goto out; - } - - mvm->calibrating = true; - - /* Send TX valid antennas before triggering calibrations */ - ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); - if (ret) - goto error; - - /* - * Send phy configurations command to init uCode - * to start the 16.0 uCode init image internal calibrations. - */ - ret = iwl_send_phy_cfg_cmd(mvm); - if (ret) { - IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", - ret); - goto error; - } - - /* - * Some things may run in the background now, but we - * just wait for the calibration complete notification. 
- */ - ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, - MVM_UCODE_CALIB_TIMEOUT); - - if (ret && iwl_mvm_is_radio_hw_killed(mvm)) { - IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); - ret = 1; - } - goto out; - -error: - iwl_remove_notification(&mvm->notif_wait, &calib_wait); -out: - mvm->calibrating = false; - if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { - /* we want to debug INIT and we have no NVM - fake */ - mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + - sizeof(struct ieee80211_channel) + - sizeof(struct ieee80211_rate), - GFP_KERNEL); - if (!mvm->nvm_data) - return -ENOMEM; - mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; - mvm->nvm_data->bands[0].n_channels = 1; - mvm->nvm_data->bands[0].n_bitrates = 1; - mvm->nvm_data->bands[0].bitrates = - (void *)mvm->nvm_data->channels + 1; - mvm->nvm_data->bands[0].bitrates->hw_value = 10; - } - - return ret; -} - -static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) -{ - struct iwl_host_cmd cmd = { - .id = SHARED_MEM_CFG, - .flags = CMD_WANT_SKB, - .data = { NULL, }, - .len = { 0, }, - }; - struct iwl_rx_packet *pkt; - struct iwl_shared_mem_cfg *mem_cfg; - u32 i; - - lockdep_assert_held(&mvm->mutex); - - if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) - return; - - pkt = cmd.resp_pkt; - mem_cfg = (void *)pkt->data; - - mvm->shared_mem_cfg.shared_mem_addr = - le32_to_cpu(mem_cfg->shared_mem_addr); - mvm->shared_mem_cfg.shared_mem_size = - le32_to_cpu(mem_cfg->shared_mem_size); - mvm->shared_mem_cfg.sample_buff_addr = - le32_to_cpu(mem_cfg->sample_buff_addr); - mvm->shared_mem_cfg.sample_buff_size = - le32_to_cpu(mem_cfg->sample_buff_size); - mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr); - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) - mvm->shared_mem_cfg.txfifo_size[i] = - le32_to_cpu(mem_cfg->txfifo_size[i]); - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) - mvm->shared_mem_cfg.rxfifo_size[i] = - le32_to_cpu(mem_cfg->rxfifo_size[i]); - mvm->shared_mem_cfg.page_buff_addr = - le32_to_cpu(mem_cfg->page_buff_addr); - mvm->shared_mem_cfg.page_buff_size = - le32_to_cpu(mem_cfg->page_buff_size); - IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); - - iwl_free_resp(&cmd); -} - -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - struct iwl_mvm_dump_desc *desc, - struct iwl_fw_dbg_trigger_tlv *trigger) -{ - unsigned int delay = 0; - - if (trigger) - delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); - - if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) - return -EBUSY; - - if (WARN_ON(mvm->fw_dump_desc)) - iwl_mvm_free_fw_dump_desc(mvm); - - IWL_WARN(mvm, "Collecting data: trigger %d fired.\n", - le32_to_cpu(desc->trig_desc.type)); - - mvm->fw_dump_desc = desc; - mvm->fw_dump_trig = trigger; - - queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); - - return 0; -} - -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - struct iwl_fw_dbg_trigger_tlv *trigger) -{ - struct iwl_mvm_dump_desc *desc; - - desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); - if (!desc) - return -ENOMEM; - - desc->len = len; - desc->trig_desc.type = cpu_to_le32(trig); - memcpy(desc->trig_desc.data, str, len); - - return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); -} - -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) 
-{ - u16 occurrences = le16_to_cpu(trigger->occurrences); - int ret, len = 0; - char buf[64]; - - if (!occurrences) - return 0; - - if (fmt) { - va_list ap; - - buf[sizeof(buf) - 1] = '\0'; - - va_start(ap, fmt); - vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - - /* check for truncation */ - if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) - buf[sizeof(buf) - 1] = '\0'; - - len = strlen(buf) + 1; - } - - ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len, - trigger); - - if (ret) - return ret; - - trigger->occurrences = cpu_to_le16(occurrences - 1); - return 0; -} - -static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) -{ - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) - iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); - else - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); -} - -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) -{ - u8 *ptr; - int ret; - int i; - - if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv), - "Invalid configuration %d\n", conf_id)) - return -EINVAL; - - /* EARLY START - firmware's configuration is hard coded */ - if ((!mvm->fw->dbg_conf_tlv[conf_id] || - !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && - conf_id == FW_DBG_START_FROM_ALIVE) { - iwl_mvm_restart_early_start(mvm); - return 0; - } - - if (!mvm->fw->dbg_conf_tlv[conf_id]) - return -EINVAL; - - if (mvm->fw_dbg_conf != FW_DBG_INVALID) - IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n", - mvm->fw_dbg_conf); - - /* Send all HCMDs for configuring the FW debug */ - ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd; - for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { - struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; - - ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0, - le16_to_cpu(cmd->len), cmd->data); - if (ret) - return ret; - - ptr += sizeof(*cmd); - ptr += le16_to_cpu(cmd->len); - } - - mvm->fw_dbg_conf = conf_id; - return ret; -} - -static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) -{ - struct iwl_ltr_config_cmd cmd = { - .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), - }; - - if (!mvm->trans->ltr_enabled) - return 0; - - return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, - sizeof(cmd), &cmd); -} - -int iwl_mvm_up(struct iwl_mvm *mvm) -{ - int ret, i; - struct ieee80211_channel *chan; - struct cfg80211_chan_def chandef; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_trans_start_hw(mvm->trans); - if (ret) - return ret; - - /* - * If we haven't completed the run of the init ucode during - * module loading, load init ucode now - * (for example, if we were in RFKILL) - */ - ret = iwl_run_init_mvm_ucode(mvm, false); - if (ret && !iwlmvm_mod_params.init_dbg) { - IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); - /* this can't happen */ - if (WARN_ON(ret > 0)) - ret = -ERFKILL; - goto error; - } - if (!iwlmvm_mod_params.init_dbg) { - /* - * Stop and start the transport without entering low power - * mode. This will save the state of other components on the - * device that are triggered by the INIT firwmare (MFUART). 
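The removed iwl_mvm_start_fw_dbg_conf() above sends the debug configuration as a packed sequence of variable-length host commands, advancing a cursor by the fixed header size plus each command's own length field. A self-contained sketch of that walk, with a simplified record layout and host-endian lengths (the real field is little-endian and goes through le16_to_cpu()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified variable-length record: fixed header followed by payload. */
struct rec_hdr {
        uint8_t  id;
        uint16_t len;           /* payload bytes following the header */
} __attribute__((packed));

static void walk_records(const uint8_t *buf, size_t buf_len)
{
        size_t off = 0;

        while (off + sizeof(struct rec_hdr) <= buf_len) {
                struct rec_hdr hdr;

                memcpy(&hdr, buf + off, sizeof(hdr));
                if (off + sizeof(hdr) + hdr.len > buf_len)
                        break;  /* truncated record */

                printf("record id=%u len=%u\n", hdr.id, hdr.len);

                /* advance past the header and its payload, as the driver does */
                off += sizeof(hdr) + hdr.len;
        }
}

int main(void)
{
        uint8_t buf[32];
        size_t off = 0;
        struct rec_hdr h1 = { .id = 1, .len = 2 };
        struct rec_hdr h2 = { .id = 2, .len = 1 };

        memcpy(buf + off, &h1, sizeof(h1)); off += sizeof(h1);
        buf[off++] = 0xaa; buf[off++] = 0xbb;
        memcpy(buf + off, &h2, sizeof(h2)); off += sizeof(h2);
        buf[off++] = 0xcc;

        walk_records(buf, off);
        return 0;
}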
- */ - _iwl_trans_stop_device(mvm->trans, false); - ret = _iwl_trans_start_hw(mvm->trans, false); - if (ret) - goto error; - } - - if (iwlmvm_mod_params.init_dbg) - return 0; - - ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); - if (ret) { - IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - goto error; - } - - iwl_mvm_get_shared_mem_conf(mvm); - - ret = iwl_mvm_sf_update(mvm, NULL, false); - if (ret) - IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); - - mvm->fw_dbg_conf = FW_DBG_INVALID; - /* if we have a destination, assume EARLY START */ - if (mvm->fw->dbg_dest_tlv) - mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE; - iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE); - - ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); - if (ret) - goto error; - - ret = iwl_send_bt_init_conf(mvm); - if (ret) - goto error; - - /* Send phy db control command and then phy db calibration*/ - ret = iwl_send_phy_db_data(mvm->phy_db); - if (ret) - goto error; - - ret = iwl_send_phy_cfg_cmd(mvm); - if (ret) - goto error; - - /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); - - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; - - /* reset quota debouncing buffer - 0xff will yield invalid data */ - memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); - - /* Add auxiliary station for scanning */ - ret = iwl_mvm_add_aux_sta(mvm); - if (ret) - goto error; - - /* Add all the PHY contexts */ - chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); - for (i = 0; i < NUM_PHY_CTX; i++) { - /* - * The channel used here isn't relevant as it's - * going to be overwritten in the other flows. - * For now use the first channel we have. - */ - ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], - &chandef, 1, 1); - if (ret) - goto error; - } - - /* Initialize tx backoffs to the minimal possible */ - iwl_mvm_tt_tx_backoff(mvm, 0); - - WARN_ON(iwl_mvm_config_ltr(mvm)); - - ret = iwl_mvm_power_update_device(mvm); - if (ret) - goto error; - - /* - * RTNL is not taken during Ct-kill, but we don't need to scan/Tx - * anyway, so don't init MCC. 
- */ - if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) { - ret = iwl_mvm_init_mcc(mvm); - if (ret) - goto error; - } - - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - ret = iwl_mvm_config_scan(mvm); - if (ret) - goto error; - } - - if (iwl_mvm_is_csum_supported(mvm) && - mvm->cfg->features & NETIF_F_RXCSUM) - iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3); - - /* allow FW/transport low power modes if not during restart */ - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); - - IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); - return 0; - error: - iwl_trans_stop_device(mvm->trans); - return ret; -} - -int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) -{ - int ret, i; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_trans_start_hw(mvm->trans); - if (ret) - return ret; - - ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); - if (ret) { - IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret); - goto error; - } - - ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); - if (ret) - goto error; - - /* Send phy db control command and then phy db calibration*/ - ret = iwl_send_phy_db_data(mvm->phy_db); - if (ret) - goto error; - - ret = iwl_send_phy_cfg_cmd(mvm); - if (ret) - goto error; - - /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); - - /* Add auxiliary station for scanning */ - ret = iwl_mvm_add_aux_sta(mvm); - if (ret) - goto error; - - return 0; - error: - iwl_trans_stop_device(mvm->trans); - return ret; -} - -void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; - u32 flags = le32_to_cpu(card_state_notif->flags); - - IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? "Kill" : "On", - (flags & CT_KILL_CARD_DISABLED) ? - "Reached" : "Not reached"); -} - -void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; - - IWL_DEBUG_INFO(mvm, - "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", - le32_to_cpu(mfuart_notif->installed_ver), - le32_to_cpu(mfuart_notif->external_ver), - le32_to_cpu(mfuart_notif->status), - le32_to_cpu(mfuart_notif->duration)); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c deleted file mode 100644 index e3b3cf4dbd77..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/led.c +++ /dev/null @@ -1,136 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#include -#include "iwl-io.h" -#include "iwl-csr.h" -#include "mvm.h" - -/* Set led register on */ -static void iwl_mvm_led_enable(struct iwl_mvm *mvm) -{ - iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); -} - -/* Set led register off */ -static void iwl_mvm_led_disable(struct iwl_mvm *mvm) -{ - iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF); -} - -static void iwl_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led); - if (brightness > 0) - iwl_mvm_led_enable(mvm); - else - iwl_mvm_led_disable(mvm); -} - -int iwl_mvm_leds_init(struct iwl_mvm *mvm) -{ - int mode = iwlwifi_mod_params.led_mode; - int ret; - - switch (mode) { - case IWL_LED_BLINK: - IWL_ERR(mvm, "Blink led mode not supported, used default\n"); - case IWL_LED_DEFAULT: - case IWL_LED_RF_STATE: - mode = IWL_LED_RF_STATE; - break; - case IWL_LED_DISABLE: - IWL_INFO(mvm, "Led disabled\n"); - return 0; - default: - return -EINVAL; - } - - mvm->led.name = kasprintf(GFP_KERNEL, "%s-led", - wiphy_name(mvm->hw->wiphy)); - mvm->led.brightness_set = iwl_led_brightness_set; - mvm->led.max_brightness = 1; - - if (mode == IWL_LED_RF_STATE) - mvm->led.default_trigger = - ieee80211_get_radio_led_name(mvm->hw); - - ret = led_classdev_register(mvm->trans->dev, &mvm->led); - if (ret) { - kfree(mvm->led.name); - IWL_INFO(mvm, "Failed to enable led\n"); - return ret; - } - - return 0; -} - -void iwl_mvm_leds_exit(struct iwl_mvm *mvm) -{ - if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE) - return; - - led_classdev_unregister(&mvm->led); - kfree(mvm->led.name); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c deleted file mode 100644 index ad7ad720d2e7..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ /dev/null @@ -1,1452 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include -#include "iwl-io.h" -#include "iwl-prph.h" -#include "fw-api.h" -#include "mvm.h" -#include "time-event.h" - -const u8 iwl_mvm_ac_to_tx_fifo[] = { - IWL_MVM_TX_FIFO_VO, - IWL_MVM_TX_FIFO_VI, - IWL_MVM_TX_FIFO_BE, - IWL_MVM_TX_FIFO_BK, -}; - -struct iwl_mvm_mac_iface_iterator_data { - struct iwl_mvm *mvm; - struct ieee80211_vif *vif; - unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; - unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; - enum iwl_tsf_id preferred_tsf; - bool found_vif; -}; - -struct iwl_mvm_hw_queues_iface_iterator_data { - struct ieee80211_vif *exclude_vif; - unsigned long used_hw_queues; -}; - -static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_mac_iface_iterator_data *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u16 min_bi; - - /* Skip the interface for which we are trying to assign a tsf_id */ - if (vif == data->vif) - return; - - /* - * The TSF is a hardware/firmware resource, there are 4 and - * the driver should assign and free them as needed. However, - * there are cases where 2 MACs should share the same TSF ID - * for the purpose of clock sync, an optimization to avoid - * clock drift causing overlapping TBTTs/DTIMs for a GO and - * client in the system. - * - * The firmware will decide according to the MAC type which - * will be the master and slave. Clients that need to sync - * with a remote station will be the master, and an AP or GO - * will be the slave. - * - * Depending on the new interface type it can be slaved to - * or become the master of an existing interface. 
- */ - switch (data->vif->type) { - case NL80211_IFTYPE_STATION: - /* - * The new interface is a client, so if the one we're iterating - * is an AP, and the beacon interval of the AP is a multiple or - * divisor of the beacon interval of the client, the same TSF - * should be used to avoid drift between the new client and - * existing AP. The existing AP will get drift updates from the - * new client context in this case. - */ - if (vif->type != NL80211_IFTYPE_AP || - data->preferred_tsf != NUM_TSF_IDS || - !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) - break; - - min_bi = min(data->vif->bss_conf.beacon_int, - vif->bss_conf.beacon_int); - - if (!min_bi) - break; - - if ((data->vif->bss_conf.beacon_int - - vif->bss_conf.beacon_int) % min_bi == 0) { - data->preferred_tsf = mvmvif->tsf_id; - return; - } - break; - - case NL80211_IFTYPE_AP: - /* - * The new interface is AP/GO, so if its beacon interval is a - * multiple or a divisor of the beacon interval of an existing - * interface, it should get drift updates from an existing - * client or use the same TSF as an existing GO. There's no - * drift between TSFs internally but if they used different - * TSFs then a new client MAC could update one of them and - * cause drift that way. - */ - if ((vif->type != NL80211_IFTYPE_AP && - vif->type != NL80211_IFTYPE_STATION) || - data->preferred_tsf != NUM_TSF_IDS || - !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) - break; - - min_bi = min(data->vif->bss_conf.beacon_int, - vif->bss_conf.beacon_int); - - if (!min_bi) - break; - - if ((data->vif->bss_conf.beacon_int - - vif->bss_conf.beacon_int) % min_bi == 0) { - data->preferred_tsf = mvmvif->tsf_id; - return; - } - break; - default: - /* - * For all other interface types there's no need to - * take drift into account. Either they're exclusive - * like IBSS and monitor, or we don't care much about - * their TSF (like P2P Device), but we won't be able - * to share the TSF resource. - */ - break; - } - - /* - * Unless we exited above, we can't share the TSF resource - * that the virtual interface we're iterating over is using - * with the new one, so clear the available bit and if this - * was the preferred one, reset that as well. 
- */ - __clear_bit(mvmvif->tsf_id, data->available_tsf_ids); - - if (data->preferred_tsf == mvmvif->tsf_id) - data->preferred_tsf = NUM_TSF_IDS; -} - -/* - * Get the mask of the queues used by the vif - */ -u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) -{ - u32 qmask = 0, ac; - - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) - return BIT(IWL_MVM_OFFCHANNEL_QUEUE); - - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - qmask |= BIT(vif->hw_queue[ac]); - } - - if (vif->type == NL80211_IFTYPE_AP) - qmask |= BIT(vif->cab_queue); - - return qmask; -} - -static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; - - /* exclude the given vif */ - if (vif == data->exclude_vif) - return; - - data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif); -} - -static void iwl_mvm_mac_sta_hw_queues_iter(void *_data, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - - /* Mark the queues used by the sta */ - data->used_hw_queues |= mvmsta->tfd_queue_msk; -} - -unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, - struct ieee80211_vif *exclude_vif) -{ - u8 sta_id; - struct iwl_mvm_hw_queues_iface_iterator_data data = { - .exclude_vif = exclude_vif, - .used_hw_queues = - BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue) | - BIT(IWL_MVM_CMD_QUEUE), - }; - - lockdep_assert_held(&mvm->mutex); - - /* mark all VIF used hw queues */ - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, - iwl_mvm_iface_hw_queues_iter, &data); - - /* don't assign the same hw queues as TDLS stations */ - ieee80211_iterate_stations_atomic(mvm->hw, - iwl_mvm_mac_sta_hw_queues_iter, - &data); - - /* - * Some TDLS stations may be removed but are in the process of being - * drained. Don't touch their queues. - */ - for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) - data.used_hw_queues |= mvm->tfd_drained[sta_id]; - - return data.used_hw_queues; -} - -static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_mac_iface_iterator_data *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - /* Iterator may already find the interface being added -- skip it */ - if (vif == data->vif) { - data->found_vif = true; - return; - } - - /* Mark MAC IDs as used by clearing the available bit, and - * (below) mark TSFs as used if their existing use is not - * compatible with the new interface type. - * No locking or atomic bit operations are needed since the - * data is on the stack of the caller function. 
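The tsf_id iterator above lets two interfaces share a TSF only when the difference of their beacon intervals is a multiple of the smaller one, which is the same as saying the larger interval is an integer multiple of the smaller. A small stand-alone test of that condition:

#include <stdbool.h>
#include <stdio.h>

/*
 * Two beacon intervals (in TU) are TSF-compatible when one is a multiple
 * (or divisor) of the other, so their TBTTs line up periodically.  This
 * mirrors the "(bi_a - bi_b) % min_bi == 0" test in the iterator above.
 */
static bool beacon_int_compatible(int bi_a, int bi_b)
{
        int min_bi = bi_a < bi_b ? bi_a : bi_b;

        if (!min_bi)
                return false;

        return (bi_a - bi_b) % min_bi == 0;
}

int main(void)
{
        /* 100 and 300 TU line up every 300 TU; 100 and 250 TU never do. */
        printf("100 vs 300 -> %d\n", beacon_int_compatible(100, 300));
        printf("100 vs 250 -> %d\n", beacon_int_compatible(100, 250));
        printf("100 vs 100 -> %d\n", beacon_int_compatible(100, 100));
        return 0;
}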
- */ - __clear_bit(mvmvif->id, data->available_mac_ids); - - /* find a suitable tsf_id */ - iwl_mvm_mac_tsf_id_iter(_data, mac, vif); -} - -void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_mac_iface_iterator_data data = { - .mvm = mvm, - .vif = vif, - .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, - /* no preference yet */ - .preferred_tsf = NUM_TSF_IDS, - }; - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, - iwl_mvm_mac_tsf_id_iter, &data); - - if (data.preferred_tsf != NUM_TSF_IDS) - mvmvif->tsf_id = data.preferred_tsf; - else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids)) - mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, - NUM_TSF_IDS); -} - -static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_mac_iface_iterator_data data = { - .mvm = mvm, - .vif = vif, - .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 }, - .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, - /* no preference yet */ - .preferred_tsf = NUM_TSF_IDS, - .found_vif = false, - }; - u32 ac; - int ret, i; - unsigned long used_hw_queues; - - /* - * Allocate a MAC ID and a TSF for this MAC, along with the queues - * and other resources. - */ - - /* - * Before the iterator, we start with all MAC IDs and TSFs available. - * - * During iteration, all MAC IDs are cleared that are in use by other - * virtual interfaces, and all TSF IDs are cleared that can't be used - * by this new virtual interface because they're used by an interface - * that can't share it with the new one. - * At the same time, we check if there's a preferred TSF in the case - * that we should share it with another interface. - */ - - /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */ - switch (vif->type) { - case NL80211_IFTYPE_ADHOC: - break; - case NL80211_IFTYPE_STATION: - if (!vif->p2p) - break; - /* fall through */ - default: - __clear_bit(0, data.available_mac_ids); - } - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, - iwl_mvm_mac_iface_iterator, &data); - - used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif); - - /* - * In the case we're getting here during resume, it's similar to - * firmware restart, and with RESUME_ALL the iterator will find - * the vif being added already. - * We don't want to reassign any IDs in either case since doing - * so would probably assign different IDs (as interfaces aren't - * necessarily added in the same order), but the old IDs were - * preserved anyway, so skip ID assignment for both resume and - * recovery. 
- */ - if (data.found_vif) - return 0; - - /* Therefore, in recovery, we can't get here */ - if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) - return -EBUSY; - - mvmvif->id = find_first_bit(data.available_mac_ids, - NUM_MAC_INDEX_DRIVER); - if (mvmvif->id == NUM_MAC_INDEX_DRIVER) { - IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n"); - ret = -EIO; - goto exit_fail; - } - - if (data.preferred_tsf != NUM_TSF_IDS) - mvmvif->tsf_id = data.preferred_tsf; - else - mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, - NUM_TSF_IDS); - if (mvmvif->tsf_id == NUM_TSF_IDS) { - IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n"); - ret = -EIO; - goto exit_fail; - } - - mvmvif->color = 0; - - INIT_LIST_HEAD(&mvmvif->time_event_data.list); - mvmvif->time_event_data.id = TE_MAX; - - /* No need to allocate data queues to P2P Device MAC.*/ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE; - - return 0; - } - - /* Find available queues, and allocate them to the ACs */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - u8 queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate queue\n"); - ret = -EIO; - goto exit_fail; - } - - __set_bit(queue, &used_hw_queues); - vif->hw_queue[ac] = queue; - } - - /* Allocate the CAB queue for softAP and GO interfaces */ - if (vif->type == NL80211_IFTYPE_AP) { - u8 queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate cab queue\n"); - ret = -EIO; - goto exit_fail; - } - - vif->cab_queue = queue; - } else { - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; - } - - mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; - - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) - mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; - - return 0; - -exit_fail: - memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); - memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue)); - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; - return ret; -} - -int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); - u32 ac; - int ret; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif); - if (ret) - return ret; - - switch (vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_TX_FIFO_VO, 0, wdg_timeout); - break; - case NL80211_IFTYPE_AP: - iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); - /* fall through */ - default: - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], - vif->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac], 0, - wdg_timeout); - break; - } - - return 0; -} - -void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - int ac; - - lockdep_assert_held(&mvm->mutex); - - switch (vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT, - 0); - break; - case NL80211_IFTYPE_AP: - iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, - IWL_MAX_TID_COUNT, 0); - /* fall through */ - default: - for (ac = 0; ac < IEEE80211_NUM_ACS; 
ac++) - iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], - vif->hw_queue[ac], - IWL_MAX_TID_COUNT, 0); - } -} - -static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - enum ieee80211_band band, - u8 *cck_rates, u8 *ofdm_rates) -{ - struct ieee80211_supported_band *sband; - unsigned long basic = vif->bss_conf.basic_rates; - int lowest_present_ofdm = 100; - int lowest_present_cck = 100; - u8 cck = 0; - u8 ofdm = 0; - int i; - - sband = mvm->hw->wiphy->bands[band]; - - for_each_set_bit(i, &basic, BITS_PER_LONG) { - int hw = sband->bitrates[i].hw_value; - if (hw >= IWL_FIRST_OFDM_RATE) { - ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); - if (lowest_present_ofdm > hw) - lowest_present_ofdm = hw; - } else { - BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); - - cck |= BIT(hw); - if (lowest_present_cck > hw) - lowest_present_cck = hw; - } - } - - /* - * Now we've got the basic rates as bitmaps in the ofdm and cck - * variables. This isn't sufficient though, as there might not - * be all the right rates in the bitmap. E.g. if the only basic - * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps - * and 6 Mbps because the 802.11-2007 standard says in 9.6: - * - * [...] a STA responding to a received frame shall transmit - * its Control Response frame [...] at the highest rate in the - * BSSBasicRateSet parameter that is less than or equal to the - * rate of the immediately previous frame in the frame exchange - * sequence ([...]) and that is of the same modulation class - * ([...]) as the received frame. If no rate contained in the - * BSSBasicRateSet parameter meets these conditions, then the - * control frame sent in response to a received frame shall be - * transmitted at the highest mandatory rate of the PHY that is - * less than or equal to the rate of the received frame, and - * that is of the same modulation class as the received frame. - * - * As a consequence, we need to add all mandatory rates that are - * lower than all of the basic rates to these bitmaps. - */ - - if (IWL_RATE_24M_INDEX < lowest_present_ofdm) - ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE; - if (IWL_RATE_12M_INDEX < lowest_present_ofdm) - ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE; - /* 6M already there or needed so always add */ - ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE; - - /* - * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. - * Note, however: - * - if no CCK rates are basic, it must be ERP since there must - * be some basic rates at all, so they're OFDM => ERP PHY - * (or we're in 5 GHz, and the cck bitmap will never be used) - * - if 11M is a basic rate, it must be ERP as well, so add 5.5M - * - if 5.5M is basic, 1M and 2M are mandatory - * - if 2M is basic, 1M is mandatory - * - if 1M is basic, that's the only valid ACK rate. - * As a consequence, it's not as complicated as it sounds, just add - * any lower rates to the ACK rate bitmap. 
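The resource allocation code above hands out MAC IDs, TSF IDs and hardware queues from bitmaps: the interface and station iterators mark what is already in use, and the allocator then takes the first remaining bit (find_first_bit()/find_first_zero_bit() in the kernel). A user-space sketch of the same idea with a plain 32-bit mask and a trivial scan loop:

#include <stdint.h>
#include <stdio.h>

#define NUM_IDS 4

/* Return the first set bit below 'limit', or 'limit' if none is free. */
static unsigned int first_set_bit(uint32_t mask, unsigned int limit)
{
        unsigned int i;

        for (i = 0; i < limit; i++)
                if (mask & (1u << i))
                        return i;
        return limit;
}

int main(void)
{
        /* start with all IDs available ... */
        uint32_t available = (1u << NUM_IDS) - 1;
        unsigned int id;

        /* ... clear the ones already claimed by existing interfaces ... */
        available &= ~(1u << 0);
        available &= ~(1u << 2);

        /* ... and allocate the first one that is still free. */
        id = first_set_bit(available, NUM_IDS);
        if (id == NUM_IDS)
                printf("no free ID\n");
        else
                printf("allocated ID %u\n", id);        /* prints 1 */

        return 0;
}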
- */ - if (IWL_RATE_11M_INDEX < lowest_present_cck) - cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE; - if (IWL_RATE_5M_INDEX < lowest_present_cck) - cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE; - if (IWL_RATE_2M_INDEX < lowest_present_cck) - cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE; - /* 1M already there or needed so always add */ - cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE; - - *cck_rates = cck; - *ofdm_rates = ofdm; -} - -static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_ctx_cmd *cmd) -{ - /* for both sta and ap, ht_operation_mode hold the protection_mode */ - u8 protection_mode = vif->bss_conf.ht_operation_mode & - IEEE80211_HT_OP_MODE_PROTECTION; - /* The fw does not distinguish between ht and fat */ - u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; - - IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode); - /* - * See section 9.23.3.1 of IEEE 80211-2012. - * Nongreenfield HT STAs Present is not supported. - */ - switch (protection_mode) { - case IEEE80211_HT_OP_MODE_PROTECTION_NONE: - break; - case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: - case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: - cmd->protection_flags |= cpu_to_le32(ht_flag); - break; - case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: - /* Protect when channel wider than 20MHz */ - if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) - cmd->protection_flags |= cpu_to_le32(ht_flag); - break; - default: - IWL_ERR(mvm, "Illegal protection mode %d\n", - protection_mode); - break; - } -} - -static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_ctx_cmd *cmd, - const u8 *bssid_override, - u32 action) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_chanctx_conf *chanctx; - bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & - IEEE80211_HT_OP_MODE_PROTECTION); - u8 cck_ack_rates, ofdm_ack_rates; - const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; - int i; - - cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)); - cmd->action = cpu_to_le32(action); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - if (vif->p2p) - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA); - else - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA); - break; - case NL80211_IFTYPE_AP: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO); - break; - case NL80211_IFTYPE_MONITOR: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER); - break; - case NL80211_IFTYPE_P2P_DEVICE: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE); - break; - case NL80211_IFTYPE_ADHOC: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS); - break; - default: - WARN_ON_ONCE(1); - } - - cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); - - memcpy(cmd->node_addr, vif->addr, ETH_ALEN); - - if (bssid) - memcpy(cmd->bssid_addr, bssid, ETH_ALEN); - else - eth_broadcast_addr(cmd->bssid_addr); - - rcu_read_lock(); - chanctx = rcu_dereference(vif->chanctx_conf); - iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band - : IEEE80211_BAND_2GHZ, - &cck_ack_rates, &ofdm_ack_rates); - rcu_read_unlock(); - - cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates); - cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); - - cmd->cck_short_preamble = - cpu_to_le32(vif->bss_conf.use_short_preamble ? - MAC_FLG_SHORT_PREAMBLE : 0); - cmd->short_slot = - cpu_to_le32(vif->bss_conf.use_short_slot ? 
- MAC_FLG_SHORT_SLOT : 0); - - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u8 txf = iwl_mvm_ac_to_tx_fifo[i]; - - cmd->ac[txf].cw_min = - cpu_to_le16(mvmvif->queue_params[i].cw_min); - cmd->ac[txf].cw_max = - cpu_to_le16(mvmvif->queue_params[i].cw_max); - cmd->ac[txf].edca_txop = - cpu_to_le16(mvmvif->queue_params[i].txop * 32); - cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs; - cmd->ac[txf].fifos_mask = BIT(txf); - } - - /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ - if (vif->type == NL80211_IFTYPE_AP) - cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= - BIT(IWL_MVM_TX_FIFO_MCAST); - - if (vif->bss_conf.qos) - cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); - - if (vif->bss_conf.use_cts_prot) - cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); - - IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", - vif->bss_conf.use_cts_prot, - vif->bss_conf.ht_operation_mode); - if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) - cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); - if (ht_enabled) - iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd); - - cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); -} - -static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, - struct iwl_mac_ctx_cmd *cmd) -{ - int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, - sizeof(*cmd), cmd); - if (ret) - IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", - le32_to_cpu(cmd->action), ret); - return ret; -} - -static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 action, bool force_assoc_off, - const u8 *bssid_override) -{ - struct iwl_mac_ctx_cmd cmd = {}; - struct iwl_mac_data_sta *ctxt_sta; - - WARN_ON(vif->type != NL80211_IFTYPE_STATION); - - /* Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); - - if (vif->p2p) { - struct ieee80211_p2p_noa_attr *noa = - &vif->bss_conf.p2p_noa_attr; - - cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & - IEEE80211_P2P_OPPPS_CTWINDOW_MASK); - ctxt_sta = &cmd.p2p_sta.sta; - } else { - ctxt_sta = &cmd.sta; - } - - /* We need the dtim_period to set the MAC as associated */ - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && - !force_assoc_off) { - u32 dtim_offs; - - /* - * The DTIM count counts down, so when it is N that means N - * more beacon intervals happen until the DTIM TBTT. Therefore - * add this to the current time. If that ends up being in the - * future, the firmware will handle it. - * - * Also note that the system_timestamp (which we get here as - * "sync_device_ts") and TSF timestamp aren't at exactly the - * same offset in the frame -- the TSF is at the first symbol - * of the TSF, the system timestamp is at signal acquisition - * time. This means there's an offset between them of at most - * a few hundred microseconds (24 * 8 bits + PLCP time gives - * 384us in the longest case), this is currently not relevant - * as the firmware wakes up around 2ms before the TBTT. 
- */ - dtim_offs = vif->bss_conf.sync_dtim_count * - vif->bss_conf.beacon_int; - /* convert TU to usecs */ - dtim_offs *= 1024; - - ctxt_sta->dtim_tsf = - cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs); - ctxt_sta->dtim_time = - cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs); - - IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", - le64_to_cpu(ctxt_sta->dtim_tsf), - le32_to_cpu(ctxt_sta->dtim_time), - dtim_offs); - - ctxt_sta->is_assoc = cpu_to_le32(1); - } else { - ctxt_sta->is_assoc = cpu_to_le32(0); - - /* Allow beacons to pass through as long as we are not - * associated, or we do not have dtim period information. - */ - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); - } - - ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); - ctxt_sta->bi_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); - ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * - vif->bss_conf.dtim_period); - ctxt_sta->dtim_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * - vif->bss_conf.dtim_period)); - - ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); - ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); - - if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - - return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); -} - -static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 action) -{ - struct iwl_mac_ctx_cmd cmd = {}; - - WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); - - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - - cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | - MAC_FILTER_IN_CONTROL_AND_MGMT | - MAC_FILTER_IN_BEACON | - MAC_FILTER_IN_PROBE_REQUEST | - MAC_FILTER_IN_CRC32); - ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); - - return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); -} - -static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 action) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mac_ctx_cmd cmd = {}; - - WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); - - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - - cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | - MAC_FILTER_IN_PROBE_REQUEST); - - /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */ - cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); - cmd.ibss.bi_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); - - /* TODO: Assumes that the beacon id == mac context id */ - cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id); - - return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); -} - -struct iwl_mvm_go_iterator_data { - bool go_active; -}; - -static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) -{ - struct iwl_mvm_go_iterator_data *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (vif->type == NL80211_IFTYPE_AP && vif->p2p && - mvmvif->ap_ibss_active) - data->go_active = true; -} - -static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 action) -{ - struct iwl_mac_ctx_cmd cmd = {}; - struct iwl_mvm_go_iterator_data data = {}; - - WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); - - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - - cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); - - /* Override the filter flags to accept only probe requests */ - cmd.filter_flags = 
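The station context code above computes the next DTIM TBTT by adding sync_dtim_count beacon intervals to the last synchronized TSF and device timestamps, with 1 TU = 1024 microseconds. A compact stand-alone version of that arithmetic; the input values below are made up for the example:

#include <stdint.h>
#include <stdio.h>

#define TU_TO_USEC      1024ULL /* 1 TU = 1024 microseconds */

struct dtim_times {
        uint64_t dtim_tsf;      /* next DTIM TBTT on the TSF timebase */
        uint32_t dtim_time;     /* same instant on the device clock */
};

static struct dtim_times next_dtim(uint64_t sync_tsf, uint32_t sync_device_ts,
                                   uint32_t sync_dtim_count, uint32_t beacon_int)
{
        /* beacons remaining until the DTIM, in TU, converted to usec */
        uint64_t dtim_offs = (uint64_t)sync_dtim_count * beacon_int * TU_TO_USEC;

        return (struct dtim_times){
                .dtim_tsf  = sync_tsf + dtim_offs,
                .dtim_time = sync_device_ts + (uint32_t)dtim_offs,
        };
}

int main(void)
{
        /* 100 TU beacon interval, 2 beacons until the DTIM beacon */
        struct dtim_times t = next_dtim(0x123456789ULL, 500000, 2, 100);

        printf("dtim_tsf=0x%llx dtim_time=%u\n",
               (unsigned long long)t.dtim_tsf, t.dtim_time);
        return 0;
}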
cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - - /* - * This flag should be set to true when the P2P Device is - * discoverable and there is at least another active P2P GO. Settings - * this flag will allow the P2P Device to be discoverable on other - * channels in addition to its listen channel. - * Note that this flag should not be set in other cases as it opens the - * Rx filters on all MAC and increases the number of interrupts. - */ - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, - iwl_mvm_go_iterator, &data); - - cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0); - return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); -} - -static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, - struct iwl_mac_beacon_cmd *beacon_cmd, - u8 *beacon, u32 frame_size) -{ - u32 tim_idx; - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; - - /* The index is relative to frame start but we start looking at the - * variable-length part of the beacon. */ - tim_idx = mgmt->u.beacon.variable - beacon; - - /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ - while ((tim_idx < (frame_size - 2)) && - (beacon[tim_idx] != WLAN_EID_TIM)) - tim_idx += beacon[tim_idx+1] + 2; - - /* If TIM field was found, set variables */ - if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { - beacon_cmd->tim_idx = cpu_to_le32(tim_idx); - beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]); - } else { - IWL_WARN(mvm, "Unable to find TIM Element in beacon\n"); - } -} - -static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_host_cmd cmd = { - .id = BEACON_TEMPLATE_CMD, - .flags = CMD_ASYNC, - }; - struct iwl_mac_beacon_cmd beacon_cmd = {}; - struct ieee80211_tx_info *info; - u32 beacon_skb_len; - u32 rate, tx_flags; - - if (WARN_ON(!beacon)) - return -EINVAL; - - beacon_skb_len = beacon->len; - - /* TODO: for now the beacon template id is set to be the mac context id. - * Might be better to handle it as another resource ... 
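iwl_mvm_mac_ctxt_set_tim() above finds the TIM element by walking the beacon's information elements, each of which is a one-byte ID, a one-byte length and that many payload bytes. A stand-alone version of the same scan (WLAN_EID_TIM is 5):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WLAN_EID_TIM    5

/*
 * Scan a buffer of 802.11 information elements (id, len, payload...) and
 * return the offset of the TIM element, or -1 if it is absent.
 */
static int find_tim(const uint8_t *ies, size_t len)
{
        size_t idx = 0;

        while (idx + 2 <= len && ies[idx] != WLAN_EID_TIM)
                idx += ies[idx + 1] + 2;        /* skip id, len and payload */

        if (idx + 2 <= len && ies[idx] == WLAN_EID_TIM)
                return (int)idx;
        return -1;
}

int main(void)
{
        /* SSID element (id 0, 4 bytes), then a TIM element (id 5, 4 bytes) */
        const uint8_t ies[] = {
                0, 4, 't', 'e', 's', 't',
                5, 4, 0, 1, 0, 0,
        };
        int off = find_tim(ies, sizeof(ies));

        if (off < 0)
                printf("no TIM element\n");
        else
                printf("TIM at offset %d, length %u\n", off, ies[off + 1]);
        return 0;
}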
*/ - beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); - info = IEEE80211_SKB_CB(beacon); - - /* Set up TX command fields */ - beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len); - beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id; - beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); - tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; - tx_flags |= - iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << - TX_CMD_FLG_BT_PRIO_POS; - beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags); - - mvm->mgmt_last_antenna_idx = - iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), - mvm->mgmt_last_antenna_idx); - - beacon_cmd.tx.rate_n_flags = - cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << - RATE_MCS_ANT_POS); - - if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) { - rate = IWL_FIRST_OFDM_RATE; - } else { - rate = IWL_FIRST_CCK_RATE; - beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); - } - beacon_cmd.tx.rate_n_flags |= - cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); - - /* Set up TX beacon command fields */ - if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd, - beacon->data, - beacon_skb_len); - - /* Submit command */ - cmd.len[0] = sizeof(beacon_cmd); - cmd.data[0] = &beacon_cmd; - cmd.dataflags[0] = 0; - cmd.len[1] = beacon_skb_len; - cmd.data[1] = beacon->data; - cmd.dataflags[1] = IWL_HCMD_DFL_DUP; - - return iwl_mvm_send_cmd(mvm, &cmd); -} - -/* The beacon template for the AP/GO/IBSS has changed and needs update */ -int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct sk_buff *beacon; - int ret; - - WARN_ON(vif->type != NL80211_IFTYPE_AP && - vif->type != NL80211_IFTYPE_ADHOC); - - beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); - if (!beacon) - return -ENOMEM; - - ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); - dev_kfree_skb(beacon); - return ret; -} - -struct iwl_mvm_mac_ap_iterator_data { - struct iwl_mvm *mvm; - struct ieee80211_vif *vif; - u32 beacon_device_ts; - u16 beacon_int; -}; - -/* Find the beacon_device_ts and beacon_int for a managed interface */ -static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_mac_ap_iterator_data *data = _data; - - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) - return; - - /* Station client has higher priority over P2P client*/ - if (vif->p2p && data->beacon_device_ts) - return; - - data->beacon_device_ts = vif->bss_conf.sync_device_ts; - data->beacon_int = vif->bss_conf.beacon_int; -} - -/* - * Fill the specific data for mac context of type AP of P2P GO - */ -static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_data_ap *ctxt_ap, - bool add) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_mac_ap_iterator_data data = { - .mvm = mvm, - .vif = vif, - .beacon_device_ts = 0 - }; - - ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); - ctxt_ap->bi_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); - ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * - vif->bss_conf.dtim_period); - ctxt_ap->dtim_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * - vif->bss_conf.dtim_period)); - - ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); - - /* - * Only set the beacon time when the MAC is being added, when we - * just modify the MAC then we should keep the time -- the firmware - * can otherwise have a "jumping" TBTT. 
- */ - if (add) { - /* - * If there is a station/P2P client interface which is - * associated, set the AP's TBTT far enough from the station's - * TBTT. Otherwise, set it to the current system time - */ - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, - iwl_mvm_mac_ap_iterator, &data); - - if (data.beacon_device_ts) { - u32 rand = (prandom_u32() % (64 - 36)) + 36; - mvmvif->ap_beacon_time = data.beacon_device_ts + - ieee80211_tu_to_usec(data.beacon_int * rand / - 100); - } else { - mvmvif->ap_beacon_time = - iwl_read_prph(mvm->trans, - DEVICE_SYSTEM_TIME_REG); - } - } - - ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time); - ctxt_ap->beacon_tsf = 0; /* unused */ - - /* TODO: Assume that the beacon id == mac context id */ - ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id); -} - -static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 action) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mac_ctx_cmd cmd = {}; - - WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); - - /* Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - - /* - * pass probe requests and beacons from other APs (needed - * for ht protection); when there're no any associated station - * don't ask FW to pass beacons to prevent unnecessary wake-ups. - */ - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (mvmvif->ap_assoc_sta_count) { - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); - IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); - } else { - IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); - } - - /* Fill the data specific for ap mode */ - iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap, - action == FW_CTXT_ACTION_ADD); - - return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); -} - -static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 action) -{ - struct iwl_mac_ctx_cmd cmd = {}; - struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; - - WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); - - /* Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - - /* - * pass probe requests and beacons from other APs (needed - * for ht protection) - */ - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST | - MAC_FILTER_IN_BEACON); - - /* Fill the data specific for GO mode */ - iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap, - action == FW_CTXT_ACTION_ADD); - - cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & - IEEE80211_P2P_OPPPS_CTWINDOW_MASK); - cmd.go.opp_ps_enabled = - cpu_to_le32(!!(noa->oppps_ctwindow & - IEEE80211_P2P_OPPPS_ENABLE_BIT)); - - return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); -} - -static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 action, bool force_assoc_off, - const u8 *bssid_override) -{ - switch (vif->type) { - case NL80211_IFTYPE_STATION: - return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, - force_assoc_off, - bssid_override); - break; - case NL80211_IFTYPE_AP: - if (!vif->p2p) - return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action); - else - return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action); - break; - case NL80211_IFTYPE_MONITOR: - return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action); - case NL80211_IFTYPE_P2P_DEVICE: - return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action); - case NL80211_IFTYPE_ADHOC: - return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action); - 
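When a client interface is already associated, iwl_mvm_mac_ctxt_cmd_fill_ap() above offsets the AP/GO TBTT by a random 36-63% of the beacon interval from the client's beacon timestamp, so the two beacon schedules do not collide. A rough sketch of that placement, using rand() as a stand-in for prandom_u32() and the same 1024 usec-per-TU factor:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TU_TO_USEC 1024u

/*
 * Place the AP's first beacon 36%..63% of a beacon interval after the
 * client's last known beacon timestamp (device-time microseconds).
 */
static uint32_t ap_beacon_time(uint32_t client_device_ts, uint32_t beacon_int_tu)
{
        uint32_t pct = (uint32_t)(rand() % (64 - 36)) + 36;     /* 36..63 */

        return client_device_ts + beacon_int_tu * pct / 100 * TU_TO_USEC;
}

int main(void)
{
        srand((unsigned int)time(NULL));

        /* 100 TU beacon interval: offset lands between 36 and 63 TU */
        printf("ap_beacon_time=%u\n", ap_beacon_time(1000000, 100));
        return 0;
}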
default: - break; - } - - return -EOPNOTSUPP; -} - -int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", - vif->addr, ieee80211_vif_type_p2p(vif))) - return -EIO; - - ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, - true, NULL); - if (ret) - return ret; - - /* will only do anything at resume from D3 time */ - iwl_mvm_set_last_nonqos_seq(mvm, vif); - - mvmvif->uploaded = true; - return 0; -} - -int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool force_assoc_off, const u8 *bssid_override) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", - vif->addr, ieee80211_vif_type_p2p(vif))) - return -EIO; - - return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, - force_assoc_off, bssid_override); -} - -int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mac_ctx_cmd cmd; - int ret; - - if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", - vif->addr, ieee80211_vif_type_p2p(vif))) - return -EIO; - - memset(&cmd, 0, sizeof(cmd)); - - cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)); - cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); - - ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, - sizeof(cmd), &cmd); - if (ret) { - IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); - return ret; - } - - mvmvif->uploaded = false; - - if (vif->type == NL80211_IFTYPE_MONITOR) - __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); - - return 0; -} - -static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, - struct ieee80211_vif *csa_vif, u32 gp2, - bool tx_success) -{ - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(csa_vif); - - /* Don't start to countdown from a failed beacon */ - if (!tx_success && !mvmvif->csa_countdown) - return; - - mvmvif->csa_countdown = true; - - if (!ieee80211_csa_is_complete(csa_vif)) { - int c = ieee80211_csa_update_counter(csa_vif); - - iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif); - if (csa_vif->p2p && - !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && - tx_success) { - u32 rel_time = (c + 1) * - csa_vif->bss_conf.beacon_int - - IWL_MVM_CHANNEL_SWITCH_TIME_GO; - u32 apply_time = gp2 + rel_time * 1024; - - iwl_mvm_schedule_csa_period(mvm, csa_vif, - IWL_MVM_CHANNEL_SWITCH_TIME_GO - - IWL_MVM_CHANNEL_SWITCH_MARGIN, - apply_time); - } - } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) { - /* we don't have CSA NoA scheduled yet, switch now */ - ieee80211_csa_finish(csa_vif); - RCU_INIT_POINTER(mvm->csa_vif, NULL); - } -} - -void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; - struct iwl_mvm_tx_resp *beacon_notify_hdr; - struct ieee80211_vif *csa_vif; - struct ieee80211_vif *tx_blocked_vif; - u16 status; - - lockdep_assert_held(&mvm->mutex); - - beacon_notify_hdr = &beacon->beacon_notify_hdr; - mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); - - status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK; - IWL_DEBUG_RX(mvm, - "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n", - status, beacon_notify_hdr->failure_frame, - 
le64_to_cpu(beacon->tsf), - mvm->ap_last_beacon_gp2, - le32_to_cpu(beacon_notify_hdr->initial_rate)); - - csa_vif = rcu_dereference_protected(mvm->csa_vif, - lockdep_is_held(&mvm->mutex)); - if (unlikely(csa_vif && csa_vif->csa_active)) - iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2, - (status == TX_STATUS_SUCCESS)); - - tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, - lockdep_is_held(&mvm->mutex)); - if (unlikely(tx_blocked_vif)) { - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(tx_blocked_vif); - - /* - * The channel switch is started and we have blocked the - * stations. If this is the first beacon (the timeout wasn't - * set), set the unblock timeout, otherwise countdown - */ - if (!mvm->csa_tx_block_bcn_timeout) - mvm->csa_tx_block_bcn_timeout = - IWL_MVM_CS_UNBLOCK_TX_TIMEOUT; - else - mvm->csa_tx_block_bcn_timeout--; - - /* Check if the timeout is expired, and unblock tx */ - if (mvm->csa_tx_block_bcn_timeout == 0) { - iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); - RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); - } - } -} - -static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_missed_beacons_notif *missed_beacons = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; - struct iwl_fw_dbg_trigger_tlv *trigger; - u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; - u32 rx_missed_bcon, rx_missed_bcon_since_rx; - - if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id)) - return; - - rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons); - rx_missed_bcon_since_rx = - le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx); - /* - * TODO: the threshold should be adjusted based on latency conditions, - * and/or in case of a CS flow on one of the other AP vifs. 
- */ - if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) > - IWL_MVM_MISSED_BEACONS_THRESHOLD) - ieee80211_beacon_loss(vif); - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, - FW_DBG_TRIGGER_MISSED_BEACONS)) - return; - - trigger = iwl_fw_dbg_get_trigger(mvm->fw, - FW_DBG_TRIGGER_MISSED_BEACONS); - bcon_trig = (void *)trigger->data; - stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); - stop_trig_missed_bcon_since_rx = - le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx); - - /* TODO: implement start trigger */ - - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) - return; - - if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || - rx_missed_bcon >= stop_trig_missed_bcon) - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); -} - -void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacons_notif *mb = (void *)pkt->data; - - IWL_DEBUG_INFO(mvm, - "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", - le32_to_cpu(mb->mac_id), - le32_to_cpu(mb->consec_missed_beacons), - le32_to_cpu(mb->consec_missed_beacons_since_last_rx), - le32_to_cpu(mb->num_recvd_beacons), - le32_to_cpu(mb->num_expected_beacons)); - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_beacon_loss_iterator, - mb); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c deleted file mode 100644 index 1fb684693040..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ /dev/null @@ -1,4260 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "iwl-op-mode.h" -#include "iwl-io.h" -#include "mvm.h" -#include "sta.h" -#include "time-event.h" -#include "iwl-eeprom-parse.h" -#include "iwl-phy-db.h" -#include "testmode.h" -#include "iwl-fw-error-dump.h" -#include "iwl-prph.h" -#include "iwl-csr.h" -#include "iwl-nvm-parse.h" - -static const struct ieee80211_iface_limit iwl_mvm_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE), - }, -}; - -static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { - { - .num_different_channels = 2, - .max_interfaces = 3, - .limits = iwl_mvm_limits, - .n_limits = ARRAY_SIZE(iwl_mvm_limits), - }, -}; - -#ifdef CONFIG_PM_SLEEP -static const struct nl80211_wowlan_tcp_data_token_feature -iwl_mvm_wowlan_tcp_token_feature = { - .min_len = 0, - .max_len = 255, - .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS, -}; - -static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = { - .tok = &iwl_mvm_wowlan_tcp_token_feature, - .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN - - sizeof(struct ethhdr) - - sizeof(struct iphdr) - - sizeof(struct tcphdr), - .data_interval_max = 65535, /* __le16 in API */ - .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN - - sizeof(struct ethhdr) - - sizeof(struct iphdr) - - sizeof(struct tcphdr), - .seq = true, -}; -#endif - -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -/* - * Use the reserved field to indicate magic values. - * these values will only be used internally by the driver, - * and won't make it to the fw (reserved will be 0). - * BC_FILTER_MAGIC_IP - configure the val of this attribute to - * be the vif's ip address. in case there is not a single - * ip address (0, or more than 1), this attribute will - * be skipped. 
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to - * the LSB bytes of the vif's mac address - */ -enum { - BC_FILTER_MAGIC_NONE = 0, - BC_FILTER_MAGIC_IP, - BC_FILTER_MAGIC_MAC, -}; - -static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { - { - /* arp */ - .discard = 0, - .frame_type = BCAST_FILTER_FRAME_TYPE_ALL, - .attrs = { - { - /* frame type - arp, hw type - ethernet */ - .offset_type = - BCAST_FILTER_OFFSET_PAYLOAD_START, - .offset = sizeof(rfc1042_header), - .val = cpu_to_be32(0x08060001), - .mask = cpu_to_be32(0xffffffff), - }, - { - /* arp dest ip */ - .offset_type = - BCAST_FILTER_OFFSET_PAYLOAD_START, - .offset = sizeof(rfc1042_header) + 2 + - sizeof(struct arphdr) + - ETH_ALEN + sizeof(__be32) + - ETH_ALEN, - .mask = cpu_to_be32(0xffffffff), - /* mark it as special field */ - .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP), - }, - }, - }, - { - /* dhcp offer bcast */ - .discard = 0, - .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4, - .attrs = { - { - /* udp dest port - 68 (bootp client)*/ - .offset_type = BCAST_FILTER_OFFSET_IP_END, - .offset = offsetof(struct udphdr, dest), - .val = cpu_to_be32(0x00440000), - .mask = cpu_to_be32(0xffff0000), - }, - { - /* dhcp - lsb bytes of client hw address */ - .offset_type = BCAST_FILTER_OFFSET_IP_END, - .offset = 38, - .mask = cpu_to_be32(0xffffffff), - /* mark it as special field */ - .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC), - }, - }, - }, - /* last filter must be empty */ - {}, -}; -#endif - -void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type); - spin_lock_bh(&mvm->refs_lock); - mvm->refs[ref_type]++; - spin_unlock_bh(&mvm->refs_lock); - iwl_trans_ref(mvm->trans); -} - -void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type); - spin_lock_bh(&mvm->refs_lock); - WARN_ON(!mvm->refs[ref_type]--); - spin_unlock_bh(&mvm->refs_lock); - iwl_trans_unref(mvm->trans); -} - -static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm, - enum iwl_mvm_ref_type except_ref) -{ - int i, j; - - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - spin_lock_bh(&mvm->refs_lock); - for (i = 0; i < IWL_MVM_REF_COUNT; i++) { - if (except_ref == i || !mvm->refs[i]) - continue; - - IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n", - i, mvm->refs[i]); - for (j = 0; j < mvm->refs[i]; j++) - iwl_trans_unref(mvm->trans); - mvm->refs[i] = 0; - } - spin_unlock_bh(&mvm->refs_lock); -} - -bool iwl_mvm_ref_taken(struct iwl_mvm *mvm) -{ - int i; - bool taken = false; - - if (!iwl_mvm_is_d0i3_supported(mvm)) - return true; - - spin_lock_bh(&mvm->refs_lock); - for (i = 0; i < IWL_MVM_REF_COUNT; i++) { - if (mvm->refs[i]) { - taken = true; - break; - } - } - spin_unlock_bh(&mvm->refs_lock); - - return taken; -} - -int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - iwl_mvm_ref(mvm, ref_type); - - if (!wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), - HZ)) { - WARN_ON_ONCE(1); - iwl_mvm_unref(mvm, ref_type); - return -EIO; - } - - return 0; -} - -static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) -{ - int i; - - memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); - for (i = 0; i < NUM_PHY_CTX; i++) { - mvm->phy_ctxts[i].id = i; - mvm->phy_ctxts[i].ref = 0; - } -} - -struct ieee80211_regdomain 
*iwl_mvm_get_regdomain(struct wiphy *wiphy, - const char *alpha2, - enum iwl_mcc_source src_id, - bool *changed) -{ - struct ieee80211_regdomain *regd = NULL; - struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mcc_update_resp *resp; - - IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); - - lockdep_assert_held(&mvm->mutex); - - resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); - if (IS_ERR_OR_NULL(resp)) { - IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", - PTR_ERR_OR_ZERO(resp)); - goto out; - } - - if (changed) - *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); - - regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, - __le32_to_cpu(resp->n_channels), - resp->channels, - __le16_to_cpu(resp->mcc)); - /* Store the return source id */ - src_id = resp->source_id; - kfree(resp); - if (IS_ERR_OR_NULL(regd)) { - IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", - PTR_ERR_OR_ZERO(regd)); - goto out; - } - - IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", - regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); - mvm->lar_regdom_set = true; - mvm->mcc_src = src_id; - -out: - return regd; -} - -void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) -{ - bool changed; - struct ieee80211_regdomain *regd; - - if (!iwl_mvm_is_lar_supported(mvm)) - return; - - regd = iwl_mvm_get_current_regdomain(mvm, &changed); - if (!IS_ERR_OR_NULL(regd)) { - /* only update the regulatory core if changed */ - if (changed) - regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); - - kfree(regd); - } -} - -struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, - bool *changed) -{ - return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", - iwl_mvm_is_wifi_mcc_supported(mvm) ? 
- MCC_SOURCE_GET_CURRENT : - MCC_SOURCE_OLD_FW, changed); -} - -int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) -{ - enum iwl_mcc_source used_src; - struct ieee80211_regdomain *regd; - int ret; - bool changed; - const struct ieee80211_regdomain *r = - rtnl_dereference(mvm->hw->wiphy->regd); - - if (!r) - return -ENOENT; - - /* save the last source in case we overwrite it below */ - used_src = mvm->mcc_src; - if (iwl_mvm_is_wifi_mcc_supported(mvm)) { - /* Notify the firmware we support wifi location updates */ - regd = iwl_mvm_get_current_regdomain(mvm, NULL); - if (!IS_ERR_OR_NULL(regd)) - kfree(regd); - } - - /* Now set our last stored MCC and source */ - regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, - &changed); - if (IS_ERR_OR_NULL(regd)) - return -EIO; - - /* update cfg80211 if the regdomain was changed */ - if (changed) - ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); - else - ret = 0; - - kfree(regd); - return ret; -} - -int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) -{ - struct ieee80211_hw *hw = mvm->hw; - int num_mac, ret, i; - static const u32 mvm_ciphers[] = { - WLAN_CIPHER_SUITE_WEP40, - WLAN_CIPHER_SUITE_WEP104, - WLAN_CIPHER_SUITE_TKIP, - WLAN_CIPHER_SUITE_CCMP, - }; - - /* Tell mac80211 our characteristics */ - ieee80211_hw_set(hw, SIGNAL_DBM); - ieee80211_hw_set(hw, SPECTRUM_MGMT); - ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(hw, QUEUE_CONTROL); - ieee80211_hw_set(hw, WANT_MONITOR_VIF); - ieee80211_hw_set(hw, SUPPORTS_PS); - ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); - ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, TIMING_BEACON_ONLY); - ieee80211_hw_set(hw, CONNECTION_MONITOR); - ieee80211_hw_set(hw, CHANCTX_STA_CSA); - ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); - ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); - - hw->queues = mvm->first_agg_queue; - hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; - hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | - IEEE80211_RADIOTAP_MCS_HAVE_STBC; - hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | - IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; - hw->rate_control_algorithm = "iwl-mvm-rs"; - hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; - hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; - - BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2); - memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); - hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); - hw->wiphy->cipher_suites = mvm->ciphers; - - /* - * Enable 11w if advertised by firmware and software crypto - * is not enabled (as the firmware will interpret some mgmt - * packets, so enabling it with software crypto isn't safe) - */ - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && - !iwlwifi_mod_params.sw_crypto) { - ieee80211_hw_set(hw, MFP_CAPABLE); - mvm->ciphers[hw->wiphy->n_cipher_suites] = - WLAN_CIPHER_SUITE_AES_CMAC; - hw->wiphy->n_cipher_suites++; - } - - /* currently FW API supports only one optional cipher scheme */ - if (mvm->fw->cs[0].cipher) { - mvm->hw->n_cipher_schemes = 1; - mvm->hw->cipher_schemes = &mvm->fw->cs[0]; - mvm->ciphers[hw->wiphy->n_cipher_suites] = - mvm->fw->cs[0].cipher; - hw->wiphy->n_cipher_suites++; - } - - ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); - hw->wiphy->features |= - NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | - NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | - NL80211_FEATURE_ND_RANDOM_MAC_ADDR; - - hw->sta_data_size = sizeof(struct iwl_mvm_sta); - hw->vif_data_size = sizeof(struct iwl_mvm_vif); - hw->chanctx_data_size = 
sizeof(u16); - - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_DEVICE) | - BIT(NL80211_IFTYPE_ADHOC); - - hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; - hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; - if (iwl_mvm_is_lar_supported(mvm)) - hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; - else - hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | - REGULATORY_DISABLE_BEACON_HINTS; - - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD) - hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; - - hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; - - hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(iwl_mvm_iface_combinations); - - hw->wiphy->max_remain_on_channel_duration = 10000; - hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - /* we can compensate an offset of up to 3 channels = 15 MHz */ - hw->wiphy->max_adj_channel_rssi_comp = 3 * 5; - - /* Extract MAC address */ - memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); - hw->wiphy->addresses = mvm->addresses; - hw->wiphy->n_addresses = 1; - - /* Extract additional MAC addresses if available */ - num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? - min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; - - for (i = 1; i < num_mac; i++) { - memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, - ETH_ALEN); - mvm->addresses[i].addr[5]++; - hw->wiphy->n_addresses++; - } - - iwl_mvm_reset_phy_ctxts(mvm); - - hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); - - hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; - - BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); - BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || - IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); - - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; - else - mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; - - if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; - if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) { - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BEAMFORMER) && - fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_LQ_SS_PARAMS)) - hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= - IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; - } - - hw->wiphy->hw_version = mvm->trans->hw_id; - - if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) - hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; - else - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; - hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; - hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; - /* we create the 802.11 header and zero length SSID IE. */ - hw->wiphy->max_sched_scan_ie_len = - SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; - hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; - hw->wiphy->max_sched_scan_plan_interval = U16_MAX; - - /* - * the firmware uses u8 for num of iterations, but 0xff is saved for - * infinite loop, so the maximum number of iterations is actually 254. 
- */ - hw->wiphy->max_sched_scan_plan_iterations = 254; - - hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | - NL80211_FEATURE_LOW_PRIORITY_SCAN | - NL80211_FEATURE_P2P_GO_OPPPS | - NL80211_FEATURE_DYNAMIC_SMPS | - NL80211_FEATURE_STATIC_SMPS | - NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) - hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) - hw->wiphy->features |= NL80211_FEATURE_QUIET; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) - hw->wiphy->features |= - NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) - hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; - - mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; - -#ifdef CONFIG_PM_SLEEP - if (iwl_mvm_is_d0i3_supported(mvm) && - device_can_wakeup(mvm->trans->dev)) { - mvm->wowlan.flags = WIPHY_WOWLAN_ANY; - hw->wiphy->wowlan = &mvm->wowlan; - } - - if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && - mvm->trans->ops->d3_suspend && - mvm->trans->ops->d3_resume && - device_can_wakeup(mvm->trans->dev)) { - mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE | - WIPHY_WOWLAN_NET_DETECT; - if (!iwlwifi_mod_params.sw_crypto) - mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_4WAY_HANDSHAKE; - - mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; - mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; - mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; - mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES; - mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; - hw->wiphy->wowlan = &mvm->wowlan; - } -#endif - -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - /* assign default bcast filtering configuration */ - mvm->bcast_filters = iwl_mvm_default_bcast_filters; -#endif - - ret = iwl_mvm_leds_init(mvm); - if (ret) - return ret; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { - IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; - ieee80211_hw_set(hw, TDLS_WIDER_BW); - } - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { - IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); - hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; - } - - hw->netdev_features |= mvm->cfg->features; - if (!iwl_mvm_is_csum_supported(mvm)) - hw->netdev_features &= ~NETIF_F_RXCSUM; - - ret = ieee80211_register_hw(mvm->hw); - if (ret) - iwl_mvm_leds_exit(mvm); - - return ret; -} - -static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct sk_buff *skb) -{ - struct iwl_mvm_sta *mvmsta; - bool defer = false; - - /* - * double check the IN_D0I3 flag both before and after - * taking the spinlock, in order to prevent taking - * the spinlock when not needed. - */ - if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))) - return false; - - spin_lock(&mvm->d0i3_tx_lock); - /* - * testing the flag again ensures the skb dequeue - * loop (on d0i3 exit) hasn't run yet. 
- */ - if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) - goto out; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->sta_id == IWL_MVM_STATION_COUNT || - mvmsta->sta_id != mvm->d0i3_ap_sta_id) - goto out; - - __skb_queue_tail(&mvm->d0i3_tx, skb); - ieee80211_stop_queues(mvm->hw); - - /* trigger wakeup */ - iwl_mvm_ref(mvm, IWL_MVM_REF_TX); - iwl_mvm_unref(mvm, IWL_MVM_REF_TX); - - defer = true; -out: - spin_unlock(&mvm->d0i3_tx_lock); - return defer; -} - -static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct ieee80211_sta *sta = control->sta; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (void *)skb->data; - - if (iwl_mvm_is_radio_killed(mvm)) { - IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); - goto drop; - } - - if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && - !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && - !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) - goto drop; - - /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ - if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER && - ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_deauth(hdr->frame_control) && - !ieee80211_is_disassoc(hdr->frame_control) && - !ieee80211_is_action(hdr->frame_control))) - sta = NULL; - - if (sta) { - if (iwl_mvm_defer_tx(mvm, sta, skb)) - return; - if (iwl_mvm_tx_skb(mvm, skb, sta)) - goto drop; - return; - } - - if (iwl_mvm_tx_skb_non_sta(mvm, skb)) - goto drop; - return; - drop: - ieee80211_free_txskb(hw, skb); -} - -static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) -{ - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) - return false; - return true; -} - -static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) -{ - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) - return false; - if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) - return true; - - /* enabled by default */ - return true; -} - -#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ - do { \ - if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ - break; \ - iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \ - } while (0) - -static void -iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, - enum ieee80211_ampdu_mlme_action action) -{ - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_ba *ba_trig; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) - return; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); - ba_trig = (void *)trig->data; - - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) - return; - - switch (action) { - case IEEE80211_AMPDU_TX_OPERATIONAL: { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - - CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, - "TX AGG START: MAC %pM tid %d ssn %d\n", - sta->addr, tid, tid_data->ssn); - break; - } - case IEEE80211_AMPDU_TX_STOP_CONT: - CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, - "TX AGG STOP: MAC %pM tid %d\n", - sta->addr, tid); - break; - case IEEE80211_AMPDU_RX_START: - CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, - "RX AGG START: MAC %pM tid %d ssn %d\n", - sta->addr, tid, rx_ba_ssn); - break; - case IEEE80211_AMPDU_RX_STOP: - CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, - "RX AGG STOP: MAC %pM tid %d\n", - sta->addr, tid); - break; - default: - break; - } -} - -static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, - u16 *ssn, u8 buf_size, bool amsdu) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - bool tx_agg_ref = false; - - IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", - sta->addr, tid, action); - - if (!(mvm->nvm_data->sku_cap_11n_enable)) - return -EACCES; - - /* return from D0i3 before starting a new Tx aggregation */ - switch (action) { - case IEEE80211_AMPDU_TX_START: - case IEEE80211_AMPDU_TX_STOP_CONT: - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - case IEEE80211_AMPDU_TX_OPERATIONAL: - /* - * for tx start, wait synchronously until D0i3 exit to - * get the correct sequence number for the tid. - * additionally, some other ampdu actions use direct - * target access, which is not handled automatically - * by the trans layer (unlike commands), so wait for - * d0i3 exit in these cases as well. 
- */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG); - if (ret) - return ret; - - tx_agg_ref = true; - break; - default: - break; - } - - mutex_lock(&mvm->mutex); - - switch (action) { - case IEEE80211_AMPDU_RX_START: - if (!iwl_enable_rx_ampdu(mvm->cfg)) { - ret = -EINVAL; - break; - } - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true); - break; - case IEEE80211_AMPDU_RX_STOP: - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); - break; - case IEEE80211_AMPDU_TX_START: - if (!iwl_enable_tx_ampdu(mvm->cfg)) { - ret = -EINVAL; - break; - } - ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); - break; - case IEEE80211_AMPDU_TX_STOP_CONT: - ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); - break; - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size); - break; - default: - WARN_ON_ONCE(1); - ret = -EINVAL; - break; - } - - if (!ret) { - u16 rx_ba_ssn = 0; - - if (action == IEEE80211_AMPDU_RX_START) - rx_ba_ssn = *ssn; - - iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, - rx_ba_ssn, action); - } - mutex_unlock(&mvm->mutex); - - /* - * If the tid is marked as started, we won't use it for offloaded - * traffic on the next D0i3 entry. It's safe to unref. - */ - if (tx_agg_ref) - iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG); - - return ret; -} - -static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->uploaded = false; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; - - spin_lock_bh(&mvm->time_event_lock); - iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); - spin_unlock_bh(&mvm->time_event_lock); - - mvmvif->phy_ctxt = NULL; - memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); -} - -static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen) -{ - const struct iwl_mvm_dump_ptrs *dump_ptrs = data; - ssize_t bytes_read; - ssize_t bytes_read_trans; - - if (offset < dump_ptrs->op_mode_len) { - bytes_read = min_t(ssize_t, count, - dump_ptrs->op_mode_len - offset); - memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset, - bytes_read); - offset += bytes_read; - count -= bytes_read; - - if (count == 0) - return bytes_read; - } else { - bytes_read = 0; - } - - if (!dump_ptrs->trans_ptr) - return bytes_read; - - offset -= dump_ptrs->op_mode_len; - bytes_read_trans = min_t(ssize_t, count, - dump_ptrs->trans_ptr->len - offset); - memcpy(buffer + bytes_read, - (u8 *)dump_ptrs->trans_ptr->data + offset, - bytes_read_trans); - - return bytes_read + bytes_read_trans; -} - -static void iwl_mvm_free_coredump(const void *data) -{ - const struct iwl_mvm_dump_ptrs *fw_error_dump = data; - - vfree(fw_error_dump->op_mode_ptr); - vfree(fw_error_dump->trans_ptr); - kfree(fw_error_dump); -} - -static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data) -{ - struct iwl_fw_error_dump_fifo *fifo_hdr; - u32 *fifo_data; - u32 fifo_len; - unsigned long flags; - int i, j; - - if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) - return; - - /* Pull RXF data from all RXFs */ - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) { - /* - * Keep aside the additional offset that might be needed for - * next RXF - */ - u32 offset_diff = RXF_DIFF_FROM_PREV * i; - - fifo_hdr = (void 
*)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.rxfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the RXF */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_D_SPACE + - offset_diff)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_WR_PTR + - offset_diff)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_RD_PTR + - offset_diff)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_FENCE_PTR + - offset_diff)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_SET_FENCE_MODE + - offset_diff)); - - /* Lock fence */ - iwl_trans_write_prph(mvm->trans, - RXF_SET_FENCE_MODE + offset_diff, 0x1); - /* Set fence pointer to the same place like WR pointer */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_WR2FENCE + offset_diff, 0x1); - /* Set fence offset */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_FENCE_OFFSET_ADDR + offset_diff, - 0x0); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = iwl_trans_read_prph(mvm->trans, - RXF_FIFO_RD_FENCE_INC + - offset_diff); - *dump_data = iwl_fw_error_next_data(*dump_data); - } - - /* Pull TXF data from all TXFs */ - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) { - /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); - - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.txfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the FIFO */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FIFO_ITEM_CNT)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_WR_PTR)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_RD_PTR)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FENCE_PTR)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_LOCK_FENCE)); - - /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ - iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR, - TXF_WR_PTR); - - /* Dummy-read to advance the read pointer to the head */ - iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = iwl_trans_read_prph(mvm->trans, - TXF_READ_MODIFY_DATA); - *dump_data = iwl_fw_error_next_data(*dump_data); - } - - iwl_trans_release_nic_access(mvm->trans, &flags); -} - -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) -{ - if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert || - !mvm->fw_dump_desc) - return; - - kfree(mvm->fw_dump_desc); - mvm->fw_dump_desc = NULL; -} - -#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ -#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ - -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) -{ - struct iwl_fw_error_dump_file *dump_file; - 
struct iwl_fw_error_dump_data *dump_data; - struct iwl_fw_error_dump_info *dump_info; - struct iwl_fw_error_dump_mem *dump_mem; - struct iwl_fw_error_dump_trigger_desc *dump_trig; - struct iwl_mvm_dump_ptrs *fw_error_dump; - u32 sram_len, sram_ofs; - u32 file_len, fifo_data_len = 0; - u32 smem_len = mvm->cfg->smem_len; - u32 sram2_len = mvm->cfg->dccm2_len; - bool monitor_dump_only = false; - - lockdep_assert_held(&mvm->mutex); - - /* there's no point in fw dump if the bus is dead */ - if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { - IWL_ERR(mvm, "Skip fw error dump since bus is dead\n"); - return; - } - - if (mvm->fw_dump_trig && - mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) - monitor_dump_only = true; - - fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); - if (!fw_error_dump) - return; - - /* SRAM - include stack CCM if driver knows the values for it */ - if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) { - const struct fw_img *img; - - img = &mvm->fw->img[mvm->cur_ucode]; - sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; - sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; - } else { - sram_ofs = mvm->cfg->dccm_offset; - sram_len = mvm->cfg->dccm_len; - } - - /* reading RXF/TXF sizes */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg; - int i; - - fifo_data_len = 0; - - /* Count RXF size */ - for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) { - if (!mem_cfg->rxfifo_size[i]) - continue; - - /* Add header info */ - fifo_data_len += mem_cfg->rxfifo_size[i] + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - - for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) { - if (!mem_cfg->txfifo_size[i]) - continue; - - /* Add header info */ - fifo_data_len += mem_cfg->txfifo_size[i] + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - } - - file_len = sizeof(*dump_file) + - sizeof(*dump_data) * 2 + - sram_len + sizeof(*dump_mem) + - fifo_data_len + - sizeof(*dump_info); - - /* Make room for the SMEM, if it exists */ - if (smem_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; - - /* Make room for the secondary SRAM, if it exists */ - if (sram2_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; - - /* Make room for fw's virtual image pages, if it exists */ - if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) - file_len += mvm->num_of_paging_blk * - (sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_paging) + - PAGING_BLOCK_SIZE); - - /* If we only want a monitor dump, reset the file length */ - if (monitor_dump_only) { - file_len = sizeof(*dump_file) + sizeof(*dump_data) + - sizeof(*dump_info); - } - - /* - * In 8000 HW family B-step include the ICCM (which resides separately) - */ - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && - CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - IWL8260_ICCM_LEN; - - if (mvm->fw_dump_desc) - file_len += sizeof(*dump_data) + sizeof(*dump_trig) + - mvm->fw_dump_desc->len; - - dump_file = vzalloc(file_len); - if (!dump_file) { - kfree(fw_error_dump); - iwl_mvm_free_fw_dump_desc(mvm); - return; - } - - fw_error_dump->op_mode_ptr = dump_file; - - dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); - dump_data = (void *)dump_file->data; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); - dump_data->len = cpu_to_le32(sizeof(*dump_info)); - dump_info = (void *) dump_data->data; - 
dump_info->device_family = - mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); - dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev)); - memcpy(dump_info->fw_human_readable, mvm->fw->human_readable, - sizeof(dump_info->fw_human_readable)); - strncpy(dump_info->dev_human_readable, mvm->cfg->name, - sizeof(dump_info->dev_human_readable)); - strncpy(dump_info->bus_human_readable, mvm->dev->bus->name, - sizeof(dump_info->bus_human_readable)); - - dump_data = iwl_fw_error_next_data(dump_data); - /* We only dump the FIFOs if the FW is in error state */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) - iwl_mvm_dump_fifos(mvm, &dump_data); - - if (mvm->fw_dump_desc) { - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); - dump_data->len = cpu_to_le32(sizeof(*dump_trig) + - mvm->fw_dump_desc->len); - dump_trig = (void *)dump_data->data; - memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc, - sizeof(*dump_trig) + mvm->fw_dump_desc->len); - - /* now we can free this copy */ - iwl_mvm_free_fw_dump_desc(mvm); - dump_data = iwl_fw_error_next_data(dump_data); - } - - /* In case we only want monitor dump, skip to dump trasport data */ - if (monitor_dump_only) - goto dump_trans_data; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, - sram_len); - - if (smem_len) { - dump_data = iwl_fw_error_next_data(dump_data); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); - dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, - dump_mem->data, smem_len); - } - - if (sram2_len) { - dump_data = iwl_fw_error_next_data(dump_data); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, - dump_mem->data, sram2_len); - } - - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && - CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) { - dump_data = iwl_fw_error_next_data(dump_data); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN + - sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET); - iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET, - dump_mem->data, IWL8260_ICCM_LEN); - } - - /* Dump fw's virtual image */ - if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { - u32 i; - - for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { - struct iwl_fw_error_dump_paging *paging; - struct page *pages = - mvm->fw_paging_db[i].fw_paging_block; - - dump_data = iwl_fw_error_next_data(dump_data); - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); - dump_data->len = cpu_to_le32(sizeof(*paging) + - PAGING_BLOCK_SIZE); - paging = (void *)dump_data->data; - 
paging->index = cpu_to_le32(i); - memcpy(paging->data, page_address(pages), - PAGING_BLOCK_SIZE); - } - } - -dump_trans_data: - fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, - mvm->fw_dump_trig); - fw_error_dump->op_mode_len = file_len; - if (fw_error_dump->trans_ptr) - file_len += fw_error_dump->trans_ptr->len; - dump_file->file_len = cpu_to_le32(file_len); - - dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0, - GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump); - - mvm->fw_dump_trig = NULL; - clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); -} - -struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = { - .trig_desc = { - .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), - }, -}; - -static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) -{ - /* clear the D3 reconfig, we only need it to avoid dumping a - * firmware coredump on reconfiguration, we shouldn't do that - * on D3->D0 transition - */ - if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { - mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert; - iwl_mvm_fw_error_dump(mvm); - } - - /* cleanup all stale references (scan, roc), but keep the - * ucode_down ref until reconfig is complete - */ - iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); - - iwl_trans_stop_device(mvm->trans); - - mvm->scan_status = 0; - mvm->ps_disabled = false; - mvm->calibrating = false; - - /* just in case one was running */ - ieee80211_remain_on_channel_expired(mvm->hw); - - /* - * cleanup all interfaces, even inactive ones, as some might have - * gone down during the HW restart - */ - ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); - - mvm->p2p_device_vif = NULL; - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; - - iwl_mvm_reset_phy_ctxts(mvm); - memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); - memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); - memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); - memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); - memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); - memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old)); - memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk)); - memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk)); - - ieee80211_wake_queues(mvm->hw); - - /* clear any stale d0i3 state */ - clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - - mvm->vif_count = 0; - mvm->rx_ba_sessions = 0; - mvm->fw_dbg_conf = FW_DBG_INVALID; - - /* keep statistics ticking */ - iwl_mvm_accu_radio_stats(mvm); -} - -int __iwl_mvm_mac_start(struct iwl_mvm *mvm) -{ - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* Clean up some internal and mac80211 state on restart */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - iwl_mvm_restart_cleanup(mvm); - - ret = iwl_mvm_up(mvm); - - if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - /* Something went wrong - we need to finish some cleanup - * that normally iwl_mvm_mac_restart_complete() below - * would do. - */ - clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - iwl_mvm_d0i3_enable_tx(mvm, NULL); - } - - return ret; -} - -static int iwl_mvm_mac_start(struct ieee80211_hw *hw) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - /* Some hw restart cleanups must not hold the mutex */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - /* - * Make sure we are out of d0i3. 
This is needed - * to make sure the reference accounting is correct - * (and there is no stale d0i3_exit_work). - */ - wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, - &mvm->status), - HZ); - } - - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mac_start(mvm); - mutex_unlock(&mvm->mutex); - - return ret; -} - -static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) -{ - int ret; - - mutex_lock(&mvm->mutex); - - clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - iwl_mvm_d0i3_enable_tx(mvm, NULL); - ret = iwl_mvm_update_quotas(mvm, true, NULL); - if (ret) - IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", - ret); - - /* allow transport/FW low power modes */ - iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); - - /* - * If we have TDLS peers, remove them. We don't know the last seqno/PN - * of packets the FW sent out, so we must reconnect. - */ - iwl_mvm_teardown_tdls_peers(mvm); - - mutex_unlock(&mvm->mutex); -} - -static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) -{ - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) - if (!wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, - &mvm->status), - HZ)) - WARN_ONCE(1, "D0i3 exit on resume timed out\n"); -} - -static void -iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, - enum ieee80211_reconfig_type reconfig_type) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - switch (reconfig_type) { - case IEEE80211_RECONFIG_TYPE_RESTART: - iwl_mvm_restart_complete(mvm); - break; - case IEEE80211_RECONFIG_TYPE_SUSPEND: - iwl_mvm_resume_complete(mvm); - break; - } -} - -void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) -{ - lockdep_assert_held(&mvm->mutex); - - /* firmware counters are obviously reset now, but we shouldn't - * partially track so also clear the fw_reset_accu counters. - */ - memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); - - /* - * Disallow low power states when the FW is down by taking - * the UCODE_DOWN ref. in case of ongoing hw restart the - * ref is already taken, so don't take it again. - */ - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - - /* async_handlers_wk is now blocked */ - - /* - * The work item could be running or queued if the - * ROC time event stops just as we get here. - */ - flush_work(&mvm->roc_done_wk); - - iwl_trans_stop_device(mvm->trans); - - iwl_mvm_async_handlers_purge(mvm); - /* async_handlers_list is empty and will stay empty: HW is stopped */ - - /* the fw is stopped, the aux sta is dead: clean up driver state */ - iwl_mvm_del_aux_sta(mvm); - - /* - * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() - * won't be called in this case). - * But make sure to cleanup interfaces that have gone down before/during - * HW restart was requested. - */ - if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - ieee80211_iterate_interfaces(mvm->hw, 0, - iwl_mvm_cleanup_iterator, mvm); - - /* We shouldn't have any UIDs still set. Loop over all the UIDs to - * make sure there's nothing left there and warn if any is found. 
- */ - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - int i; - - for (i = 0; i < mvm->max_scans; i++) { - if (WARN_ONCE(mvm->scan_uid_status[i], - "UMAC scan UID %d status was not cleaned\n", - i)) - mvm->scan_uid_status[i] = 0; - } - } - - mvm->ucode_loaded = false; -} - -static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - flush_work(&mvm->d0i3_exit_work); - flush_work(&mvm->async_handlers_wk); - cancel_delayed_work_sync(&mvm->fw_dump_wk); - iwl_mvm_free_fw_dump_desc(mvm); - - mutex_lock(&mvm->mutex); - __iwl_mvm_mac_stop(mvm); - mutex_unlock(&mvm->mutex); - - /* - * The worker might have been waiting for the mutex, let it run and - * discover that its list is now empty. - */ - cancel_work_sync(&mvm->async_handlers_wk); -} - -static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) -{ - u16 i; - - lockdep_assert_held(&mvm->mutex); - - for (i = 0; i < NUM_PHY_CTX; i++) - if (!mvm->phy_ctxts[i].ref) - return &mvm->phy_ctxts[i]; - - IWL_ERR(mvm, "No available PHY context\n"); - return NULL; -} - -static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - s16 tx_power) -{ - struct iwl_dev_tx_power_cmd cmd = { - .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), - .v2.mac_context_id = - cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .v2.pwr_restriction = cpu_to_le16(8 * tx_power), - }; - int len = sizeof(cmd); - - if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN)) - len = sizeof(cmd.v2); - - return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); -} - -static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - mvmvif->mvm = mvm; - - /* - * make sure D0i3 exit is completed, otherwise a target access - * during tx queue configuration could be done when still in - * D0i3 state. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF); - if (ret) - return ret; - - /* - * Not much to do here. The stack will not allow interface - * types or combinations that we didn't advertise, so we - * don't really have to check the types. - */ - - mutex_lock(&mvm->mutex); - - /* make sure that beacon statistics don't go backwards with FW reset */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; - - /* Allocate resources for the MAC context, and add it to the fw */ - ret = iwl_mvm_mac_ctxt_init(mvm, vif); - if (ret) - goto out_unlock; - - /* Counting number of interfaces is needed for legacy PM */ - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count++; - - /* - * The AP binding flow can be done only after the beacon - * template is configured (which happens only in the mac80211 - * start_ap() flow), and adding the broadcast station can happen - * only after the binding. - * In addition, since modifying the MAC before adding a bcast - * station is not allowed by the FW, delay the adding of MAC context to - * the point where we can also add the bcast station. 
- * In short: there's not much we can do at this point, other than - * allocating resources :) - */ - if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) { - ret = iwl_mvm_alloc_bcast_sta(mvm, vif); - if (ret) { - IWL_ERR(mvm, "Failed to allocate bcast sta\n"); - goto out_release; - } - - iwl_mvm_vif_dbgfs_register(mvm, vif); - goto out_unlock; - } - - mvmvif->features |= hw->netdev_features; - - ret = iwl_mvm_mac_ctxt_add(mvm, vif); - if (ret) - goto out_release; - - ret = iwl_mvm_power_update_mac(mvm); - if (ret) - goto out_remove_mac; - - /* beacon filtering */ - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); - if (ret) - goto out_remove_mac; - - if (!mvm->bf_allowed_vif && - vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { - mvm->bf_allowed_vif = mvmvif; - vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | - IEEE80211_VIF_SUPPORTS_CQM_RSSI; - } - - /* - * P2P_DEVICE interface does not have a channel context assigned to it, - * so a dedicated PHY context is allocated to it and the corresponding - * MAC context is bound to it at this stage. - */ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - - mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!mvmvif->phy_ctxt) { - ret = -ENOSPC; - goto out_free_bf; - } - - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (ret) - goto out_unref_phy; - - ret = iwl_mvm_add_bcast_sta(mvm, vif); - if (ret) - goto out_unbind; - - /* Save a pointer to p2p device vif, so it can later be used to - * update the p2p device MAC when a GO is started/stopped */ - mvm->p2p_device_vif = vif; - } - - iwl_mvm_vif_dbgfs_register(mvm, vif); - goto out_unlock; - - out_unbind: - iwl_mvm_binding_remove_vif(mvm, vif); - out_unref_phy: - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - out_free_bf: - if (mvm->bf_allowed_vif == mvmvif) { - mvm->bf_allowed_vif = NULL; - vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | - IEEE80211_VIF_SUPPORTS_CQM_RSSI); - } - out_remove_mac: - mvmvif->phy_ctxt = NULL; - iwl_mvm_mac_ctxt_remove(mvm, vif); - out_release: - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count--; - - iwl_mvm_mac_ctxt_release(mvm, vif); - out_unlock: - mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF); - - return ret; -} - -static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif); - - if (tfd_msk) { - /* - * mac80211 first removes all the stations of the vif and - * then removes the vif. When it removes a station it also - * flushes the AMPDU session. So by now, all the AMPDU sessions - * of all the stations of this vif are closed, and the queues - * of these AMPDU sessions are properly closed. - * We still need to take care of the shared queues of the vif. - * Flush them here. - */ - mutex_lock(&mvm->mutex); - iwl_mvm_flush_tx_path(mvm, tfd_msk, 0); - mutex_unlock(&mvm->mutex); - - /* - * There are transports that buffer a few frames in the host. - * For these, the flush above isn't enough since while we were - * flushing, the transport might have sent more frames to the - * device. To solve this, wait here until the transport is - * empty. Technically, this could have replaced the flush - * above, but flush is much faster than draining. So flush - * first, and drain to make sure we have no frames in the - * transport anymore. 
- * If a station still had frames on the shared queues, it is - * already marked as draining, so to complete the draining, we - * just need to wait until the transport is empty. - */ - iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk); - } - - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - /* - * Flush the ROC worker which will flush the OFFCHANNEL queue. - * We assume here that all the packets sent to the OFFCHANNEL - * queue are sent in ROC session. - */ - flush_work(&mvm->roc_done_wk); - } else { - /* - * By now, all the AC queues are empty. The AGG queues are - * empty too. We already got all the Tx responses for all the - * packets in the queues. The drain work can have been - * triggered. Flush it. - */ - flush_work(&mvm->sta_drained_wk); - } -} - -static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - iwl_mvm_prepare_mac_removal(mvm, vif); - - mutex_lock(&mvm->mutex); - - if (mvm->bf_allowed_vif == mvmvif) { - mvm->bf_allowed_vif = NULL; - vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | - IEEE80211_VIF_SUPPORTS_CQM_RSSI); - } - - iwl_mvm_vif_dbgfs_clean(mvm, vif); - - /* - * For AP/GO interface, the tear down of the resources allocated to the - * interface is be handled as part of the stop_ap flow. - */ - if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) { -#ifdef CONFIG_NL80211_TESTMODE - if (vif == mvm->noa_vif) { - mvm->noa_vif = NULL; - mvm->noa_duration = 0; - } -#endif - iwl_mvm_dealloc_bcast_sta(mvm, vif); - goto out_release; - } - - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - mvm->p2p_device_vif = NULL; - iwl_mvm_rm_bcast_sta(mvm, vif); - iwl_mvm_binding_remove_vif(mvm, vif); - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - mvmvif->phy_ctxt = NULL; - } - - if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count--; - - iwl_mvm_power_update_mac(mvm); - iwl_mvm_mac_ctxt_remove(mvm, vif); - -out_release: - iwl_mvm_mac_ctxt_release(mvm, vif); - mutex_unlock(&mvm->mutex); -} - -static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) -{ - return 0; -} - -struct iwl_mvm_mc_iter_data { - struct iwl_mvm *mvm; - int port_id; -}; - -static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_mc_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; - int ret, len; - - /* if we don't have free ports, mcast frames will be dropped */ - if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) - return; - - if (vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc) - return; - - cmd->port_id = data->port_id++; - memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); - len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); - - ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); - if (ret) - IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); -} - -static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) -{ - struct iwl_mvm_mc_iter_data iter_data = { - .mvm = mvm, - }; - - lockdep_assert_held(&mvm->mutex); - - if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) - return; - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_mc_iface_iterator, &iter_data); -} - -static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, - struct netdev_hw_addr_list *mc_list) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mcast_filter_cmd *cmd; - struct netdev_hw_addr *addr; - int addr_count; - bool pass_all; - int len; - - addr_count = netdev_hw_addr_list_count(mc_list); - pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || - IWL_MVM_FW_MCAST_FILTER_PASS_ALL; - if (pass_all) - addr_count = 0; - - len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); - cmd = kzalloc(len, GFP_ATOMIC); - if (!cmd) - return 0; - - if (pass_all) { - cmd->pass_all = 1; - return (u64)(unsigned long)cmd; - } - - netdev_hw_addr_list_for_each(addr, mc_list) { - IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", - cmd->count, addr->addr); - memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], - addr->addr, ETH_ALEN); - cmd->count++; - } - - return (u64)(unsigned long)cmd; -} - -static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; - - mutex_lock(&mvm->mutex); - - /* replace previous configuration */ - kfree(mvm->mcast_filter_cmd); - mvm->mcast_filter_cmd = cmd; - - if (!cmd) - goto out; - - iwl_mvm_recalc_multicast(mvm); -out: - mutex_unlock(&mvm->mutex); - *total_flags = 0; -} - -static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - unsigned int filter_flags, - unsigned int changed_flags) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - /* We support only filter for probe requests */ - if (!(changed_flags & FIF_PROBE_REQ)) - return; - - /* Supported only for p2p client interfaces */ - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || - !vif->p2p) - return; - - mutex_lock(&mvm->mutex); - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - mutex_unlock(&mvm->mutex); -} - -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -struct iwl_bcast_iter_data { - struct iwl_mvm *mvm; - struct iwl_bcast_filter_cmd *cmd; - u8 current_filter; -}; - -static void -iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif, - const struct iwl_fw_bcast_filter *in_filter, - struct iwl_fw_bcast_filter *out_filter) -{ - struct iwl_fw_bcast_filter_attr *attr; - int i; - - memcpy(out_filter, in_filter, sizeof(*out_filter)); - - for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) { - attr = &out_filter->attrs[i]; - - if (!attr->mask) - break; - - switch (attr->reserved1) { - case cpu_to_le16(BC_FILTER_MAGIC_IP): - if (vif->bss_conf.arp_addr_cnt != 1) { - attr->mask = 0; - continue; - } - - attr->val = vif->bss_conf.arp_addr_list[0]; - break; - case cpu_to_le16(BC_FILTER_MAGIC_MAC): - attr->val = *(__be32 *)&vif->addr[2]; - break; - default: - break; - } - attr->reserved1 = 0; - out_filter->num_attrs++; - } -} - -static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_bcast_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct iwl_bcast_filter_cmd *cmd = data->cmd; - struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); - struct iwl_fw_bcast_mac *bcast_mac; - int i; - - if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) - return; - - bcast_mac = &cmd->macs[mvmvif->id]; - - /* - * enable filtering only for associated stations, but not for P2P - * Clients - */ - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || - !vif->bss_conf.assoc) - return; - - bcast_mac->default_discard = 1; - - /* copy all configured filters */ - for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { - /* - * Make sure we don't exceed our filters limit. - * if there is still a valid filter to be configured, - * be on the safe side and just allow bcast for this mac. - */ - if (WARN_ON_ONCE(data->current_filter >= - ARRAY_SIZE(cmd->filters))) { - bcast_mac->default_discard = 0; - bcast_mac->attached_filters = 0; - break; - } - - iwl_mvm_set_bcast_filter(vif, - &mvm->bcast_filters[i], - &cmd->filters[data->current_filter]); - - /* skip current filter if it contains no attributes */ - if (!cmd->filters[data->current_filter].num_attrs) - continue; - - /* attach the filter to current mac */ - bcast_mac->attached_filters |= - cpu_to_le16(BIT(data->current_filter)); - - data->current_filter++; - } -} - -bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, - struct iwl_bcast_filter_cmd *cmd) -{ - struct iwl_bcast_iter_data iter_data = { - .mvm = mvm, - .cmd = cmd, - }; - - if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) - return false; - - memset(cmd, 0, sizeof(*cmd)); - cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); - cmd->max_macs = ARRAY_SIZE(cmd->macs); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* use debugfs filters/macs if override is configured */ - if (mvm->dbgfs_bcast_filtering.override) { - memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, - sizeof(cmd->filters)); - memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, - sizeof(cmd->macs)); - return true; - } -#endif - - /* if no filters are configured, do nothing */ - if (!mvm->bcast_filters) - return false; - - /* configure and attach these filters for each associated sta vif */ - ieee80211_iterate_active_interfaces( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bcast_filter_iterator, &iter_data); - - return true; -} -static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_bcast_filter_cmd cmd; - - if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) - return 0; - - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - return 0; - - return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); -} -#else -static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - return 0; -} -#endif - -static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - /* - * Re-calculate the tsf id, as the master-slave relations depend on the - * beacon interval, which was not known when the station interface was - * added. - */ - if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) - iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); - - /* - * If we're not associated yet, take the (new) BSSID before associating - * so the firmware knows. If we're already associated, then use the old - * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC - * branch for disassociation below. 
- */ - if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) - memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); - - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); - if (ret) - IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); - - /* after sending it once, adopt mac80211 data */ - memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); - mvmvif->associated = bss_conf->assoc; - - if (changes & BSS_CHANGED_ASSOC) { - if (bss_conf->assoc) { - /* clear statistics to get clean beacon counter */ - iwl_mvm_request_statistics(mvm, true); - memset(&mvmvif->beacon_stats, 0, - sizeof(mvmvif->beacon_stats)); - - /* add quota for this interface */ - ret = iwl_mvm_update_quotas(mvm, true, NULL); - if (ret) { - IWL_ERR(mvm, "failed to update quotas\n"); - return; - } - - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, - &mvm->status)) { - /* - * If we're restarting then the firmware will - * obviously have lost synchronisation with - * the AP. It will attempt to synchronise by - * itself, but we can make it more reliable by - * scheduling a session protection time event. - * - * The firmware needs to receive a beacon to - * catch up with synchronisation, use 110% of - * the beacon interval. - * - * Set a large maximum delay to allow for more - * than a single interface. - */ - u32 dur = (11 * vif->bss_conf.beacon_int) / 10; - iwl_mvm_protect_session(mvm, vif, dur, dur, - 5 * dur, false); - } - - iwl_mvm_sf_update(mvm, vif, false); - iwl_mvm_power_vif_assoc(mvm, vif); - if (vif->p2p) { - iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); - iwl_mvm_update_smps(mvm, vif, - IWL_MVM_SMPS_REQ_PROT, - IEEE80211_SMPS_DYNAMIC); - } - } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { - /* - * If update fails - SF might be running in associated - * mode while disassociated - which is forbidden. - */ - WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false), - "Failed to update SF upon disassociation\n"); - - /* remove AP station now that the MAC is unassoc */ - ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); - if (ret) - IWL_ERR(mvm, "failed to remove AP station\n"); - - if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; - /* remove quota for this interface */ - ret = iwl_mvm_update_quotas(mvm, false, NULL); - if (ret) - IWL_ERR(mvm, "failed to update quotas\n"); - - if (vif->p2p) - iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); - - /* this will take the cleared BSSID from bss_conf */ - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - if (ret) - IWL_ERR(mvm, - "failed to update MAC %pM (clear after unassoc)\n", - vif->addr); - } - - iwl_mvm_recalc_multicast(mvm); - iwl_mvm_configure_bcast_filter(mvm, vif); - - /* reset rssi values */ - mvmvif->bf_data.ave_beacon_signal = 0; - - iwl_mvm_bt_coex_vif_change(mvm); - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, - IEEE80211_SMPS_AUTOMATIC); - } else if (changes & BSS_CHANGED_BEACON_INFO) { - /* - * We received a beacon _after_ association so - * remove the session protection. 
- */ - iwl_mvm_remove_time_event(mvm, mvmvif, - &mvmvif->time_event_data); - } - - if (changes & BSS_CHANGED_BEACON_INFO) { - iwl_mvm_sf_update(mvm, vif, false); - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - } - - if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { - ret = iwl_mvm_power_update_mac(mvm); - if (ret) - IWL_ERR(mvm, "failed to update power mode\n"); - } - - if (changes & BSS_CHANGED_TXPOWER) { - IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", - bss_conf->txpower); - iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); - } - - if (changes & BSS_CHANGED_CQM) { - IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); - /* reset cqm events tracking */ - mvmvif->bf_data.last_cqm_event = 0; - if (mvmvif->bf_data.bf_enabled) { - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); - if (ret) - IWL_ERR(mvm, - "failed to update CQM thresholds\n"); - } - } - - if (changes & BSS_CHANGED_ARP_FILTER) { - IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); - iwl_mvm_configure_bcast_filter(mvm, vif); - } -} - -static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - /* - * iwl_mvm_mac_ctxt_add() might read directly from the device - * (the system time), so make sure it is available. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP); - if (ret) - return ret; - - mutex_lock(&mvm->mutex); - - /* Send the beacon template */ - ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); - if (ret) - goto out_unlock; - - /* - * Re-calculate the tsf id, as the master-slave relations depend on the - * beacon interval, which was not known when the AP interface was added. - */ - if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); - - mvmvif->ap_assoc_sta_count = 0; - - /* Add the mac context */ - ret = iwl_mvm_mac_ctxt_add(mvm, vif); - if (ret) - goto out_unlock; - - /* Perform the binding */ - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (ret) - goto out_remove; - - /* Send the bcast station. 
At this stage the TBTT and DTIM time events - * are added and applied to the scheduler */ - ret = iwl_mvm_send_add_bcast_sta(mvm, vif); - if (ret) - goto out_unbind; - - /* must be set before quota calculations */ - mvmvif->ap_ibss_active = true; - - /* power updated needs to be done before quotas */ - iwl_mvm_power_update_mac(mvm); - - ret = iwl_mvm_update_quotas(mvm, false, NULL); - if (ret) - goto out_quota_failed; - - /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ - if (vif->p2p && mvm->p2p_device_vif) - iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); - - iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); - - iwl_mvm_bt_coex_vif_change(mvm); - - /* we don't support TDLS during DCM */ - if (iwl_mvm_phy_ctx_count(mvm) > 1) - iwl_mvm_teardown_tdls_peers(mvm); - - goto out_unlock; - -out_quota_failed: - iwl_mvm_power_update_mac(mvm); - mvmvif->ap_ibss_active = false; - iwl_mvm_send_rm_bcast_sta(mvm, vif); -out_unbind: - iwl_mvm_binding_remove_vif(mvm, vif); -out_remove: - iwl_mvm_mac_ctxt_remove(mvm, vif); -out_unlock: - mutex_unlock(&mvm->mutex); - iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP); - return ret; -} - -static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - iwl_mvm_prepare_mac_removal(mvm, vif); - - mutex_lock(&mvm->mutex); - - /* Handle AP stop while in CSA */ - if (rcu_access_pointer(mvm->csa_vif) == vif) { - iwl_mvm_remove_time_event(mvm, mvmvif, - &mvmvif->time_event_data); - RCU_INIT_POINTER(mvm->csa_vif, NULL); - mvmvif->csa_countdown = false; - } - - if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { - RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); - mvm->csa_tx_block_bcn_timeout = 0; - } - - mvmvif->ap_ibss_active = false; - mvm->ap_last_beacon_gp2 = 0; - - iwl_mvm_bt_coex_vif_change(mvm); - - iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); - - /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ - if (vif->p2p && mvm->p2p_device_vif) - iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); - - iwl_mvm_update_quotas(mvm, false, NULL); - iwl_mvm_send_rm_bcast_sta(mvm, vif); - iwl_mvm_binding_remove_vif(mvm, vif); - - iwl_mvm_power_update_mac(mvm); - - iwl_mvm_mac_ctxt_remove(mvm, vif); - - mutex_unlock(&mvm->mutex); -} - -static void -iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - /* Changes will be applied when the AP/IBSS is started */ - if (!mvmvif->ap_ibss_active) - return; - - if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | - BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) - IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); - - /* Need to send a new beacon template to the FW */ - if (changes & BSS_CHANGED_BEACON && - iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) - IWL_WARN(mvm, "Failed updating beacon data\n"); - - if (changes & BSS_CHANGED_TXPOWER) { - IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", - bss_conf->txpower); - iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); - } - -} - -static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - /* - * iwl_mvm_bss_info_changed_station() might call - * 
iwl_mvm_protect_session(), which reads directly from - * the device (the system time), so make sure it is available. - */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED)) - return; - - mutex_lock(&mvm->mutex); - - if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) - iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); - break; - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_ADHOC: - iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); - break; - default: - /* shouldn't happen */ - WARN_ON_ONCE(1); - } - - mutex_unlock(&mvm->mutex); - iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED); -} - -static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_scan_request *hw_req) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - if (hw_req->req.n_channels == 0 || - hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) - return -EINVAL; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); - mutex_unlock(&mvm->mutex); - - return ret; -} - -static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - mutex_lock(&mvm->mutex); - - /* Due to a race condition, it's possible that mac80211 asks - * us to stop a hw_scan when it's already stopped. This can - * happen, for instance, if we stopped the scan ourselves, - * called ieee80211_scan_completed() and the userspace called - * cancel scan scan before ieee80211_scan_work() could run. - * To handle that, simply return if the scan is not running. - */ - if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) - iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); - - mutex_unlock(&mvm->mutex); -} - -static void -iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u16 tids, - int num_frames, - enum ieee80211_frame_release_type reason, - bool more_data) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - /* Called when we need to transmit (a) frame(s) from mac80211 */ - - iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, - tids, more_data, false); -} - -static void -iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u16 tids, - int num_frames, - enum ieee80211_frame_release_type reason, - bool more_data) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - /* Called when we need to transmit (a) frame(s) from agg queue */ - - iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, - tids, more_data, true); -} - -static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned long txqs = 0, tids = 0; - int tid; - - spin_lock_bh(&mvmsta->lock); - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - - if (tid_data->state != IWL_AGG_ON && - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) - continue; - - __set_bit(tid_data->txq_id, &txqs); - - if (iwl_mvm_tid_queued(tid_data) == 0) - continue; - - __set_bit(tid, &tids); - } - - switch (cmd) { - case STA_NOTIFY_SLEEP: - if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) - ieee80211_sta_block_awake(hw, sta, true); - - 
for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) - ieee80211_sta_set_buffered(sta, tid, true); - - if (txqs) - iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); - /* - * The fw updates the STA to be asleep. Tx packets on the Tx - * queues to this station will not be transmitted. The fw will - * send a Tx response with TX_STATUS_FAIL_DEST_PS. - */ - break; - case STA_NOTIFY_AWAKE: - if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) - break; - - if (txqs) - iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); - iwl_mvm_sta_modify_ps_wake(mvm, sta); - break; - default: - break; - } - spin_unlock_bh(&mvmsta->lock); -} - -static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - - /* - * This is called before mac80211 does RCU synchronisation, - * so here we already invalidate our internal RCU-protected - * station pointer. The rest of the code will thus no longer - * be able to find the station this way, and we don't rely - * on further RCU synchronisation after the sta_state() - * callback deleted the station. - */ - mutex_lock(&mvm->mutex); - if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], - ERR_PTR(-ENOENT)); - - if (mvm_sta->vif->type == NL80211_IFTYPE_AP) { - mvmvif->ap_assoc_sta_count--; - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } - - mutex_unlock(&mvm->mutex); -} - -static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - const u8 *bssid) -{ - if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) - return; - - if (iwlwifi_mod_params.uapsd_disable) { - vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; - return; - } - - vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; -} - -static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - enum ieee80211_sta_state old_state, - enum ieee80211_sta_state new_state) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", - sta->addr, old_state, new_state); - - /* this would be a mac80211 bug ... but don't crash */ - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) - return -EINVAL; - - /* if a STA is being removed, reuse its ID */ - flush_work(&mvm->sta_drained_wk); - - mutex_lock(&mvm->mutex); - if (old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE) { - /* - * Firmware bug - it'll crash if the beacon interval is less - * than 16. We can't avoid connecting at all, so refuse the - * station state change, this will cause mac80211 to abandon - * attempts to connect to this AP, and eventually wpa_s will - * blacklist the AP... 
- */ - if (vif->type == NL80211_IFTYPE_STATION && - vif->bss_conf.beacon_int < 16) { - IWL_ERR(mvm, - "AP %pM beacon interval is %d, refusing due to firmware bug!\n", - sta->addr, vif->bss_conf.beacon_int); - ret = -EINVAL; - goto out_unlock; - } - - if (sta->tdls && - (vif->p2p || - iwl_mvm_tdls_sta_count(mvm, NULL) == - IWL_MVM_TDLS_STA_COUNT || - iwl_mvm_phy_ctx_count(mvm) > 1)) { - IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); - ret = -EBUSY; - goto out_unlock; - } - - ret = iwl_mvm_add_sta(mvm, vif, sta); - if (sta->tdls && ret == 0) - iwl_mvm_recalc_tdls_state(mvm, vif, true); - } else if (old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_AUTH) { - /* - * EBS may be disabled due to previous failures reported by FW. - * Reset EBS status here assuming environment has been changed. - */ - mvm->last_ebs_successful = true; - iwl_mvm_check_uapsd(mvm, vif, sta->addr); - ret = 0; - } else if (old_state == IEEE80211_STA_AUTH && - new_state == IEEE80211_STA_ASSOC) { - ret = iwl_mvm_update_sta(mvm, vif, sta); - if (ret == 0) - iwl_mvm_rs_rate_init(mvm, sta, - mvmvif->phy_ctxt->channel->band, - true); - } else if (old_state == IEEE80211_STA_ASSOC && - new_state == IEEE80211_STA_AUTHORIZED) { - - /* we don't support TDLS during DCM */ - if (iwl_mvm_phy_ctx_count(mvm) > 1) - iwl_mvm_teardown_tdls_peers(mvm); - - /* enable beacon filtering */ - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - ret = 0; - } else if (old_state == IEEE80211_STA_AUTHORIZED && - new_state == IEEE80211_STA_ASSOC) { - /* disable beacon filtering */ - WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0)); - ret = 0; - } else if (old_state == IEEE80211_STA_ASSOC && - new_state == IEEE80211_STA_AUTH) { - ret = 0; - } else if (old_state == IEEE80211_STA_AUTH && - new_state == IEEE80211_STA_NONE) { - ret = 0; - } else if (old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST) { - ret = iwl_mvm_rm_sta(mvm, vif, sta); - if (sta->tdls) - iwl_mvm_recalc_tdls_state(mvm, vif, false); - } else { - ret = -EIO; - } - out_unlock: - mutex_unlock(&mvm->mutex); - - if (sta->tdls && ret == 0) { - if (old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE) - ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); - else if (old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST) - ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); - } - - return ret; -} - -static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - mvm->rts_threshold = value; - - return 0; -} - -static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u32 changed) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - if (vif->type == NL80211_IFTYPE_STATION && - changed & IEEE80211_RC_NSS_CHANGED) - iwl_mvm_sf_update(mvm, vif, false); -} - -static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 ac, - const struct ieee80211_tx_queue_params *params) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->queue_params[ac] = *params; - - /* - * No need to update right away, we'll get BSS_CHANGED_QOS - * The exception is P2P_DEVICE interface which needs immediate update. 
- */ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - int ret; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - mutex_unlock(&mvm->mutex); - return ret; - } - return 0; -} - -static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS, - 200 + vif->bss_conf.beacon_int); - u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS, - 100 + vif->bss_conf.beacon_int); - - if (WARN_ON_ONCE(vif->bss_conf.assoc)) - return; - - /* - * iwl_mvm_protect_session() reads directly from the device - * (the system time), so make sure it is available. - */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) - return; - - mutex_lock(&mvm->mutex); - /* Try really hard to protect the session and hear a beacon */ - iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); - mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); -} - -static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *req, - struct ieee80211_scan_ies *ies) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - int ret; - - mutex_lock(&mvm->mutex); - - if (!vif->bss_conf.idle) { - ret = -EBUSY; - goto out; - } - - ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); - -out: - mutex_unlock(&mvm->mutex); - return ret; -} - -static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - mutex_lock(&mvm->mutex); - - /* Due to a race condition, it's possible that mac80211 asks - * us to stop a sched_scan when it's already stopped. This - * can happen, for instance, if we stopped the scan ourselves, - * called ieee80211_sched_scan_stopped() and the userspace called - * stop sched scan scan before ieee80211_sched_scan_stopped_work() - * could run. To handle this, simply return if the scan is - * not running. - */ - if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { - mutex_unlock(&mvm->mutex); - return 0; - } - - ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); - mutex_unlock(&mvm->mutex); - iwl_mvm_wait_for_async_handlers(mvm); - - return ret; -} - -static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, - enum set_key_cmd cmd, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - if (iwlwifi_mod_params.sw_crypto) { - IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - break; - case WLAN_CIPHER_SUITE_CCMP: - key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; - break; - case WLAN_CIPHER_SUITE_AES_CMAC: - WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); - break; - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - /* For non-client mode, only use WEP keys for TX as we probably - * don't have a station yet anyway and would then have to keep - * track of the keys, linking them to each of the clients/peers - * as they appear. For now, don't do that, for performance WEP - * offload doesn't really matter much, but we need it for some - * other offload features in client mode. 
- */ - if (vif->type != NL80211_IFTYPE_STATION) - return 0; - break; - default: - /* currently FW supports only one optional cipher scheme */ - if (hw->n_cipher_schemes && - hw->cipher_schemes->cipher == key->cipher) - key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; - else - return -EOPNOTSUPP; - } - - mutex_lock(&mvm->mutex); - - switch (cmd) { - case SET_KEY: - if ((vif->type == NL80211_IFTYPE_ADHOC || - vif->type == NL80211_IFTYPE_AP) && !sta) { - /* - * GTK on AP interface is a TX-only key, return 0; - * on IBSS they're per-station and because we're lazy - * we don't support them for RX, so do the same. - */ - ret = 0; - key->hw_key_idx = STA_KEY_IDX_INVALID; - break; - } - - /* During FW restart, in order to restore the state as it was, - * don't try to reprogram keys we previously failed for. - */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - key->hw_key_idx == STA_KEY_IDX_INVALID) { - IWL_DEBUG_MAC80211(mvm, - "skip invalid idx key programming during restart\n"); - ret = 0; - break; - } - - IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, - test_bit(IWL_MVM_STATUS_IN_HW_RESTART, - &mvm->status)); - if (ret) { - IWL_WARN(mvm, "set key failed\n"); - /* - * can't add key for RX, but we don't need it - * in the device for TX so still return 0 - */ - key->hw_key_idx = STA_KEY_IDX_INVALID; - ret = 0; - } - - break; - case DISABLE_KEY: - if (key->hw_key_idx == STA_KEY_IDX_INVALID) { - ret = 0; - break; - } - - IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); - ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&mvm->mutex); - return ret; -} - -static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) - return; - - iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); -} - - -static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_mvm *mvm = - container_of(notif_wait, struct iwl_mvm, notif_wait); - struct iwl_hs20_roc_res *resp; - int resp_len = iwl_rx_packet_payload_len(pkt); - struct iwl_mvm_time_event_data *te_data = data; - - if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) - return true; - - if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { - IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); - return true; - } - - resp = (void *)pkt->data; - - IWL_DEBUG_TE(mvm, - "Aux ROC: Recieved response from ucode: status=%d uid=%d\n", - resp->status, resp->event_unique_id); - - te_data->uid = le32_to_cpu(resp->event_unique_id); - IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", - te_data->uid); - - spin_lock_bh(&mvm->time_event_lock); - list_add_tail(&te_data->list, &mvm->aux_roc_te_list); - spin_unlock_bh(&mvm->time_event_lock); - - return true; -} - -#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200 -static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, - struct ieee80211_channel *channel, - struct ieee80211_vif *vif, - int duration) -{ - int res, time_reg = DEVICE_SYSTEM_TIME_REG; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; - static const u16 time_event_response[] = { HOT_SPOT_CMD }; - struct iwl_notification_wait wait_time_event; - struct iwl_hs20_roc_req aux_roc_req = { - 
.action = cpu_to_le32(FW_CTXT_ACTION_ADD), - .id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), - .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), - /* Set the channel info data */ - .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ? - PHY_BAND_24 : PHY_BAND_5, - .channel_info.channel = channel->hw_value, - .channel_info.width = PHY_VHT_CHANNEL_MODE20, - /* Set the time and duration */ - .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)), - .apply_time_max_delay = - cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)), - .duration = cpu_to_le32(MSEC_TO_TU(duration)), - }; - - /* Set the node address */ - memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); - - lockdep_assert_held(&mvm->mutex); - - spin_lock_bh(&mvm->time_event_lock); - - if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { - spin_unlock_bh(&mvm->time_event_lock); - return -EIO; - } - - te_data->vif = vif; - te_data->duration = duration; - te_data->id = HOT_SPOT_CMD; - - spin_unlock_bh(&mvm->time_event_lock); - - /* - * Use a notification wait, which really just processes the - * command response and doesn't wait for anything, in order - * to be able to process the response and get the UID inside - * the RX path. Using CMD_WANT_SKB doesn't work because it - * stores the buffer and then wakes up this thread, by which - * time another notification (that the time event started) - * might already be processed unsuccessfully. - */ - iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, - time_event_response, - ARRAY_SIZE(time_event_response), - iwl_mvm_rx_aux_roc, te_data); - - res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req), - &aux_roc_req); - - if (res) { - IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); - iwl_remove_notification(&mvm->notif_wait, &wait_time_event); - goto out_clear_te; - } - - /* No need to wait for anything, so just pass 1 (0 isn't valid) */ - res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); - /* should never fail */ - WARN_ON_ONCE(res); - - if (res) { - out_clear_te: - spin_lock_bh(&mvm->time_event_lock); - iwl_mvm_te_clear_data(mvm, te_data); - spin_unlock_bh(&mvm->time_event_lock); - } - - return res; -} - -static int iwl_mvm_roc(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel *channel, - int duration, - enum ieee80211_roc_type type) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct cfg80211_chan_def chandef; - struct iwl_mvm_phy_ctxt *phy_ctxt; - int ret, i; - - IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, - duration, type); - - flush_work(&mvm->roc_done_wk); - - mutex_lock(&mvm->mutex); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { - /* Use aux roc framework (HS20) */ - ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, - vif, duration); - goto out_unlock; - } - IWL_ERR(mvm, "hotspot not supported\n"); - ret = -EINVAL; - goto out_unlock; - case NL80211_IFTYPE_P2P_DEVICE: - /* handle below */ - break; - default: - IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); - ret = -EINVAL; - goto out_unlock; - } - - for (i = 0; i < NUM_PHY_CTX; i++) { - phy_ctxt = &mvm->phy_ctxts[i]; - if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) - continue; - - if (phy_ctxt->ref && channel == phy_ctxt->channel) { - /* - * Unbind the P2P_DEVICE from the current PHY context, - * and if the PHY context is not used 
remove it. - */ - ret = iwl_mvm_binding_remove_vif(mvm, vif); - if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) - goto out_unlock; - - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - - /* Bind the P2P_DEVICE to the current PHY Context */ - mvmvif->phy_ctxt = phy_ctxt; - - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (WARN(ret, "Failed binding P2P_DEVICE\n")) - goto out_unlock; - - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); - goto schedule_time_event; - } - } - - /* Need to update the PHY context only if the ROC channel changed */ - if (channel == mvmvif->phy_ctxt->channel) - goto schedule_time_event; - - cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); - - /* - * Change the PHY context configuration as it is currently referenced - * only by the P2P Device MAC - */ - if (mvmvif->phy_ctxt->ref == 1) { - ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, - &chandef, 1, 1); - if (ret) - goto out_unlock; - } else { - /* - * The PHY context is shared with other MACs. Need to remove the - * P2P Device from the binding, allocate an new PHY context and - * create a new binding - */ - phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!phy_ctxt) { - ret = -ENOSPC; - goto out_unlock; - } - - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, - 1, 1); - if (ret) { - IWL_ERR(mvm, "Failed to change PHY context\n"); - goto out_unlock; - } - - /* Unbind the P2P_DEVICE from the current PHY context */ - ret = iwl_mvm_binding_remove_vif(mvm, vif); - if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) - goto out_unlock; - - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - - /* Bind the P2P_DEVICE to the new allocated PHY context */ - mvmvif->phy_ctxt = phy_ctxt; - - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (WARN(ret, "Failed binding P2P_DEVICE\n")) - goto out_unlock; - - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); - } - -schedule_time_event: - /* Schedule the time events */ - ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); - -out_unlock: - mutex_unlock(&mvm->mutex); - IWL_DEBUG_MAC80211(mvm, "leave\n"); - return ret; -} - -static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - IWL_DEBUG_MAC80211(mvm, "enter\n"); - - mutex_lock(&mvm->mutex); - iwl_mvm_stop_roc(mvm); - mutex_unlock(&mvm->mutex); - - IWL_DEBUG_MAC80211(mvm, "leave\n"); - return 0; -} - -static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, - struct ieee80211_chanctx_conf *ctx) -{ - u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; - struct iwl_mvm_phy_ctxt *phy_ctxt; - int ret; - - lockdep_assert_held(&mvm->mutex); - - IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); - - phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!phy_ctxt) { - ret = -ENOSPC; - goto out; - } - - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, - ctx->rx_chains_static, - ctx->rx_chains_dynamic); - if (ret) { - IWL_ERR(mvm, "Failed to add PHY context\n"); - goto out; - } - - iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); - *phy_ctxt_id = phy_ctxt->id; -out: - return ret; -} - -static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_add_chanctx(mvm, ctx); - mutex_unlock(&mvm->mutex); - - return ret; -} - -static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, - struct ieee80211_chanctx_conf *ctx) -{ - u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; - struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - - 
lockdep_assert_held(&mvm->mutex); - - iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); -} - -static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - mutex_lock(&mvm->mutex); - __iwl_mvm_remove_chanctx(mvm, ctx); - mutex_unlock(&mvm->mutex); -} - -static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx, - u32 changed) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; - struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - - if (WARN_ONCE((phy_ctxt->ref > 1) && - (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | - IEEE80211_CHANCTX_CHANGE_RX_CHAINS | - IEEE80211_CHANCTX_CHANGE_RADAR | - IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), - "Cannot change PHY. Ref=%d, changed=0x%X\n", - phy_ctxt->ref, changed)) - return; - - mutex_lock(&mvm->mutex); - iwl_mvm_bt_coex_vif_change(mvm); - iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, - ctx->rx_chains_static, - ctx->rx_chains_dynamic); - mutex_unlock(&mvm->mutex); -} - -static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx, - bool switching_chanctx) -{ - u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; - struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - lockdep_assert_held(&mvm->mutex); - - mvmvif->phy_ctxt = phy_ctxt; - - switch (vif->type) { - case NL80211_IFTYPE_AP: - /* only needed if we're switching chanctx (i.e. during CSA) */ - if (switching_chanctx) { - mvmvif->ap_ibss_active = true; - break; - } - case NL80211_IFTYPE_ADHOC: - /* - * The AP binding flow is handled as part of the start_ap flow - * (in bss_info_changed), similarly for IBSS. - */ - ret = 0; - goto out; - case NL80211_IFTYPE_STATION: - break; - case NL80211_IFTYPE_MONITOR: - /* always disable PS when a monitor interface is active */ - mvmvif->ps_disabled = true; - break; - default: - ret = -EINVAL; - goto out; - } - - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (ret) - goto out; - - /* - * Power state must be updated before quotas, - * otherwise fw will complain. - */ - iwl_mvm_power_update_mac(mvm); - - /* Setting the quota at this stage is only required for monitor - * interfaces. For the other types, the bss_info changed flow - * will handle quota settings. - */ - if (vif->type == NL80211_IFTYPE_MONITOR) { - mvmvif->monitor_active = true; - ret = iwl_mvm_update_quotas(mvm, false, NULL); - if (ret) - goto out_remove_binding; - } - - /* Handle binding during CSA */ - if (vif->type == NL80211_IFTYPE_AP) { - iwl_mvm_update_quotas(mvm, false, NULL); - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } - - if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { - u32 duration = 2 * vif->bss_conf.beacon_int; - - /* iwl_mvm_protect_session() reads directly from the - * device (the system time), so make sure it is - * available. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); - if (ret) - goto out_remove_binding; - - /* Protect the session to make sure we hear the first - * beacon on the new channel. 
- */ - iwl_mvm_protect_session(mvm, vif, duration, duration, - vif->bss_conf.beacon_int / 2, - true); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); - - iwl_mvm_update_quotas(mvm, false, NULL); - } - - goto out; - -out_remove_binding: - iwl_mvm_binding_remove_vif(mvm, vif); - iwl_mvm_power_update_mac(mvm); -out: - if (ret) - mvmvif->phy_ctxt = NULL; - return ret; -} -static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); - mutex_unlock(&mvm->mutex); - - return ret; -} - -static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx, - bool switching_chanctx) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_vif *disabled_vif = NULL; - - lockdep_assert_held(&mvm->mutex); - - iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); - - switch (vif->type) { - case NL80211_IFTYPE_ADHOC: - goto out; - case NL80211_IFTYPE_MONITOR: - mvmvif->monitor_active = false; - mvmvif->ps_disabled = false; - break; - case NL80211_IFTYPE_AP: - /* This part is triggered only during CSA */ - if (!switching_chanctx || !mvmvif->ap_ibss_active) - goto out; - - mvmvif->csa_countdown = false; - - /* Set CS bit on all the stations */ - iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); - - /* Save blocked iface, the timeout is set on the next beacon */ - rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); - - mvmvif->ap_ibss_active = false; - break; - case NL80211_IFTYPE_STATION: - if (!switching_chanctx) - break; - - disabled_vif = vif; - - iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); - break; - default: - break; - } - - iwl_mvm_update_quotas(mvm, false, disabled_vif); - iwl_mvm_binding_remove_vif(mvm, vif); - -out: - mvmvif->phy_ctxt = NULL; - iwl_mvm_power_update_mac(mvm); -} - -static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); - mutex_unlock(&mvm->mutex); -} - -static int -iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, - struct ieee80211_vif_chanctx_switch *vifs) -{ - int ret; - - mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); - __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); - - ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); - if (ret) { - IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); - goto out_reassign; - } - - ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, - true); - if (ret) { - IWL_ERR(mvm, - "failed to assign new_ctx during channel switch\n"); - goto out_remove; - } - - /* we don't support TDLS during DCM - can be caused by channel switch */ - if (iwl_mvm_phy_ctx_count(mvm) > 1) - iwl_mvm_teardown_tdls_peers(mvm); - - goto out; - -out_remove: - __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); - -out_reassign: - if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { - IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); - goto out_restart; - } - - if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, - true)) { - IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); - goto out_restart; - } - - goto out; - -out_restart: - /* 
things keep failing, better restart the hw */ - iwl_mvm_nic_restart(mvm, false); - -out: - mutex_unlock(&mvm->mutex); - - return ret; -} - -static int -iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, - struct ieee80211_vif_chanctx_switch *vifs) -{ - int ret; - - mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); - - ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, - true); - if (ret) { - IWL_ERR(mvm, - "failed to assign new_ctx during channel switch\n"); - goto out_reassign; - } - - goto out; - -out_reassign: - if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, - true)) { - IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); - goto out_restart; - } - - goto out; - -out_restart: - /* things keep failing, better restart the hw */ - iwl_mvm_nic_restart(mvm, false); - -out: - mutex_unlock(&mvm->mutex); - - return ret; -} - -static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif_chanctx_switch *vifs, - int n_vifs, - enum ieee80211_chanctx_switch_mode mode) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - /* we only support a single-vif right now */ - if (n_vifs > 1) - return -EOPNOTSUPP; - - switch (mode) { - case CHANCTX_SWMODE_SWAP_CONTEXTS: - ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); - break; - case CHANCTX_SWMODE_REASSIGN_VIF: - ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); - break; - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} - -static int iwl_mvm_set_tim(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - bool set) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - - if (!mvm_sta || !mvm_sta->vif) { - IWL_ERR(mvm, "Station is not associated to a vif\n"); - return -EINVAL; - } - - return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); -} - -#ifdef CONFIG_NL80211_TESTMODE -static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { - [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, - [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, - [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, -}; - -static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - void *data, int len) -{ - struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; - int err; - u32 noa_duration; - - err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy); - if (err) - return err; - - if (!tb[IWL_MVM_TM_ATTR_CMD]) - return -EINVAL; - - switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { - case IWL_MVM_TM_CMD_SET_NOA: - if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || - !vif->bss_conf.enable_beacon || - !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) - return -EINVAL; - - noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); - if (noa_duration >= vif->bss_conf.beacon_int) - return -EINVAL; - - mvm->noa_duration = noa_duration; - mvm->noa_vif = vif; - - return iwl_mvm_update_quotas(mvm, false, NULL); - case IWL_MVM_TM_CMD_SET_BEACON_FILTER: - /* must be associated client vif - ignore authorized */ - if (!vif || vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || - !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) - return -EINVAL; - - if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) - return iwl_mvm_enable_beacon_filter(mvm, vif, 0); - return iwl_mvm_disable_beacon_filter(mvm, vif, 0); - } - - return -EOPNOTSUPP; -} - -static int iwl_mvm_mac_testmode_cmd(struct 
ieee80211_hw *hw, - struct ieee80211_vif *vif, - void *data, int len) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int err; - - mutex_lock(&mvm->mutex); - err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); - mutex_unlock(&mvm->mutex); - - return err; -} -#endif - -static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *chsw) -{ - /* By implementing this operation, we prevent mac80211 from - * starting its own channel switch timer, so that we can call - * ieee80211_chswitch_done() ourselves at the right time - * (which is when the absence time event starts). - */ - - IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), - "dummy channel switch op\n"); -} - -static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *chsw) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct ieee80211_vif *csa_vif; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 apply_time; - int ret; - - mutex_lock(&mvm->mutex); - - mvmvif->csa_failed = false; - - IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", - chsw->chandef.center_freq1); - - iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH); - - switch (vif->type) { - case NL80211_IFTYPE_AP: - csa_vif = - rcu_dereference_protected(mvm->csa_vif, - lockdep_is_held(&mvm->mutex)); - if (WARN_ONCE(csa_vif && csa_vif->csa_active, - "Another CSA is already in progress")) { - ret = -EBUSY; - goto out_unlock; - } - - rcu_assign_pointer(mvm->csa_vif, vif); - - if (WARN_ONCE(mvmvif->csa_countdown, - "Previous CSA countdown didn't complete")) { - ret = -EBUSY; - goto out_unlock; - } - - break; - case NL80211_IFTYPE_STATION: - /* Schedule the time event to a bit before beacon 1, - * to make sure we're in the new channel when the - * GO/AP arrives. 
- */ - apply_time = chsw->device_timestamp + - ((vif->bss_conf.beacon_int * (chsw->count - 1) - - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); - - if (chsw->block_tx) - iwl_mvm_csa_client_absent(mvm, vif); - - iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, - apply_time); - if (mvmvif->bf_data.bf_enabled) { - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); - if (ret) - goto out_unlock; - } - - break; - default: - break; - } - - mvmvif->ps_disabled = true; - - ret = iwl_mvm_power_update_ps(mvm); - if (ret) - goto out_unlock; - - /* we won't be on this channel any longer */ - iwl_mvm_teardown_tdls_peers(mvm); - -out_unlock: - mutex_unlock(&mvm->mutex); - - return ret; -} - -static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - mutex_lock(&mvm->mutex); - - if (mvmvif->csa_failed) { - mvmvif->csa_failed = false; - ret = -EIO; - goto out_unlock; - } - - if (vif->type == NL80211_IFTYPE_STATION) { - struct iwl_mvm_sta *mvmsta; - - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, - mvmvif->ap_sta_id); - - if (WARN_ON(!mvmsta)) { - ret = -EIO; - goto out_unlock; - } - - iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); - - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); - if (ret) - goto out_unlock; - - iwl_mvm_stop_session_protection(mvm, vif); - } - - mvmvif->ps_disabled = false; - - ret = iwl_mvm_power_update_ps(mvm); - -out_unlock: - mutex_unlock(&mvm->mutex); - - return ret; -} - -static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u32 queues, bool drop) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif; - struct iwl_mvm_sta *mvmsta; - struct ieee80211_sta *sta; - int i; - u32 msk = 0; - - if (!vif || vif->type != NL80211_IFTYPE_STATION) - return; - - mutex_lock(&mvm->mutex); - mvmvif = iwl_mvm_vif_from_mac80211(vif); - - /* flush the AP-station and all TDLS peers */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta)) - continue; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->vif != vif) - continue; - - /* make sure only TDLS peers or the AP are flushed */ - WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); - - msk |= mvmsta->tfd_queue_msk; - } - - if (drop) { - if (iwl_mvm_flush_tx_path(mvm, msk, 0)) - IWL_ERR(mvm, "flush request fail\n"); - mutex_unlock(&mvm->mutex); - } else { - mutex_unlock(&mvm->mutex); - - /* this can take a while, and we may need/want other operations - * to succeed while doing this, so do it without the mutex held - */ - iwl_trans_wait_tx_queue_empty(mvm->trans, msk); - } -} - -static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, - struct survey_info *survey) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - memset(survey, 0, sizeof(*survey)); - - /* only support global statistics right now */ - if (idx != 0) - return -ENOENT; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) - return -ENOENT; - - mutex_lock(&mvm->mutex); - - if (mvm->ucode_loaded) { - ret = iwl_mvm_request_statistics(mvm, false); - if (ret) - goto out; - } - - survey->filled = SURVEY_INFO_TIME | - SURVEY_INFO_TIME_RX | - SURVEY_INFO_TIME_TX | - SURVEY_INFO_TIME_SCAN; - survey->time = 
mvm->accu_radio_stats.on_time_rf + - mvm->radio_stats.on_time_rf; - do_div(survey->time, USEC_PER_MSEC); - - survey->time_rx = mvm->accu_radio_stats.rx_time + - mvm->radio_stats.rx_time; - do_div(survey->time_rx, USEC_PER_MSEC); - - survey->time_tx = mvm->accu_radio_stats.tx_time + - mvm->radio_stats.tx_time; - do_div(survey->time_tx, USEC_PER_MSEC); - - survey->time_scan = mvm->accu_radio_stats.on_time_scan + - mvm->radio_stats.on_time_scan; - do_div(survey->time_scan, USEC_PER_MSEC); - - ret = 0; - out: - mutex_unlock(&mvm->mutex); - return ret; -} - -static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct station_info *sinfo) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) - return; - - /* if beacon filtering isn't on mac80211 does it anyway */ - if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) - return; - - if (!vif->bss_conf.assoc) - return; - - mutex_lock(&mvm->mutex); - - if (mvmvif->ap_sta_id != mvmsta->sta_id) - goto unlock; - - if (iwl_mvm_request_statistics(mvm, false)) - goto unlock; - - sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + - mvmvif->beacon_stats.accu_num_beacons; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); - if (mvmvif->beacon_stats.avg_signal) { - /* firmware only reports a value after RXing a few beacons */ - sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG); - } - unlock: - mutex_unlock(&mvm->mutex); -} - -static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) -{ -#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) 
\ - do { \ - if ((_cnt) && --(_cnt)) \ - break; \ - iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\ - } while (0) - - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_mlme *trig_mlme; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) - return; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); - trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) - return; - - if (event->u.mlme.data == ASSOC_EVENT) { - if (event->u.mlme.status == MLME_DENIED) - CHECK_MLME_TRIGGER(mvm, trig, buf, - trig_mlme->stop_assoc_denied, - "DENIED ASSOC: reason %d", - event->u.mlme.reason); - else if (event->u.mlme.status == MLME_TIMEOUT) - CHECK_MLME_TRIGGER(mvm, trig, buf, - trig_mlme->stop_assoc_timeout, - "ASSOC TIMEOUT"); - } else if (event->u.mlme.data == AUTH_EVENT) { - if (event->u.mlme.status == MLME_DENIED) - CHECK_MLME_TRIGGER(mvm, trig, buf, - trig_mlme->stop_auth_denied, - "DENIED AUTH: reason %d", - event->u.mlme.reason); - else if (event->u.mlme.status == MLME_TIMEOUT) - CHECK_MLME_TRIGGER(mvm, trig, buf, - trig_mlme->stop_auth_timeout, - "AUTH TIMEOUT"); - } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { - CHECK_MLME_TRIGGER(mvm, trig, buf, - trig_mlme->stop_rx_deauth, - "DEAUTH RX %d", event->u.mlme.reason); - } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { - CHECK_MLME_TRIGGER(mvm, trig, buf, - trig_mlme->stop_tx_deauth, - "DEAUTH TX %d", event->u.mlme.reason); - } -#undef CHECK_MLME_TRIGGER -} - -static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) -{ - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_ba *ba_trig; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) - return; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); - ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) - return; - - if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) - return; - - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "BAR received from %pM, tid %d, ssn %d", - event->u.ba.sta->addr, event->u.ba.tid, - event->u.ba.ssn); -} - -static void -iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) -{ - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_ba *ba_trig; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) - return; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); - ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) - return; - - if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid))) - return; - - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Frame from %pM timed out, tid %d", - event->u.ba.sta->addr, event->u.ba.tid); -} - -static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - switch (event->type) { - case MLME_EVENT: - iwl_mvm_event_mlme_callback(mvm, vif, event); - break; - case BAR_RX_EVENT: - iwl_mvm_event_bar_rx_callback(mvm, vif, event); - break; - case BA_FRAME_TIMEOUT: - iwl_mvm_event_frame_timeout_callback(mvm, vif, event); - break; - default: - break; - } -} - -const struct ieee80211_ops iwl_mvm_hw_ops = { - .tx = iwl_mvm_mac_tx, - .ampdu_action = iwl_mvm_mac_ampdu_action, - .start = iwl_mvm_mac_start, - .reconfig_complete = iwl_mvm_mac_reconfig_complete, - .stop = 
iwl_mvm_mac_stop, - .add_interface = iwl_mvm_mac_add_interface, - .remove_interface = iwl_mvm_mac_remove_interface, - .config = iwl_mvm_mac_config, - .prepare_multicast = iwl_mvm_prepare_multicast, - .configure_filter = iwl_mvm_configure_filter, - .config_iface_filter = iwl_mvm_config_iface_filter, - .bss_info_changed = iwl_mvm_bss_info_changed, - .hw_scan = iwl_mvm_mac_hw_scan, - .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, - .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, - .sta_state = iwl_mvm_mac_sta_state, - .sta_notify = iwl_mvm_mac_sta_notify, - .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, - .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, - .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, - .sta_rc_update = iwl_mvm_sta_rc_update, - .conf_tx = iwl_mvm_mac_conf_tx, - .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, - .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, - .flush = iwl_mvm_mac_flush, - .sched_scan_start = iwl_mvm_mac_sched_scan_start, - .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, - .set_key = iwl_mvm_mac_set_key, - .update_tkip_key = iwl_mvm_mac_update_tkip_key, - .remain_on_channel = iwl_mvm_roc, - .cancel_remain_on_channel = iwl_mvm_cancel_roc, - .add_chanctx = iwl_mvm_add_chanctx, - .remove_chanctx = iwl_mvm_remove_chanctx, - .change_chanctx = iwl_mvm_change_chanctx, - .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, - .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, - .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, - - .start_ap = iwl_mvm_start_ap_ibss, - .stop_ap = iwl_mvm_stop_ap_ibss, - .join_ibss = iwl_mvm_start_ap_ibss, - .leave_ibss = iwl_mvm_stop_ap_ibss, - - .set_tim = iwl_mvm_set_tim, - - .channel_switch = iwl_mvm_channel_switch, - .pre_channel_switch = iwl_mvm_pre_channel_switch, - .post_channel_switch = iwl_mvm_post_channel_switch, - - .tdls_channel_switch = iwl_mvm_tdls_channel_switch, - .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, - .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, - - .event_callback = iwl_mvm_mac_event_callback, - - CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) - -#ifdef CONFIG_PM_SLEEP - /* look at d3.c */ - .suspend = iwl_mvm_suspend, - .resume = iwl_mvm_resume, - .set_wakeup = iwl_mvm_set_wakeup, - .set_rekey_data = iwl_mvm_set_rekey_data, -#if IS_ENABLED(CONFIG_IPV6) - .ipv6_addr_change = iwl_mvm_ipv6_addr_change, -#endif - .set_default_unicast_key = iwl_mvm_set_default_unicast_key, -#endif - .get_survey = iwl_mvm_mac_get_survey, - .sta_statistics = iwl_mvm_mac_sta_statistics, -}; diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h deleted file mode 100644 index 4bde2d027dcd..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ /dev/null @@ -1,1535 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __IWL_MVM_H__ -#define __IWL_MVM_H__ - -#include -#include -#include -#include - -#include "iwl-op-mode.h" -#include "iwl-trans.h" -#include "iwl-notif-wait.h" -#include "iwl-eeprom-parse.h" -#include "iwl-fw-file.h" -#include "iwl-config.h" -#include "sta.h" -#include "fw-api.h" -#include "constants.h" -#include "tof.h" - -#define IWL_MVM_MAX_ADDRESSES 5 -/* RSSI offset for WkP */ -#define IWL_RSSI_OFFSET 50 -#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 -/* A TimeUnit is 1024 microsecond */ -#define MSEC_TO_TU(_msec) (_msec*1000/1024) - -/* For GO, this value represents the number of TUs before CSA "beacon - * 0" TBTT when the CSA time-event needs to be scheduled to start. It - * must be big enough to ensure that we switch in time. - */ -#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 - -/* For client, this value represents the number of TUs before CSA - * "beacon 1" TBTT, instead. This is because we don't know when the - * GO/AP will be in the new channel, so we switch early enough. 
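 * [Editor's note, not part of the original patch: one TU is 1024 usec, so the
 *  MSEC_TO_TU() macro above converts with integer arithmetic, e.g.
 *  MSEC_TO_TU(100) = 100 * 1000 / 1024 = 97 TU. The 10 TU client margin
 *  defined below therefore corresponds to roughly 10.24 ms before the
 *  "beacon 1" TBTT.]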
- */ -#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10 - -/* - * This value (in TUs) is used to fine tune the CSA NoA end time which should - * be just before "beacon 0" TBTT. - */ -#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4 - -/* - * Number of beacons to transmit on a new channel until we unblock tx to - * the stations, even if we didn't identify them on a new channel - */ -#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 - -extern const struct ieee80211_ops iwl_mvm_hw_ops; - -/** - * struct iwl_mvm_mod_params - module parameters for iwlmvm - * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. - * We will register to mac80211 to have testmode working. The NIC must not - * be up'ed after the INIT fw asserted. This is useful to be able to use - * proprietary tools over testmode to debug the INIT fw. - * @tfd_q_hang_detect: enabled the detection of hung transmit queues - * @power_scheme: one of enum iwl_power_scheme - */ -struct iwl_mvm_mod_params { - bool init_dbg; - bool tfd_q_hang_detect; - int power_scheme; -}; -extern struct iwl_mvm_mod_params iwlmvm_mod_params; - -/** - * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump - * - * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode - * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the - * transport's data. - * @trans_len: length of the valid data in trans_ptr - * @op_mode_len: length of the valid data in op_mode_ptr - */ -struct iwl_mvm_dump_ptrs { - struct iwl_trans_dump_data *trans_ptr; - void *op_mode_ptr; - u32 op_mode_len; -}; - -/** - * struct iwl_mvm_dump_desc - describes the dump - * @len: length of trig_desc->data - * @trig_desc: the description of the dump - */ -struct iwl_mvm_dump_desc { - size_t len; - /* must be last */ - struct iwl_fw_error_dump_trigger_desc trig_desc; -}; - -extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert; - -struct iwl_mvm_phy_ctxt { - u16 id; - u16 color; - u32 ref; - - /* - * TODO: This should probably be removed. 
Currently here only for rate - * scaling algorithm - */ - struct ieee80211_channel *channel; -}; - -struct iwl_mvm_time_event_data { - struct ieee80211_vif *vif; - struct list_head list; - unsigned long end_jiffies; - u32 duration; - bool running; - u32 uid; - - /* - * The access to the 'id' field must be done when the - * mvm->time_event_lock is held, as it value is used to indicate - * if the te is in the time event list or not (when id == TE_MAX) - */ - u32 id; -}; - - /* Power management */ - -/** - * enum iwl_power_scheme - * @IWL_POWER_LEVEL_CAM - Continuously Active Mode - * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default) - * @IWL_POWER_LEVEL_LP - Low Power - */ -enum iwl_power_scheme { - IWL_POWER_SCHEME_CAM = 1, - IWL_POWER_SCHEME_BPS, - IWL_POWER_SCHEME_LP -}; - -#define IWL_CONN_MAX_LISTEN_INTERVAL 10 -#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 - -#ifdef CONFIG_IWLWIFI_DEBUGFS -enum iwl_dbgfs_pm_mask { - MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), - MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1), - MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), - MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), - MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), - MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), - MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), - MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), - MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9), - MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10), -}; - -struct iwl_dbgfs_pm { - u16 keep_alive_seconds; - u32 rx_data_timeout; - u32 tx_data_timeout; - bool skip_over_dtim; - u8 skip_dtim_periods; - bool lprx_ena; - u32 lprx_rssi_threshold; - bool snooze_ena; - bool uapsd_misbehaving; - bool use_ps_poll; - int mask; -}; - -/* beacon filtering */ - -enum iwl_dbgfs_bf_mask { - MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), - MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), - MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), - MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3), - MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4), - MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5), - MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6), - MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7), - MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8), - MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9), - MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10), -}; - -struct iwl_dbgfs_bf { - u32 bf_energy_delta; - u32 bf_roaming_energy_delta; - u32 bf_roaming_state; - u32 bf_temp_threshold; - u32 bf_temp_fast_filter; - u32 bf_temp_slow_filter; - u32 bf_enable_beacon_filter; - u32 bf_debug_flag; - u32 bf_escape_timer; - u32 ba_escape_timer; - u32 ba_enable_beacon_abort; - int mask; -}; -#endif - -enum iwl_mvm_smps_type_request { - IWL_MVM_SMPS_REQ_BT_COEX, - IWL_MVM_SMPS_REQ_TT, - IWL_MVM_SMPS_REQ_PROT, - NUM_IWL_MVM_SMPS_REQ, -}; - -enum iwl_mvm_ref_type { - IWL_MVM_REF_UCODE_DOWN, - IWL_MVM_REF_SCAN, - IWL_MVM_REF_ROC, - IWL_MVM_REF_ROC_AUX, - IWL_MVM_REF_P2P_CLIENT, - IWL_MVM_REF_AP_IBSS, - IWL_MVM_REF_USER, - IWL_MVM_REF_TX, - IWL_MVM_REF_TX_AGG, - IWL_MVM_REF_ADD_IF, - IWL_MVM_REF_START_AP, - IWL_MVM_REF_BSS_CHANGED, - IWL_MVM_REF_PREPARE_TX, - IWL_MVM_REF_PROTECT_TDLS, - IWL_MVM_REF_CHECK_CTKILL, - IWL_MVM_REF_PRPH_READ, - IWL_MVM_REF_PRPH_WRITE, - IWL_MVM_REF_NMI, - IWL_MVM_REF_TM_CMD, - IWL_MVM_REF_EXIT_WORK, - IWL_MVM_REF_PROTECT_CSA, - IWL_MVM_REF_FW_DBG_COLLECT, - - /* update debugfs.c when changing this */ - - IWL_MVM_REF_COUNT, -}; - -enum iwl_bt_force_ant_mode { - BT_FORCE_ANT_DIS = 0, - BT_FORCE_ANT_AUTO, - BT_FORCE_ANT_BT, - BT_FORCE_ANT_WIFI, - - BT_FORCE_ANT_MAX, -}; - -/** -* struct iwl_mvm_vif_bf_data - beacon filtering related data -* @bf_enabled: indicates if beacon filtering is enabled -* 
@ba_enabled: indicated if beacon abort is enabled -* @ave_beacon_signal: average beacon signal -* @last_cqm_event: rssi of the last cqm event -* @bt_coex_min_thold: minimum threshold for BT coex -* @bt_coex_max_thold: maximum threshold for BT coex -* @last_bt_coex_event: rssi of the last BT coex event -*/ -struct iwl_mvm_vif_bf_data { - bool bf_enabled; - bool ba_enabled; - int ave_beacon_signal; - int last_cqm_event; - int bt_coex_min_thold; - int bt_coex_max_thold; - int last_bt_coex_event; -}; - -/** - * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context - * @id: between 0 and 3 - * @color: to solve races upon MAC addition and removal - * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA - * @bssid: BSSID for this (client) interface - * @associated: indicates that we're currently associated, used only for - * managing the firmware state in iwl_mvm_bss_info_changed_station() - * @ap_assoc_sta_count: count of stations associated to us - valid only - * if VIF type is AP - * @uploaded: indicates the MAC context has been added to the device - * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface - * should get quota etc. - * @pm_enabled - Indicate if MAC power management is allowed - * @monitor_active: indicates that monitor context is configured, and that the - * interface should get quota etc. - * @low_latency: indicates that this interface is in low-latency mode - * (VMACLowLatencyMode) - * @ps_disabled: indicates that this interface requires PS to be disabled - * @queue_params: QoS params for this MAC - * @bcast_sta: station used for broadcast packets. Used by the following - * vifs: P2P_DEVICE, GO and AP. - * @beacon_skb: the skb used to hold the AP/GO beacon template - * @smps_requests: the SMPS requests of different parts of the driver, - * combined on update to yield the overall request to mac80211. - * @beacon_stats: beacon statistics, containing the # of received beacons, - * # of received beacons accumulated over FW restart, and the current - * average signal of beacons retrieved from the firmware - * @csa_failed: CSA failed to schedule time event, report an error later - * @features: hw features active for this vif - */ -struct iwl_mvm_vif { - struct iwl_mvm *mvm; - u16 id; - u16 color; - u8 ap_sta_id; - - u8 bssid[ETH_ALEN]; - bool associated; - u8 ap_assoc_sta_count; - - bool uploaded; - bool ap_ibss_active; - bool pm_enabled; - bool monitor_active; - bool low_latency; - bool ps_disabled; - struct iwl_mvm_vif_bf_data bf_data; - - struct { - u32 num_beacons, accu_num_beacons; - u8 avg_signal; - } beacon_stats; - - u32 ap_beacon_time; - - enum iwl_tsf_id tsf_id; - - /* - * QoS data from mac80211, need to store this here - * as mac80211 has a separate callback but we need - * to have the data for the MAC context - */ - struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; - struct iwl_mvm_time_event_data time_event_data; - struct iwl_mvm_time_event_data hs_time_event_data; - - struct iwl_mvm_int_sta bcast_sta; - - /* - * Assigned while mac80211 has the interface in a channel context, - * or, for P2P Device, while it exists. 
- */ - struct iwl_mvm_phy_ctxt *phy_ctxt; - -#ifdef CONFIG_PM_SLEEP - /* WoWLAN GTK rekey data */ - struct { - u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; - __le64 replay_ctr; - bool valid; - } rekey_data; - - int tx_key_idx; - - bool seqno_valid; - u16 seqno; -#endif - -#if IS_ENABLED(CONFIG_IPV6) - /* IPv6 addresses for WoWLAN */ - struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; - int num_target_ipv6_addrs; -#endif - -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct dentry *dbgfs_dir; - struct dentry *dbgfs_slink; - struct iwl_dbgfs_pm dbgfs_pm; - struct iwl_dbgfs_bf dbgfs_bf; - struct iwl_mac_power_cmd mac_pwr_cmd; -#endif - - enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; - - /* FW identified misbehaving AP */ - u8 uapsd_misbehaving_bssid[ETH_ALEN]; - - /* Indicates that CSA countdown may be started */ - bool csa_countdown; - bool csa_failed; - - /* TCP Checksum Offload */ - netdev_features_t features; -}; - -static inline struct iwl_mvm_vif * -iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) -{ - return (void *)vif->drv_priv; -} - -extern const u8 tid_to_mac80211_ac[]; - -#define IWL_MVM_SCAN_STOPPING_SHIFT 8 - -enum iwl_scan_status { - IWL_MVM_SCAN_REGULAR = BIT(0), - IWL_MVM_SCAN_SCHED = BIT(1), - IWL_MVM_SCAN_NETDETECT = BIT(2), - - IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8), - IWL_MVM_SCAN_STOPPING_SCHED = BIT(9), - IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10), - - IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | - IWL_MVM_SCAN_STOPPING_REGULAR, - IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED | - IWL_MVM_SCAN_STOPPING_SCHED, - IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | - IWL_MVM_SCAN_STOPPING_NETDETECT, - - IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, - IWL_MVM_SCAN_MASK = 0xff, -}; - -/** - * struct iwl_nvm_section - describes an NVM section in memory. - * - * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, - * and saved for later use by the driver. Not all NVM sections are saved - * this way, only the needed ones. - */ -struct iwl_nvm_section { - u16 length; - const u8 *data; -}; - -/** - * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure - * @ct_kill_exit: worker to exit thermal kill - * @dynamic_smps: Is thermal throttling enabled dynamic_smps? - * @tx_backoff: The current thremal throttling tx backoff in uSec. - * @min_backoff: The minimal tx backoff due to power restrictions - * @params: Parameters to configure the thermal throttling algorithm. - * @throttle: Is thermal throttling is active? 
- */ -struct iwl_mvm_tt_mgmt { - struct delayed_work ct_kill_exit; - bool dynamic_smps; - u32 tx_backoff; - u32 min_backoff; - struct iwl_tt_params params; - bool throttle; -}; - -#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 - -struct iwl_mvm_frame_stats { - u32 legacy_frames; - u32 ht_frames; - u32 vht_frames; - u32 bw_20_frames; - u32 bw_40_frames; - u32 bw_80_frames; - u32 bw_160_frames; - u32 sgi_frames; - u32 ngi_frames; - u32 siso_frames; - u32 mimo2_frames; - u32 agg_frames; - u32 ampdu_count; - u32 success_frames; - u32 fail_frames; - u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES]; - int last_frame_idx; -}; - -enum { - D0I3_DEFER_WAKEUP, - D0I3_PENDING_WAKEUP, -}; - -#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff -#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 -#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 - -enum iwl_mvm_tdls_cs_state { - IWL_MVM_TDLS_SW_IDLE = 0, - IWL_MVM_TDLS_SW_REQ_SENT, - IWL_MVM_TDLS_SW_RESP_RCVD, - IWL_MVM_TDLS_SW_REQ_RCVD, - IWL_MVM_TDLS_SW_ACTIVE, -}; - -struct iwl_mvm_shared_mem_cfg { - u32 shared_mem_addr; - u32 shared_mem_size; - u32 sample_buff_addr; - u32 sample_buff_size; - u32 txfifo_addr; - u32 txfifo_size[TX_FIFO_MAX_NUM]; - u32 rxfifo_size[RX_FIFO_MAX_NUM]; - u32 page_buff_addr; - u32 page_buff_size; -}; - -struct iwl_mvm { - /* for logger access */ - struct device *dev; - - struct iwl_trans *trans; - const struct iwl_fw *fw; - const struct iwl_cfg *cfg; - struct iwl_phy_db *phy_db; - struct ieee80211_hw *hw; - - /* for protecting access to iwl_mvm */ - struct mutex mutex; - struct list_head async_handlers_list; - spinlock_t async_handlers_lock; - struct work_struct async_handlers_wk; - - struct work_struct roc_done_wk; - - unsigned long status; - - /* - * for beacon filtering - - * currently only one interface can be supported - */ - struct iwl_mvm_vif *bf_allowed_vif; - - enum iwl_ucode_type cur_ucode; - bool ucode_loaded; - bool calibrating; - u32 error_event_table; - u32 log_event_table; - u32 umac_error_event_table; - bool support_umac_log; - struct iwl_sf_region sf_space; - - u32 ampdu_ref; - - struct iwl_notif_wait_data notif_wait; - - struct mvm_statistics_rx rx_stats; - - struct { - u64 rx_time; - u64 tx_time; - u64 on_time_rf; - u64 on_time_scan; - } radio_stats, accu_radio_stats; - - struct { - /* Map to HW queue */ - u32 hw_queue_to_mac80211; - u8 hw_queue_refcount; - bool setup_reserved; - u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ - } queue_info[IWL_MAX_HW_QUEUES]; - spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ - atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; - - const char *nvm_file_name; - struct iwl_nvm_data *nvm_data; - /* NVM sections */ - struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; - - /* Paging section */ - struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; - u16 num_of_paging_blk; - u16 num_of_pages_in_last_blk; - - /* EEPROM MAC addresses */ - struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; - - /* data related to data path */ - struct iwl_rx_phy_info last_phy_info; - struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; - struct work_struct sta_drained_wk; - unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; - atomic_t pending_frames[IWL_MVM_STATION_COUNT]; - u32 tfd_drained[IWL_MVM_STATION_COUNT]; - u8 rx_ba_sessions; - - /* configured by mac80211 */ - u32 rts_threshold; - - /* Scan status, cmd (pre-allocated) and auxiliary station */ - unsigned int scan_status; - void *scan_cmd; - struct 
iwl_mcast_filter_cmd *mcast_filter_cmd; - - /* max number of simultaneous scans the FW supports */ - unsigned int max_scans; - - /* UMAC scan tracking */ - u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; - - /* rx chain antennas set through debugfs for the scan command */ - u8 scan_rx_ant; - -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - /* broadcast filters to configure for each associated station */ - const struct iwl_fw_bcast_filter *bcast_filters; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct { - bool override; - struct iwl_bcast_filter_cmd cmd; - } dbgfs_bcast_filtering; -#endif -#endif - - /* Internal station */ - struct iwl_mvm_int_sta aux_sta; - - bool last_ebs_successful; - - u8 scan_last_antenna_idx; /* to toggle TX between antennas */ - u8 mgmt_last_antenna_idx; - - /* last smart fifo state that was successfully sent to firmware */ - enum iwl_sf_state sf_state; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct dentry *debugfs_dir; - u32 dbgfs_sram_offset, dbgfs_sram_len; - u32 dbgfs_prph_reg_addr; - bool disable_power_off; - bool disable_power_off_d3; - - bool scan_iter_notif_enabled; - - struct debugfs_blob_wrapper nvm_hw_blob; - struct debugfs_blob_wrapper nvm_sw_blob; - struct debugfs_blob_wrapper nvm_calib_blob; - struct debugfs_blob_wrapper nvm_prod_blob; - struct debugfs_blob_wrapper nvm_phy_sku_blob; - - struct iwl_mvm_frame_stats drv_rx_stats; - spinlock_t drv_stats_lock; - u16 dbgfs_rx_phyinfo; -#endif - - struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; - - struct list_head time_event_list; - spinlock_t time_event_lock; - - /* - * A bitmap indicating the index of the key in use. The firmware - * can hold 16 keys at most. Reflect this fact. - */ - unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; - u8 fw_key_deleted[STA_KEY_MAX_NUM]; - - /* references taken by the driver and spinlock protecting them */ - spinlock_t refs_lock; - u8 refs[IWL_MVM_REF_COUNT]; - - u8 vif_count; - - /* -1 for always, 0 for never, >0 for that many times */ - s8 restart_fw; - u8 fw_dbg_conf; - struct delayed_work fw_dump_wk; - struct iwl_mvm_dump_desc *fw_dump_desc; - struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; - -#ifdef CONFIG_IWLWIFI_LEDS - struct led_classdev led; -#endif - - struct ieee80211_vif *p2p_device_vif; - -#ifdef CONFIG_PM_SLEEP - struct wiphy_wowlan_support wowlan; - int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; - - /* sched scan settings for net detect */ - struct cfg80211_sched_scan_request *nd_config; - struct ieee80211_scan_ies nd_ies; - struct cfg80211_match_set *nd_match_sets; - int n_nd_match_sets; - struct ieee80211_channel **nd_channels; - int n_nd_channels; - bool net_detect; -#ifdef CONFIG_IWLWIFI_DEBUGFS - bool d3_wake_sysassert; - bool d3_test_active; - bool store_d3_resume_sram; - void *d3_resume_sram; - u32 d3_test_pme_ptr; - struct ieee80211_vif *keep_vif; - u32 last_netdetect_scans; /* no. 
of scans in the last net-detect wake */ -#endif -#endif - - /* d0i3 */ - u8 d0i3_ap_sta_id; - bool d0i3_offloading; - struct work_struct d0i3_exit_work; - struct sk_buff_head d0i3_tx; - /* protect d0i3_suspend_flags */ - struct mutex d0i3_suspend_mutex; - unsigned long d0i3_suspend_flags; - /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ - spinlock_t d0i3_tx_lock; - wait_queue_head_t d0i3_exit_waitq; - - /* BT-Coex */ - u8 bt_ack_kill_msk[NUM_PHY_CTX]; - u8 bt_cts_kill_msk[NUM_PHY_CTX]; - - struct iwl_bt_coex_profile_notif_old last_bt_notif_old; - struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old; - struct iwl_bt_coex_profile_notif last_bt_notif; - struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; - - u32 last_ant_isol; - u8 last_corun_lut; - u8 bt_tx_prio; - enum iwl_bt_force_ant_mode bt_force_ant_mode; - - /* Aux ROC */ - struct list_head aux_roc_te_list; - - /* Thermal Throttling and CTkill */ - struct iwl_mvm_tt_mgmt thermal_throttle; - s32 temperature; /* Celsius */ - /* - * Debug option to set the NIC temperature. This option makes the - * driver think this is the actual NIC temperature, and ignore the - * real temperature that is received from the fw - */ - bool temperature_test; /* Debug test temperature is enabled */ - - struct iwl_time_quota_cmd last_quota_cmd; - -#ifdef CONFIG_NL80211_TESTMODE - u32 noa_duration; - struct ieee80211_vif *noa_vif; -#endif - - /* Tx queues */ - u8 aux_queue; - u8 first_agg_queue; - u8 last_agg_queue; - - /* Indicate if device power save is allowed */ - u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ - - struct ieee80211_vif __rcu *csa_vif; - struct ieee80211_vif __rcu *csa_tx_blocked_vif; - u8 csa_tx_block_bcn_timeout; - - /* system time of last beacon (for AP/GO interface) */ - u32 ap_last_beacon_gp2; - - bool lar_regdom_set; - enum iwl_mcc_source mcc_src; - - u8 low_latency_agg_frame_limit; - - /* TDLS channel switch data */ - struct { - struct delayed_work dwork; - enum iwl_mvm_tdls_cs_state state; - - /* - * Current cs sta - might be different from periodic cs peer - * station. Value is meaningless when the cs-state is idle. - */ - u8 cur_sta_id; - - /* TDLS periodic channel-switch peer */ - struct { - u8 sta_id; - u8 op_class; - bool initiator; /* are we the link initiator */ - struct cfg80211_chan_def chandef; - struct sk_buff *skb; /* ch sw template */ - u32 ch_sw_tm_ie; - - /* timestamp of last ch-sw request sent (GP2 time) */ - u32 sent_timestamp; - } peer; - } tdls_cs; - - struct iwl_mvm_shared_mem_cfg shared_mem_cfg; - - u32 ciphers[6]; - struct iwl_mvm_tof_data tof_data; -}; - -/* Extract MVM priv from op_mode and _hw */ -#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \ - ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific) - -#define IWL_MAC80211_GET_MVM(_hw) \ - IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) - -enum iwl_mvm_status { - IWL_MVM_STATUS_HW_RFKILL, - IWL_MVM_STATUS_HW_CTKILL, - IWL_MVM_STATUS_ROC_RUNNING, - IWL_MVM_STATUS_IN_HW_RESTART, - IWL_MVM_STATUS_IN_D0I3, - IWL_MVM_STATUS_ROC_AUX_RUNNING, - IWL_MVM_STATUS_D3_RECONFIG, - IWL_MVM_STATUS_DUMPING_FW_LOG, -}; - -static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) -{ - return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || - test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); -} - -static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) -{ - return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); -} - -/* Must be called with rcu_read_lock() held and it can only be - * released when mvmsta is not needed anymore. 
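 * [Editor's note, not part of the original patch: an illustrative caller
 *  pattern for the helper defined below, assuming sta_id was taken from a
 *  firmware notification:
 *
 *	rcu_read_lock();
 *	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
 *	if (mvmsta)
 *		... use mvmsta under the RCU read lock ...
 *	rcu_read_unlock();
 *
 *  The _protected() variant below takes the same arguments but relies on
 *  mvm->mutex being held instead of the RCU read lock.]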
- */ -static inline struct iwl_mvm_sta * -iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id) -{ - struct ieee80211_sta *sta; - - if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) - return NULL; - - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - - /* This can happen if the station has been removed right now */ - if (IS_ERR_OR_NULL(sta)) - return NULL; - - return iwl_mvm_sta_from_mac80211(sta); -} - -static inline struct iwl_mvm_sta * -iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) -{ - struct ieee80211_sta *sta; - - if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) - return NULL; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - /* This can happen if the station has been removed right now */ - if (IS_ERR_OR_NULL(sta)) - return NULL; - - return iwl_mvm_sta_from_mac80211(sta); -} - -static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) -{ - return mvm->trans->cfg->d0i3 && - mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF && - !iwlwifi_mod_params.d0i3_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); -} - -static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DQA_SUPPORT); -} - -static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) -{ - bool nvm_lar = mvm->nvm_data->lar_enabled; - bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - - if (iwlwifi_mod_params.lar_disable) - return false; - - /* - * Enable LAR only if it is supported by the FW (TLV) && - * enabled in the NVM - */ - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) - return nvm_lar && tlv_lar; - else - return tlv_lar; -} - -static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) -{ - return fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); -} - -static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && - IWL_MVM_BT_COEX_CORUNNING; -} - -static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && - IWL_MVM_BT_COEX_RRC; -} - -static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); -} - -static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) -{ - /* firmware flag isn't defined yet */ - return false; -} - -extern const u8 iwl_mvm_ac_to_tx_fifo[]; - -struct iwl_rate_info { - u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ - u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ - u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ - u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ - u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ -}; - -void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); -int __iwl_mvm_mac_start(struct iwl_mvm *mvm); - -/****************** - * MVM Methods - ******************/ -/* uCode */ -int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); - -/* Utils */ -int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, - enum ieee80211_band band); -void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, - enum ieee80211_band band, - struct ieee80211_tx_rate *r); -u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); -void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); -u8 first_antenna(u8 mask); -u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); - -/* Tx / Host Commands */ -int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, - struct iwl_host_cmd *cmd); -int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, - u32 flags, u16 len, const void *data); -int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, - struct iwl_host_cmd *cmd, - u32 *status); -int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, - u16 len, const void *data, - u32 *status); -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta); -int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); -void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, - struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, u8 sta_id); -void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, __le16 fc); -#ifdef CONFIG_IWLWIFI_DEBUG -const char *iwl_mvm_get_tx_fail_reason(u32 status); -#else -static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } -#endif -int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags); -void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); - -static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd) -{ - struct ieee80211_key_conf *keyconf = info->control.hw_key; - - tx_cmd->sec_ctl = TX_CMD_SEC_CCM; - memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); - if (info->flags & IEEE80211_TX_CTL_AMPDU) - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); -} - -static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) -{ - flush_work(&mvm->async_handlers_wk); -} - -/* Statistics */ -void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt); -void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); -void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); - -/* NVM */ -int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic); -int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); - -static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) -{ - return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? - mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : - mvm->fw->valid_tx_ant; -} - -static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) -{ - return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? 
- mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : - mvm->fw->valid_rx_ant; -} - -static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm) -{ - u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | - FW_PHY_CFG_RX_CHAIN); - u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm); - u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); - - phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | - valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS; - - return mvm->fw->phy_config & phy_config; -} - -int iwl_mvm_up(struct iwl_mvm *mvm); -int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); - -int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); -bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, - struct iwl_bcast_filter_cmd *cmd); - -/* - * FW notifications / CMD responses handlers - * Convention: iwl_mvm_rx_ - */ -void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - -/* MVM PHY */ -int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, - u8 chains_static, u8 chains_dynamic); -int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, - u8 chains_static, u8 chains_dynamic); -void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, - struct iwl_mvm_phy_ctxt *ctxt); -void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, - struct iwl_mvm_phy_ctxt *ctxt); -int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); -u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); -u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); - -/* MAC (virtual interface) programming */ -int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool force_assoc_off, const u8 *bssid_override); -int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif); -int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, - struct ieee80211_vif *exclude_vif); -/* Bindings */ -int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); - -/* Quota management */ -int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, - struct ieee80211_vif *disabled_vif); 
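/*
 * [Editor's note, not part of the original patch: a minimal, standalone sketch
 * of the antenna-mask intersection done by iwl_mvm_get_valid_tx_ant() earlier
 * in this header -- the firmware capability mask is narrowed by the NVM mask
 * only when the NVM actually provides one. The ANT_A/ANT_B bit values are
 * assumptions made for illustration and are not taken from this patch.]
 */
#include <stdio.h>

#define ANT_A 0x1	/* assumed: antenna A in bit 0 */
#define ANT_B 0x2	/* assumed: antenna B in bit 1 */

static unsigned int valid_tx_ant(unsigned int fw_ant, unsigned int nvm_ant)
{
	/* mirrors: nvm_data && nvm_data->valid_tx_ant ? fw & nvm : fw */
	return nvm_ant ? (fw_ant & nvm_ant) : fw_ant;
}

int main(void)
{
	/* firmware advertises A+B, the NVM restricts to A -> only ANT_A is left */
	printf("valid tx ant mask: 0x%x\n", valid_tx_ant(ANT_A | ANT_B, ANT_A));
	return 0;
}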
- -/* Scanning */ -int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct cfg80211_scan_request *req, - struct ieee80211_scan_ies *ies); -int iwl_mvm_scan_size(struct iwl_mvm *mvm); -int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); -int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); -void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); - -/* Scheduled scan */ -void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *req, - struct ieee80211_scan_ies *ies, - int type); -void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - -/* UMAC scan */ -int iwl_mvm_config_scan(struct iwl_mvm *mvm); -void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - -/* MVM debugfs */ -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); -void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -#else -static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, - struct dentry *dbgfs_dir) -{ - return 0; -} -static inline void -iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ -} -static inline void -iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ -} -#endif /* CONFIG_IWLWIFI_DEBUGFS */ - -/* rate scaling */ -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); -void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); -int rs_pretty_print_rate(char *buf, const u32 rate); -void rs_update_last_rssi(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct ieee80211_rx_status *rx_status); - -/* power management */ -int iwl_mvm_power_update_device(struct iwl_mvm *mvm); -int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); -int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); -int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - char *buf, int bufsz); - -void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - -#ifdef CONFIG_IWLWIFI_LEDS -int iwl_mvm_leds_init(struct iwl_mvm *mvm); -void iwl_mvm_leds_exit(struct iwl_mvm *mvm); -#else -static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) -{ - return 0; -} -static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm) -{ -} -#endif - -/* D3 (WoWLAN, NetDetect) */ -int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); -int iwl_mvm_resume(struct ieee80211_hw *hw); -void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled); -void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data); -void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct inet6_dev *idev); -void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, int idx); -extern const struct file_operations iwl_dbgfs_d3_test_ops; -#ifdef CONFIG_PM_SLEEP -void 
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -#else -static inline void -iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ -} -#endif -void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, - struct iwl_wowlan_config_cmd *cmd); -int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool disable_offloading, - u32 cmd_flags); - -/* D0i3 */ -void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); -void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); -int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); -bool iwl_mvm_ref_taken(struct iwl_mvm *mvm); -void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); -int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode); -int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode); -int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); - -/* BT Coex */ -int iwl_send_bt_init_conf(struct iwl_mvm *mvm); -void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_rssi_event_data); -void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); -u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, - struct ieee80211_sta *sta); -bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, - struct ieee80211_sta *sta); -bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); -bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); -bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, - enum ieee80211_band band); -u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, - struct ieee80211_tx_info *info, u8 ac); - -bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); -void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); -int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); -void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_rssi_event_data); -u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta); -bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta); -bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, - enum ieee80211_band band); -void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - -/* beacon filtering */ -#ifdef CONFIG_IWLWIFI_DEBUGFS -void -iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, - struct iwl_beacon_filter_cmd *cmd); -#else -static inline void -iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, - struct iwl_beacon_filter_cmd *cmd) -{} -#endif -int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable, u32 flags); -int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags); -int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags); -/* SMPS */ -void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum iwl_mvm_smps_type_request req_type, - enum ieee80211_smps_mode smps_request); -bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm); - -/* Low latency */ -int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool value); -/* get SystemLowLatencyMode - only needed for beacon threshold? 
*/ -bool iwl_mvm_low_latency(struct iwl_mvm *mvm); -/* get VMACLowLatencyMode */ -static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) -{ - /* - * should this consider associated/active/... state? - * - * Normally low-latency should only be active on interfaces - * that are active, but at least with debugfs it can also be - * enabled on interfaces that aren't active. However, when - * interface aren't active then they aren't added into the - * binding, so this has no real impact. For now, just return - * the current desired low-latency state. - */ - - return mvmvif->low_latency; -} - -/* hw scheduler queue config */ -void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout); -/* - * Disable a TXQ. - * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. - */ -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags); -int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq); - -static inline -void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 fifo, u16 ssn, unsigned int wdg_timeout) -{ - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - - iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); -} - -static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, - int mac80211_queue, int fifo, - int sta_id, int tid, int frame_limit, - u16 ssn, unsigned int wdg_timeout) -{ - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .sta_id = sta_id, - .tid = tid, - .frame_limit = frame_limit, - .aggregate = true, - }; - - iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); -} - -/* Thermal management and CT-kill */ -void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); -void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); -void iwl_mvm_temp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_tt_handler(struct iwl_mvm *mvm); -void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); -void iwl_mvm_tt_exit(struct iwl_mvm *mvm); -void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); -int iwl_mvm_get_temp(struct iwl_mvm *mvm); - -/* Location Aware Regulatory */ -struct iwl_mcc_update_resp * -iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, - enum iwl_mcc_source src_id); -int iwl_mvm_init_mcc(struct iwl_mvm *mvm); -void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, - const char *alpha2, - enum iwl_mcc_source src_id, - bool *changed); -struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, - bool *changed); -int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); -void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); - -/* smart fifo */ -int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool added_vif); - -/* TDLS */ - -/* - * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present. - * This TID is marked as used vs the AP and all connected TDLS peers. 
- */ -#define IWL_MVM_TDLS_FW_TID 4 - -int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); -void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool sta_added); -void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u8 oper_class, - struct cfg80211_chan_def *chandef, - struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); -void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_tdls_ch_sw_params *params); -void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); - -struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); - -void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); - -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - struct iwl_fw_dbg_trigger_tlv *trigger); -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - struct iwl_mvm_dump_desc *desc, - struct iwl_fw_dbg_trigger_tlv *trigger); -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) __printf(3, 4); -unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool tdls, bool cmd_q); -void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - const char *errmsg); -static inline bool -iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, - struct ieee80211_vif *vif) -{ - u32 trig_vif = le32_to_cpu(trig->vif_type); - - return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; -} - -static inline bool -iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trig) -{ - return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && - (mvm->fw_dbg_conf == FW_DBG_INVALID || - (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids)))); -} - -static inline bool -iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_fw_dbg_trigger_tlv *trig) -{ - if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif)) - return false; - - return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig); -} - -static inline void -iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - enum iwl_fw_dbg_trigger trig) -{ - struct iwl_fw_dbg_trigger_tlv *trigger; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig)) - return; - - trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig); - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) - return; - - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); -} - -#endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c deleted file mode 100644 index 2ee0f6fe56a1..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ /dev/null @@ -1,864 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#include -#include -#include -#include -#include "iwl-trans.h" -#include "iwl-csr.h" -#include "mvm.h" -#include "iwl-eeprom-parse.h" -#include "iwl-eeprom-read.h" -#include "iwl-nvm-parse.h" -#include "iwl-prph.h" - -/* Default NVM size to read */ -#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) -#define IWL_MAX_NVM_SECTION_SIZE 0x1b58 -#define IWL_MAX_NVM_8000_SECTION_SIZE 0x1ffc - -#define NVM_WRITE_OPCODE 1 -#define NVM_READ_OPCODE 0 - -/* load nvm chunk response */ -enum { - READ_NVM_CHUNK_SUCCEED = 0, - READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1 -}; - -/* - * prepare the NVM host command w/ the pointers to the nvm buffer - * and send it to fw - */ -static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section, - u16 offset, u16 length, const u8 *data) -{ - struct iwl_nvm_access_cmd nvm_access_cmd = { - .offset = cpu_to_le16(offset), - .length = cpu_to_le16(length), - .type = cpu_to_le16(section), - .op_code = NVM_WRITE_OPCODE, - }; - struct iwl_host_cmd cmd = { - .id = NVM_ACCESS_CMD, - .len = { sizeof(struct iwl_nvm_access_cmd), length }, - .flags = CMD_SEND_IN_RFKILL, - .data = { &nvm_access_cmd, data }, - /* data may come from vmalloc, so use _DUP */ - .dataflags = { 0, IWL_HCMD_DFL_DUP }, - }; - - return iwl_mvm_send_cmd(mvm, &cmd); -} - -static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, - u16 offset, u16 length, u8 *data) -{ - struct iwl_nvm_access_cmd nvm_access_cmd = { - .offset = cpu_to_le16(offset), - .length = cpu_to_le16(length), - .type = cpu_to_le16(section), - .op_code = NVM_READ_OPCODE, - }; - struct iwl_nvm_access_resp *nvm_resp; - struct iwl_rx_packet *pkt; - struct iwl_host_cmd cmd = { - .id = NVM_ACCESS_CMD, - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &nvm_access_cmd, }, - }; - int ret, bytes_read, offset_read; - u8 *resp_data; - - cmd.len[0] = sizeof(struct iwl_nvm_access_cmd); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) - return ret; - - pkt = cmd.resp_pkt; - - /* Extract NVM response */ - nvm_resp = (void *)pkt->data; - ret = le16_to_cpu(nvm_resp->status); - bytes_read = le16_to_cpu(nvm_resp->length); - offset_read = le16_to_cpu(nvm_resp->offset); - resp_data = nvm_resp->data; - if (ret) { - if ((offset != 0) && - (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) { - /* - * meaning of NOT_VALID_ADDRESS: - * driver try to read chunk from address that is - * multiple of 2K and got an error since addr is empty. - * meaning of (offset != 0): driver already - * read valid data from another chunk so this case - * is not an error. 
- */ - IWL_DEBUG_EEPROM(mvm->trans->dev, - "NVM access command failed on offset 0x%x since that section size is multiple 2K\n", - offset); - ret = 0; - } else { - IWL_DEBUG_EEPROM(mvm->trans->dev, - "NVM access command failed with status %d (device: %s)\n", - ret, mvm->cfg->name); - ret = -EIO; - } - goto exit; - } - - if (offset_read != offset) { - IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n", - offset_read); - ret = -EINVAL; - goto exit; - } - - /* Write data to NVM */ - memcpy(data + offset, resp_data, bytes_read); - ret = bytes_read; - -exit: - iwl_free_resp(&cmd); - return ret; -} - -static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, - const u8 *data, u16 length) -{ - int offset = 0; - - /* copy data in chunks of 2k (and remainder if any) */ - - while (offset < length) { - int chunk_size, ret; - - chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, - length - offset); - - ret = iwl_nvm_write_chunk(mvm, section, offset, - chunk_size, data + offset); - if (ret < 0) - return ret; - - offset += chunk_size; - } - - return 0; -} - -/* - * Reads an NVM section completely. - * NICs prior to 7000 family doesn't have a real NVM, but just read - * section 0 which is the EEPROM. Because the EEPROM reading is unlimited - * by uCode, we need to manually check in this case that we don't - * overflow and try to read more than the EEPROM size. - * For 7000 family NICs, we supply the maximal size we can read, and - * the uCode fills the response with as much data as we can, - * without overflowing, so no check is needed. - */ -static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, - u8 *data, u32 size_read) -{ - u16 length, offset = 0; - int ret; - - /* Set nvm section read length */ - length = IWL_NVM_DEFAULT_CHUNK_SIZE; - - ret = length; - - /* Read the NVM until exhausted (reading less than requested) */ - while (ret == length) { - /* Check no memory assumptions fail and cause an overflow */ - if ((size_read + offset + length) > - mvm->cfg->base_params->eeprom_size) { - IWL_ERR(mvm, "EEPROM size is too small for NVM\n"); - return -ENOBUFS; - } - - ret = iwl_nvm_read_chunk(mvm, section, offset, length, data); - if (ret < 0) { - IWL_DEBUG_EEPROM(mvm->trans->dev, - "Cannot read NVM from section %d offset %d, length %d\n", - section, offset, length); - return ret; - } - offset += ret; - } - - IWL_DEBUG_EEPROM(mvm->trans->dev, - "NVM section %d read completed\n", section); - return offset; -} - -static struct iwl_nvm_data * -iwl_parse_nvm_sections(struct iwl_mvm *mvm) -{ - struct iwl_nvm_section *sections = mvm->nvm_sections; - const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; - bool lar_enabled; - u32 mac_addr0, mac_addr1; - - /* Checking for required sections */ - if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { - if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || - !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { - IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); - return NULL; - } - } else { - /* SW and REGULATORY sections are mandatory */ - if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || - !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) { - IWL_ERR(mvm, - "Can't parse empty family 8000 OTP/NVM sections\n"); - return NULL; - } - /* MAC_OVERRIDE or at least HW section must exist */ - if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data && - !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) { - IWL_ERR(mvm, - "Can't parse mac_address, empty sections\n"); - return NULL; - } - - /* PHY_SKU section is 
mandatory in B0 */ - if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { - IWL_ERR(mvm, - "Can't parse phy_sku in B0, empty sections\n"); - return NULL; - } - } - - if (WARN_ON(!mvm->cfg)) - return NULL; - - /* read the mac address from WFMP registers */ - mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0); - mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1); - - hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; - sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; - calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; - regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; - mac_override = - (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; - phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; - - lar_enabled = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - - return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, - regulatory, mac_override, phy_sku, - mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, - lar_enabled, mac_addr0, mac_addr1, - mvm->trans->hw_id); -} - -#define MAX_NVM_FILE_LEN 16384 - -/* - * Reads external NVM from a file into mvm->nvm_sections - * - * HOW TO CREATE THE NVM FILE FORMAT: - * ------------------------------ - * 1. create hex file, format: - * 3800 -> header - * 0000 -> header - * 5a40 -> data - * - * rev - 6 bit (word1) - * len - 10 bit (word1) - * id - 4 bit (word2) - * rsv - 12 bit (word2) - * - * 2. flip 8bits with 8 bits per line to get the right NVM file format - * - * 3. create binary file from the hex file - * - * 4. save as "iNVM_xxx.bin" under /lib/firmware - */ -static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) -{ - int ret, section_size; - u16 section_id; - const struct firmware *fw_entry; - const struct { - __le16 word1; - __le16 word2; - u8 data[]; - } *file_sec; - const u8 *eof, *temp; - int max_section_size; - const __le32 *dword_buff; - -#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) -#define NVM_WORD2_ID(x) (x >> 12) -#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8)) -#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4) -#define NVM_HEADER_0 (0x2A504C54) -#define NVM_HEADER_1 (0x4E564D2A) -#define NVM_HEADER_SIZE (4 * sizeof(u32)) - - IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); - - /* Maximal size depends on HW family and step */ - if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - max_section_size = IWL_MAX_NVM_SECTION_SIZE; - else - max_section_size = IWL_MAX_NVM_8000_SECTION_SIZE; - - /* - * Obtain NVM image via request_firmware. Since we already used - * request_firmware_nowait() for the firmware binary load and only - * get here after that we assume the NVM request can be satisfied - * synchronously. - */ - ret = request_firmware(&fw_entry, mvm->nvm_file_name, - mvm->trans->dev); - if (ret) { - IWL_ERR(mvm, "ERROR: %s isn't available %d\n", - mvm->nvm_file_name, ret); - return ret; - } - - IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", - mvm->nvm_file_name, fw_entry->size); - - if (fw_entry->size > MAX_NVM_FILE_LEN) { - IWL_ERR(mvm, "NVM file too large\n"); - ret = -EINVAL; - goto out; - } - - eof = fw_entry->data + fw_entry->size; - dword_buff = (__le32 *)fw_entry->data; - - /* some NVM file will contain a header. - * The header is identified by 2 dwords header as follow: - * dword[0] = 0x2A504C54 - * dword[1] = 0x4E564D2A - * - * This header must be skipped when providing the NVM data to the FW. 
- */ - if (fw_entry->size > NVM_HEADER_SIZE && - dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && - dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { - file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); - IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); - IWL_INFO(mvm, "NVM Manufacturing date %08X\n", - le32_to_cpu(dword_buff[3])); - - /* nvm file validation, dword_buff[2] holds the file version */ - if ((CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP && - le32_to_cpu(dword_buff[2]) < 0xE4A) || - (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP && - le32_to_cpu(dword_buff[2]) >= 0xE4A)) { - ret = -EFAULT; - goto out; - } - } else { - file_sec = (void *)fw_entry->data; - } - - while (true) { - if (file_sec->data > eof) { - IWL_ERR(mvm, - "ERROR - NVM file too short for section header\n"); - ret = -EINVAL; - break; - } - - /* check for EOF marker */ - if (!file_sec->word1 && !file_sec->word2) { - ret = 0; - break; - } - - if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { - section_size = - 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); - section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); - } else { - section_size = 2 * NVM_WORD2_LEN_FAMILY_8000( - le16_to_cpu(file_sec->word2)); - section_id = NVM_WORD1_ID_FAMILY_8000( - le16_to_cpu(file_sec->word1)); - } - - if (section_size > max_section_size) { - IWL_ERR(mvm, "ERROR - section too large (%d)\n", - section_size); - ret = -EINVAL; - break; - } - - if (!section_size) { - IWL_ERR(mvm, "ERROR - section empty\n"); - ret = -EINVAL; - break; - } - - if (file_sec->data + section_size > eof) { - IWL_ERR(mvm, - "ERROR - NVM file too short for section (%d bytes)\n", - section_size); - ret = -EINVAL; - break; - } - - if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, - "Invalid NVM section ID %d\n", section_id)) { - ret = -EINVAL; - break; - } - - temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); - if (!temp) { - ret = -ENOMEM; - break; - } - kfree(mvm->nvm_sections[section_id].data); - mvm->nvm_sections[section_id].data = temp; - mvm->nvm_sections[section_id].length = section_size; - - /* advance to the next section */ - file_sec = (void *)(file_sec->data + section_size); - } -out: - release_firmware(fw_entry); - return ret; -} - -/* Loads the NVM data stored in mvm->nvm_sections into the NIC */ -int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) -{ - int i, ret = 0; - struct iwl_nvm_section *sections = mvm->nvm_sections; - - IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n"); - - for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) { - if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length) - continue; - ret = iwl_nvm_write_section(mvm, i, sections[i].data, - sections[i].length); - if (ret < 0) { - IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret); - break; - } - } - return ret; -} - -int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) -{ - int ret, section; - u32 size_read = 0; - u8 *nvm_buffer, *temp; - const char *nvm_file_B = mvm->cfg->default_nvm_file_B_step; - const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step; - - if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) - return -EINVAL; - - /* load NVM values from nic */ - if (read_nvm_from_nic) { - /* Read From FW NVM */ - IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); - - nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, - GFP_KERNEL); - if (!nvm_buffer) - return -ENOMEM; - for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) { - /* we override the constness for initial read */ - 
ret = iwl_nvm_read_section(mvm, section, nvm_buffer, - size_read); - if (ret < 0) - continue; - size_read += ret; - temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); - if (!temp) { - ret = -ENOMEM; - break; - } - mvm->nvm_sections[section].data = temp; - mvm->nvm_sections[section].length = ret; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - switch (section) { - case NVM_SECTION_TYPE_SW: - mvm->nvm_sw_blob.data = temp; - mvm->nvm_sw_blob.size = ret; - break; - case NVM_SECTION_TYPE_CALIBRATION: - mvm->nvm_calib_blob.data = temp; - mvm->nvm_calib_blob.size = ret; - break; - case NVM_SECTION_TYPE_PRODUCTION: - mvm->nvm_prod_blob.data = temp; - mvm->nvm_prod_blob.size = ret; - break; - case NVM_SECTION_TYPE_PHY_SKU: - mvm->nvm_phy_sku_blob.data = temp; - mvm->nvm_phy_sku_blob.size = ret; - break; - default: - if (section == mvm->cfg->nvm_hw_section_num) { - mvm->nvm_hw_blob.data = temp; - mvm->nvm_hw_blob.size = ret; - break; - } - } -#endif - } - if (!size_read) - IWL_ERR(mvm, "OTP is blank\n"); - kfree(nvm_buffer); - } - - /* Only if PNVM selected in the mod param - load external NVM */ - if (mvm->nvm_file_name) { - /* read External NVM file from the mod param */ - ret = iwl_mvm_read_external_nvm(mvm); - if (ret) { - /* choose the nvm_file name according to the - * HW step - */ - if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == - SILICON_B_STEP) - mvm->nvm_file_name = nvm_file_B; - else - mvm->nvm_file_name = nvm_file_C; - - if (ret == -EFAULT && mvm->nvm_file_name) { - /* in case nvm file was failed try again */ - ret = iwl_mvm_read_external_nvm(mvm); - if (ret) - return ret; - } else { - return ret; - } - } - } - - /* parse the relevant nvm sections */ - mvm->nvm_data = iwl_parse_nvm_sections(mvm); - if (!mvm->nvm_data) - return -ENODATA; - IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", - mvm->nvm_data->nvm_version); - - return 0; -} - -struct iwl_mcc_update_resp * -iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, - enum iwl_mcc_source src_id) -{ - struct iwl_mcc_update_cmd mcc_update_cmd = { - .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]), - .source_id = (u8)src_id, - }; - struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL; - struct iwl_rx_packet *pkt; - struct iwl_host_cmd cmd = { - .id = MCC_UPDATE_CMD, - .flags = CMD_WANT_SKB, - .data = { &mcc_update_cmd }, - }; - - int ret; - u32 status; - int resp_len, n_channels; - u16 mcc; - - if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) - return ERR_PTR(-EOPNOTSUPP); - - cmd.len[0] = sizeof(struct iwl_mcc_update_cmd); - - IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", - alpha2[0], alpha2[1], src_id); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) - return ERR_PTR(ret); - - pkt = cmd.resp_pkt; - - /* Extract MCC response */ - mcc_resp = (void *)pkt->data; - status = le32_to_cpu(mcc_resp->status); - - mcc = le16_to_cpu(mcc_resp->mcc); - - /* W/A for a FW/NVM issue - returns 0x00 for the world domain */ - if (mcc == 0) { - mcc = 0x3030; /* "00" - world */ - mcc_resp->mcc = cpu_to_le16(mcc); - } - - n_channels = __le32_to_cpu(mcc_resp->n_channels); - IWL_DEBUG_LAR(mvm, - "MCC response status: 0x%x. 
new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", - status, mcc, mcc >> 8, mcc & 0xff, - !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); - - resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32); - resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); - if (!resp_cp) { - ret = -ENOMEM; - goto exit; - } - - ret = 0; -exit: - iwl_free_resp(&cmd); - if (ret) - return ERR_PTR(ret); - return resp_cp; -} - -#ifdef CONFIG_ACPI -#define WRD_METHOD "WRDD" -#define WRDD_WIFI (0x07) -#define WRDD_WIGIG (0x10) - -static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd) -{ - union acpi_object *mcc_pkg, *domain_type, *mcc_value; - u32 i; - - if (wrdd->type != ACPI_TYPE_PACKAGE || - wrdd->package.count < 2 || - wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || - wrdd->package.elements[0].integer.value != 0) { - IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n"); - return 0; - } - - for (i = 1 ; i < wrdd->package.count ; ++i) { - mcc_pkg = &wrdd->package.elements[i]; - - if (mcc_pkg->type != ACPI_TYPE_PACKAGE || - mcc_pkg->package.count < 2 || - mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || - mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { - mcc_pkg = NULL; - continue; - } - - domain_type = &mcc_pkg->package.elements[0]; - if (domain_type->integer.value == WRDD_WIFI) - break; - - mcc_pkg = NULL; - } - - if (mcc_pkg) { - mcc_value = &mcc_pkg->package.elements[1]; - return mcc_value->integer.value; - } - - return 0; -} - -static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) -{ - acpi_handle root_handle; - acpi_handle handle; - struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_status status; - u32 mcc_val; - struct pci_dev *pdev = to_pci_dev(mvm->dev); - - root_handle = ACPI_HANDLE(&pdev->dev); - if (!root_handle) { - IWL_DEBUG_LAR(mvm, - "Could not retrieve root port ACPI handle\n"); - return -ENOENT; - } - - /* Get the method's handle */ - status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); - if (ACPI_FAILURE(status)) { - IWL_DEBUG_LAR(mvm, "WRD method not found\n"); - return -ENOENT; - } - - /* Call WRDD with no arguments */ - status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); - if (ACPI_FAILURE(status)) { - IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status); - return -ENOENT; - } - - mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer); - kfree(wrdd.pointer); - if (!mcc_val) - return -ENOENT; - - mcc[0] = (mcc_val >> 8) & 0xff; - mcc[1] = mcc_val & 0xff; - mcc[2] = '\0'; - return 0; -} -#else /* CONFIG_ACPI */ -static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) -{ - return -ENOENT; -} -#endif - -int iwl_mvm_init_mcc(struct iwl_mvm *mvm) -{ - bool tlv_lar; - bool nvm_lar; - int retval; - struct ieee80211_regdomain *regd; - char mcc[3]; - - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - nvm_lar = mvm->nvm_data->lar_enabled; - if (tlv_lar != nvm_lar) - IWL_INFO(mvm, - "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n", - tlv_lar ? "enabled" : "disabled", - nvm_lar ? "enabled" : "disabled"); - } - - if (!iwl_mvm_is_lar_supported(mvm)) - return 0; - - /* - * try to replay the last set MCC to FW. If it doesn't exist, - * queue an update to cfg80211 to retrieve the default alpha2 from FW. 
- */ - retval = iwl_mvm_init_fw_regd(mvm); - if (retval != -ENOENT) - return retval; - - /* - * Driver regulatory hint for initial update, this also informs the - * firmware we support wifi location updates. - * Disallow scans that might crash the FW while the LAR regdomain - * is not set. - */ - mvm->lar_regdom_set = false; - - regd = iwl_mvm_get_current_regdomain(mvm, NULL); - if (IS_ERR_OR_NULL(regd)) - return -EIO; - - if (iwl_mvm_is_wifi_mcc_supported(mvm) && - !iwl_mvm_get_bios_mcc(mvm, mcc)) { - kfree(regd); - regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, - MCC_SOURCE_BIOS, NULL); - if (IS_ERR_OR_NULL(regd)) - return -EIO; - } - - retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); - kfree(regd); - return retval; -} - -void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mcc_chub_notif *notif = (void *)pkt->data; - enum iwl_mcc_source src; - char mcc[3]; - struct ieee80211_regdomain *regd; - - lockdep_assert_held(&mvm->mutex); - - if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) - return; - - mcc[0] = notif->mcc >> 8; - mcc[1] = notif->mcc & 0xff; - mcc[2] = '\0'; - src = notif->source_id; - - IWL_DEBUG_LAR(mvm, - "RX: received chub update mcc cmd (mcc '%s' src %d)\n", - mcc, src); - regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); - if (IS_ERR_OR_NULL(regd)) - return; - - regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); - kfree(regd); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c deleted file mode 100644 index 68b0169c8892..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/offloading.c +++ /dev/null @@ -1,217 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include -#include -#include "mvm.h" - -void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, - struct iwl_wowlan_config_cmd *cmd) -{ - int i; - - /* - * For QoS counters, we store the one to use next, so subtract 0x10 - * since the uCode will add 0x10 *before* using the value while we - * increment after using the value (i.e. store the next value to use). - */ - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - u16 seq = mvm_ap_sta->tid_data[i].seq_number; - seq -= 0x10; - cmd->qos_seq[i] = cpu_to_le16(seq); - } -} - -int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool disable_offloading, - u32 cmd_flags) -{ - union { - struct iwl_proto_offload_cmd_v1 v1; - struct iwl_proto_offload_cmd_v2 v2; - struct iwl_proto_offload_cmd_v3_small v3s; - struct iwl_proto_offload_cmd_v3_large v3l; - } cmd = {}; - struct iwl_host_cmd hcmd = { - .id = PROT_OFFLOAD_CONFIG_CMD, - .flags = cmd_flags, - .data[0] = &cmd, - .dataflags[0] = IWL_HCMD_DFL_DUP, - }; - struct iwl_proto_offload_cmd_common *common; - u32 enabled = 0, size; - u32 capa_flags = mvm->fw->ucode_capa.flags; -#if IS_ENABLED(CONFIG_IPV6) - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int i; - - if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL || - capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { - struct iwl_ns_config *nsc; - struct iwl_targ_addr *addrs; - int n_nsc, n_addrs; - int c; - - if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { - nsc = cmd.v3s.ns_config; - n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S; - addrs = cmd.v3s.targ_addrs; - n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S; - } else { - nsc = cmd.v3l.ns_config; - n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L; - addrs = cmd.v3l.targ_addrs; - n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L; - } - - if (mvmvif->num_target_ipv6_addrs) - enabled |= IWL_D3_PROTO_OFFLOAD_NS; - - /* - * For each address we have (and that will fit) fill a target - * address struct and combine for NS offload structs with the - * solicited node addresses. 
- */ - for (i = 0, c = 0; - i < mvmvif->num_target_ipv6_addrs && - i < n_addrs && c < n_nsc; i++) { - struct in6_addr solicited_addr; - int j; - - addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i], - &solicited_addr); - for (j = 0; j < c; j++) - if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr, - &solicited_addr) == 0) - break; - if (j == c) - c++; - addrs[i].addr = mvmvif->target_ipv6_addrs[i]; - addrs[i].config_num = cpu_to_le32(j); - nsc[j].dest_ipv6_addr = solicited_addr; - memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN); - } - - if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) - cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i); - else - cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i); - } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { - if (mvmvif->num_target_ipv6_addrs) { - enabled |= IWL_D3_PROTO_OFFLOAD_NS; - memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN); - } - - BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) != - sizeof(mvmvif->target_ipv6_addrs[0])); - - for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, - IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) - memcpy(cmd.v2.target_ipv6_addr[i], - &mvmvif->target_ipv6_addrs[i], - sizeof(cmd.v2.target_ipv6_addr[i])); - } else { - if (mvmvif->num_target_ipv6_addrs) { - enabled |= IWL_D3_PROTO_OFFLOAD_NS; - memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN); - } - - BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) != - sizeof(mvmvif->target_ipv6_addrs[0])); - - for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, - IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) - memcpy(cmd.v1.target_ipv6_addr[i], - &mvmvif->target_ipv6_addrs[i], - sizeof(cmd.v1.target_ipv6_addr[i])); - } -#endif - - if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { - common = &cmd.v3s.common; - size = sizeof(cmd.v3s); - } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { - common = &cmd.v3l.common; - size = sizeof(cmd.v3l); - } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { - common = &cmd.v2.common; - size = sizeof(cmd.v2); - } else { - common = &cmd.v1.common; - size = sizeof(cmd.v1); - } - - if (vif->bss_conf.arp_addr_cnt) { - enabled |= IWL_D3_PROTO_OFFLOAD_ARP; - common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; - memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN); - } - - if (!disable_offloading) - common->enabled = cpu_to_le32(enabled); - - hcmd.len[0] = size; - return iwl_mvm_send_cmd(mvm, &hcmd); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c deleted file mode 100644 index 13c97f665ba8..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ /dev/null @@ -1,1434 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#include -#include -#include - -#include "iwl-notif-wait.h" -#include "iwl-trans.h" -#include "iwl-op-mode.h" -#include "iwl-fw.h" -#include "iwl-debug.h" -#include "iwl-drv.h" -#include "iwl-modparams.h" -#include "mvm.h" -#include "iwl-phy-db.h" -#include "iwl-eeprom-parse.h" -#include "iwl-csr.h" -#include "iwl-io.h" -#include "iwl-prph.h" -#include "rs.h" -#include "fw-api-scan.h" -#include "time-event.h" - -#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); - -static const struct iwl_op_mode_ops iwl_mvm_ops; -static const struct iwl_op_mode_ops iwl_mvm_ops_mq; - -struct iwl_mvm_mod_params iwlmvm_mod_params = { - .power_scheme = IWL_POWER_SCHEME_BPS, - .tfd_q_hang_detect = true - /* rest of fields are 0 by default */ -}; - -module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO); -MODULE_PARM_DESC(init_dbg, - "set to true to debug an ASSERT in INIT fw (default: false"); -module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO); -MODULE_PARM_DESC(power_scheme, - "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); -module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect, - bool, S_IRUGO); -MODULE_PARM_DESC(tfd_q_hang_detect, - "TFD queues hang detection (default: true"); - -/* - * module init and exit functions - */ -static int __init iwl_mvm_init(void) -{ - int ret; - - ret = iwl_mvm_rate_control_register(); - if (ret) { - pr_err("Unable to register rate control algorithm: %d\n", ret); - return ret; - } - - ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops); - - if (ret) { - pr_err("Unable to register MVM op_mode: %d\n", ret); - iwl_mvm_rate_control_unregister(); - } - - return ret; -} -module_init(iwl_mvm_init); - -static void __exit iwl_mvm_exit(void) -{ - iwl_opmode_deregister("iwlmvm"); - iwl_mvm_rate_control_unregister(); -} -module_exit(iwl_mvm_exit); - -static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; - u32 reg_val = 0; - u32 phy_config = iwl_mvm_get_phy_config(mvm); - - radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> - FW_PHY_CFG_RADIO_TYPE_POS; - radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >> - FW_PHY_CFG_RADIO_STEP_POS; - radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >> - FW_PHY_CFG_RADIO_DASH_POS; - - /* SKU control */ - reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; - reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; - - /* radio configuration */ - reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; - reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; - reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; - - WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) & - ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE); - - /* - * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and - * shouldn't be set to any non-zero value. The same is supposed to be - * true of the other HW, but unsetting them (such as the 7260) causes - * automatic tests to fail on seemingly unrelated errors. Need to - * further investigate this, but for now we'll separate cases. 
- */ - if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; - - iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | - CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | - CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | - CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | - CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI, - reg_val); - - IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, - radio_cfg_step, radio_cfg_dash); - - /* - * W/A : NIC is stuck in a reset state after Early PCIe power off - * (PCIe power is lost before PERST# is asserted), causing ME FW - * to lose ownership and not being able to obtain it back. - */ - if (!mvm->trans->cfg->apmg_not_supported) - iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, - ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); -} - -struct iwl_rx_handlers { - u16 cmd_id; - bool async; - void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -}; - -#define RX_HANDLER(_cmd_id, _fn, _async) \ - { .cmd_id = _cmd_id , .fn = _fn , .async = _async } -#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \ - { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async } - -/* - * Handlers for fw notifications - * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME - * This list should be in order of frequency for performance purposes. - * - * The handler can be SYNC - this means that it will be called in the Rx path - * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and - * only in this case!), it should be set as ASYNC. In that case, it will be - * called from a worker with mvm->mutex held. - */ -static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { - RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), - RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), - - RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), - RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true), - RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), - RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, - iwl_mvm_rx_ant_coupling_notif, true), - - RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), - RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true), - - RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), - - RX_HANDLER(SCAN_ITERATION_COMPLETE, - iwl_mvm_rx_lmac_scan_iter_complete_notif, false), - RX_HANDLER(SCAN_OFFLOAD_COMPLETE, - iwl_mvm_rx_lmac_scan_complete_notif, true), - RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, - false), - RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, - true), - RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, - iwl_mvm_rx_umac_scan_iter_complete_notif, false), - - RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), - - RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, - false), - - RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), - RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, - iwl_mvm_power_uapsd_misbehaving_ap_notif, false), - RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), - RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, - iwl_mvm_temp_notif, true), - - RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, - true), - RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), - RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), - -}; 
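
The comment above the handler table spells out the driver's convention: handlers registered as SYNC run directly in the RX path and must not take mvm->mutex, while ASYNC handlers are queued and executed later from a worker that holds the mutex. As a rough, self-contained illustration of that dispatch pattern (plain C; every demo_* name and the command IDs are invented for this sketch and are not driver code):

/*
 * Illustrative sketch only -- not driver code.  It models the SYNC/ASYNC
 * handler convention described above: handlers marked async are queued
 * and run later by a worker (which may take a heavy lock), while sync
 * handlers run directly in the RX path.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_pkt { unsigned int cmd_id; };

struct demo_rx_handler {
	unsigned int cmd_id;
	bool async;
	void (*fn)(const struct demo_pkt *pkt);
};

static void demo_tx_resp(const struct demo_pkt *pkt)
{
	printf("sync: cmd 0x%02x handled in RX path\n", pkt->cmd_id);
}

static void demo_statistics(const struct demo_pkt *pkt)
{
	printf("async: cmd 0x%02x handled from worker (lock held)\n", pkt->cmd_id);
}

static const struct demo_rx_handler demo_handlers[] = {
	{ .cmd_id = 0x1c, .async = false, .fn = demo_tx_resp },
	{ .cmd_id = 0x9d, .async = true,  .fn = demo_statistics },
};

/* Tiny stand-in for the async handler list plus its worker. */
static const struct demo_rx_handler *pending[8];
static const struct demo_pkt *pending_pkt[8];
static size_t n_pending;

static void demo_rx(const struct demo_pkt *pkt)
{
	size_t i;

	for (i = 0; i < sizeof(demo_handlers) / sizeof(demo_handlers[0]); i++) {
		if (demo_handlers[i].cmd_id != pkt->cmd_id)
			continue;
		if (!demo_handlers[i].async) {
			demo_handlers[i].fn(pkt);	/* run inline */
		} else if (n_pending < 8) {
			pending[n_pending] = &demo_handlers[i];
			pending_pkt[n_pending++] = pkt;	/* defer to worker */
		}
		return;
	}
}

static void demo_worker(void)
{
	size_t i;

	/* a real worker would take the driver mutex here */
	for (i = 0; i < n_pending; i++)
		pending[i]->fn(pending_pkt[i]);
	n_pending = 0;
}

int main(void)
{
	struct demo_pkt tx = { .cmd_id = 0x1c }, stats = { .cmd_id = 0x9d };

	demo_rx(&tx);		/* handled immediately */
	demo_rx(&stats);	/* queued */
	demo_worker();		/* handled under the (simulated) lock */
	return 0;
}

In the driver itself the deferral happens in iwl_mvm_rx_common(), which steals the RX page into an iwl_async_handler_entry and schedules async_handlers_wk, as seen later in this hunk.
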
-#undef RX_HANDLER -#undef RX_HANDLER_GRP -#define CMD(x) [x] = #x - -static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = { - CMD(MVM_ALIVE), - CMD(REPLY_ERROR), - CMD(ECHO_CMD), - CMD(INIT_COMPLETE_NOTIF), - CMD(PHY_CONTEXT_CMD), - CMD(MGMT_MCAST_KEY), - CMD(TX_CMD), - CMD(TXPATH_FLUSH), - CMD(SHARED_MEM_CFG), - CMD(MAC_CONTEXT_CMD), - CMD(TIME_EVENT_CMD), - CMD(TIME_EVENT_NOTIFICATION), - CMD(BINDING_CONTEXT_CMD), - CMD(TIME_QUOTA_CMD), - CMD(NON_QOS_TX_COUNTER_CMD), - CMD(DC2DC_CONFIG_CMD), - CMD(NVM_ACCESS_CMD), - CMD(PHY_CONFIGURATION_CMD), - CMD(CALIB_RES_NOTIF_PHY_DB), - CMD(SET_CALIB_DEFAULT_CMD), - CMD(FW_PAGING_BLOCK_CMD), - CMD(ADD_STA_KEY), - CMD(ADD_STA), - CMD(FW_GET_ITEM_CMD), - CMD(REMOVE_STA), - CMD(LQ_CMD), - CMD(SCAN_OFFLOAD_CONFIG_CMD), - CMD(MATCH_FOUND_NOTIFICATION), - CMD(SCAN_OFFLOAD_REQUEST_CMD), - CMD(SCAN_OFFLOAD_ABORT_CMD), - CMD(HOT_SPOT_CMD), - CMD(SCAN_OFFLOAD_COMPLETE), - CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), - CMD(SCAN_ITERATION_COMPLETE), - CMD(POWER_TABLE_CMD), - CMD(WEP_KEY), - CMD(REPLY_RX_PHY_CMD), - CMD(REPLY_RX_MPDU_CMD), - CMD(BEACON_NOTIFICATION), - CMD(BEACON_TEMPLATE_CMD), - CMD(STATISTICS_CMD), - CMD(STATISTICS_NOTIFICATION), - CMD(EOSP_NOTIFICATION), - CMD(REDUCE_TX_POWER_CMD), - CMD(TX_ANT_CONFIGURATION_CMD), - CMD(D3_CONFIG_CMD), - CMD(D0I3_END_CMD), - CMD(PROT_OFFLOAD_CONFIG_CMD), - CMD(OFFLOADS_QUERY_CMD), - CMD(REMOTE_WAKE_CONFIG_CMD), - CMD(WOWLAN_PATTERNS), - CMD(WOWLAN_CONFIGURATION), - CMD(WOWLAN_TSC_RSC_PARAM), - CMD(WOWLAN_TKIP_PARAM), - CMD(WOWLAN_KEK_KCK_MATERIAL), - CMD(WOWLAN_GET_STATUSES), - CMD(WOWLAN_TX_POWER_PER_DB), - CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD), - CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD), - CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD), - CMD(CARD_STATE_NOTIFICATION), - CMD(MISSED_BEACONS_NOTIFICATION), - CMD(BT_COEX_PRIO_TABLE), - CMD(BT_COEX_PROT_ENV), - CMD(BT_PROFILE_NOTIFICATION), - CMD(BT_CONFIG), - CMD(MCAST_FILTER_CMD), - CMD(BCAST_FILTER_CMD), - CMD(REPLY_SF_CFG_CMD), - CMD(REPLY_BEACON_FILTERING_CMD), - CMD(CMD_DTS_MEASUREMENT_TRIGGER), - CMD(DTS_MEASUREMENT_NOTIFICATION), - CMD(REPLY_THERMAL_MNG_BACKOFF), - CMD(MAC_PM_POWER_TABLE), - CMD(LTR_CONFIG), - CMD(BT_COEX_CI), - CMD(BT_COEX_UPDATE_SW_BOOST), - CMD(BT_COEX_UPDATE_CORUN_LUT), - CMD(BT_COEX_UPDATE_REDUCED_TXP), - CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), - CMD(ANTENNA_COUPLING_NOTIFICATION), - CMD(SCD_QUEUE_CFG), - CMD(SCAN_CFG_CMD), - CMD(SCAN_REQ_UMAC), - CMD(SCAN_ABORT_UMAC), - CMD(SCAN_COMPLETE_UMAC), - CMD(TDLS_CHANNEL_SWITCH_CMD), - CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION), - CMD(TDLS_CONFIG_CMD), - CMD(MCC_UPDATE_CMD), - CMD(SCAN_ITERATION_COMPLETE_UMAC), -}; -#undef CMD - -/* this forward declaration can avoid to export the function */ -static void iwl_mvm_async_handlers_wk(struct work_struct *wk); -static void iwl_mvm_d0i3_exit_work(struct work_struct *wk); - -static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg) -{ - const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs; - - if (!pwr_tx_backoff) - return 0; - - while (pwr_tx_backoff->pwr) { - if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr) - return pwr_tx_backoff->backoff; - - pwr_tx_backoff++; - } - - return 0; -} - -static void iwl_mvm_fw_error_dump_wk(struct work_struct *work); - -static struct iwl_op_mode * -iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const struct iwl_fw *fw, struct dentry *dbgfs_dir) -{ - struct ieee80211_hw *hw; - struct iwl_op_mode *op_mode; - struct iwl_mvm *mvm; - struct iwl_trans_config 
trans_cfg = {}; - static const u8 no_reclaim_cmds[] = { - TX_CMD, - }; - int err, scan_size; - u32 min_backoff; - - /* - * We use IWL_MVM_STATION_COUNT to check the validity of the station - * index all over the driver - check that its value corresponds to the - * array size. - */ - BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT); - - /******************************** - * 1. Allocating and configuring HW data - ********************************/ - hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + - sizeof(struct iwl_mvm), - &iwl_mvm_hw_ops); - if (!hw) - return NULL; - - if (cfg->max_rx_agg_size) - hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size; - - if (cfg->max_tx_agg_size) - hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; - - op_mode = hw->priv; - - mvm = IWL_OP_MODE_GET_MVM(op_mode); - mvm->dev = trans->dev; - mvm->trans = trans; - mvm->cfg = cfg; - mvm->fw = fw; - mvm->hw = hw; - - if (iwl_mvm_has_new_rx_api(mvm)) { - op_mode->ops = &iwl_mvm_ops_mq; - } else { - op_mode->ops = &iwl_mvm_ops; - - if (WARN_ON(trans->num_rx_queues > 1)) - goto out_free; - } - - mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0; - - mvm->aux_queue = 15; - mvm->first_agg_queue = 16; - mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; - if (mvm->cfg->base_params->num_of_queues == 16) { - mvm->aux_queue = 11; - mvm->first_agg_queue = 12; - } - mvm->sf_state = SF_UNINIT; - mvm->low_latency_agg_frame_limit = 6; - mvm->cur_ucode = IWL_UCODE_INIT; - - mutex_init(&mvm->mutex); - mutex_init(&mvm->d0i3_suspend_mutex); - spin_lock_init(&mvm->async_handlers_lock); - INIT_LIST_HEAD(&mvm->time_event_list); - INIT_LIST_HEAD(&mvm->aux_roc_te_list); - INIT_LIST_HEAD(&mvm->async_handlers_list); - spin_lock_init(&mvm->time_event_lock); - spin_lock_init(&mvm->queue_info_lock); - - INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); - INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); - INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); - INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); - INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); - INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); - - spin_lock_init(&mvm->d0i3_tx_lock); - spin_lock_init(&mvm->refs_lock); - skb_queue_head_init(&mvm->d0i3_tx); - init_waitqueue_head(&mvm->d0i3_exit_waitq); - - SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); - - /* - * Populate the state variables that the transport layer needs - * to know about. 
- */ - trans_cfg.op_mode = op_mode; - trans_cfg.no_reclaim_cmds = no_reclaim_cmds; - trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; - trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_WIDE_CMD_HDR); - - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE) - trans_cfg.bc_table_dword = true; - - trans_cfg.command_names = iwl_mvm_cmd_strings; - - trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; - trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; - trans_cfg.scd_set_active = true; - - trans_cfg.sdio_adma_addr = fw->sdio_adma_addr; - - /* Set a short watchdog for the command queue */ - trans_cfg.cmd_q_wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, NULL, false, true); - - snprintf(mvm->hw->wiphy->fw_version, - sizeof(mvm->hw->wiphy->fw_version), - "%s", fw->fw_version); - - /* Configure transport layer */ - iwl_trans_configure(mvm->trans, &trans_cfg); - - trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; - trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); - trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv; - trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; - memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, - sizeof(trans->dbg_conf_tlv)); - trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; - - /* set up notification wait support */ - iwl_notification_wait_init(&mvm->notif_wait); - - /* Init phy db */ - mvm->phy_db = iwl_phy_db_init(trans); - if (!mvm->phy_db) { - IWL_ERR(mvm, "Cannot init phy_db\n"); - goto out_free; - } - - IWL_INFO(mvm, "Detected %s, REV=0x%X\n", - mvm->cfg->name, mvm->trans->hw_rev); - - min_backoff = calc_min_backoff(trans, cfg); - iwl_mvm_tt_initialize(mvm, min_backoff); - - if (iwlwifi_mod_params.nvm_file) - mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; - else - IWL_DEBUG_EEPROM(mvm->trans->dev, - "working without external nvm file\n"); - - if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name, - "not allowing power-up and not having nvm_file\n")) - goto out_free; - - /* - * Even if nvm exists in the nvm_file driver should read again the nvm - * from the nic because there might be entries that exist in the OTP - * and not in the file. - * for nics with no_power_up_nic_in_init: rely completley on nvm_file - */ - if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) { - err = iwl_nvm_init(mvm, false); - if (err) - goto out_free; - } else { - err = iwl_trans_start_hw(mvm->trans); - if (err) - goto out_free; - - mutex_lock(&mvm->mutex); - err = iwl_run_init_mvm_ucode(mvm, true); - if (!err || !iwlmvm_mod_params.init_dbg) - iwl_trans_stop_device(trans); - mutex_unlock(&mvm->mutex); - /* returns 0 if successful, 1 if success but in rfkill */ - if (err < 0 && !iwlmvm_mod_params.init_dbg) { - IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); - goto out_free; - } - } - - scan_size = iwl_mvm_scan_size(mvm); - - mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); - if (!mvm->scan_cmd) - goto out_free; - - /* Set EBS as successful as long as not stated otherwise by the FW. */ - mvm->last_ebs_successful = true; - - err = iwl_mvm_mac_setup_register(mvm); - if (err) - goto out_free; - - err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir); - if (err) - goto out_unregister; - - memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); - - /* rpm starts with a taken ref. only set the appropriate bit here. 
*/ - mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1; - - iwl_mvm_tof_init(mvm); - - return op_mode; - - out_unregister: - ieee80211_unregister_hw(mvm->hw); - iwl_mvm_leds_exit(mvm); - out_free: - flush_delayed_work(&mvm->fw_dump_wk); - iwl_phy_db_free(mvm->phy_db); - kfree(mvm->scan_cmd); - if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name) - iwl_trans_op_mode_leave(trans); - ieee80211_free_hw(mvm->hw); - return NULL; -} - -static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - int i; - - iwl_mvm_leds_exit(mvm); - - iwl_mvm_tt_exit(mvm); - - ieee80211_unregister_hw(mvm->hw); - - kfree(mvm->scan_cmd); - kfree(mvm->mcast_filter_cmd); - mvm->mcast_filter_cmd = NULL; - -#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) - kfree(mvm->d3_resume_sram); - if (mvm->nd_config) { - kfree(mvm->nd_config->match_sets); - kfree(mvm->nd_config->scan_plans); - kfree(mvm->nd_config); - mvm->nd_config = NULL; - } -#endif - - iwl_trans_op_mode_leave(mvm->trans); - - iwl_phy_db_free(mvm->phy_db); - mvm->phy_db = NULL; - - iwl_free_nvm_data(mvm->nvm_data); - for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) - kfree(mvm->nvm_sections[i].data); - - iwl_mvm_tof_clean(mvm); - - ieee80211_free_hw(mvm->hw); -} - -struct iwl_async_handler_entry { - struct list_head list; - struct iwl_rx_cmd_buffer rxb; - void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -}; - -void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) -{ - struct iwl_async_handler_entry *entry, *tmp; - - spin_lock_bh(&mvm->async_handlers_lock); - list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { - iwl_free_rxb(&entry->rxb); - list_del(&entry->list); - kfree(entry); - } - spin_unlock_bh(&mvm->async_handlers_lock); -} - -static void iwl_mvm_async_handlers_wk(struct work_struct *wk) -{ - struct iwl_mvm *mvm = - container_of(wk, struct iwl_mvm, async_handlers_wk); - struct iwl_async_handler_entry *entry, *tmp; - struct list_head local_list; - - INIT_LIST_HEAD(&local_list); - - /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ - mutex_lock(&mvm->mutex); - - /* - * Sync with Rx path with a lock. Remove all the entries from this list, - * add them to a local one (lock free), and then handle them. 
- */ - spin_lock_bh(&mvm->async_handlers_lock); - list_splice_init(&mvm->async_handlers_list, &local_list); - spin_unlock_bh(&mvm->async_handlers_lock); - - list_for_each_entry_safe(entry, tmp, &local_list, list) { - entry->fn(mvm, &entry->rxb); - iwl_free_rxb(&entry->rxb); - list_del(&entry->list); - kfree(entry); - } - mutex_unlock(&mvm->mutex); -} - -static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_cmd *cmds_trig; - int i; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF)) - return; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); - cmds_trig = (void *)trig->data; - - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) - return; - - for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { - /* don't collect on CMD 0 */ - if (!cmds_trig->cmds[i].cmd_id) - break; - - if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || - cmds_trig->cmds[i].group_id != pkt->hdr.group_id) - continue; - - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "CMD 0x%02x.%02x received", - pkt->hdr.group_id, pkt->hdr.cmd); - break; - } -} - -static void iwl_mvm_rx_common(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_rx_packet *pkt) -{ - int i; - - iwl_mvm_rx_check_trigger(mvm, pkt); - - /* - * Do the notification wait before RX handlers so - * even if the RX handler consumes the RXB we have - * access to it in the notification wait entry. - */ - iwl_notification_wait_notify(&mvm->notif_wait, pkt); - - for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) { - const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; - struct iwl_async_handler_entry *entry; - - if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) - continue; - - if (!rx_h->async) { - rx_h->fn(mvm, rxb); - return; - } - - entry = kzalloc(sizeof(*entry), GFP_ATOMIC); - /* we can't do much... 
*/ - if (!entry) - return; - - entry->rxb._page = rxb_steal_page(rxb); - entry->rxb._offset = rxb->_offset; - entry->rxb._rx_page_order = rxb->_rx_page_order; - entry->fn = rx_h->fn; - spin_lock(&mvm->async_handlers_lock); - list_add_tail(&entry->list, &mvm->async_handlers_list); - spin_unlock(&mvm->async_handlers_lock); - schedule_work(&mvm->async_handlers_wk); - break; - } -} - -static void iwl_mvm_rx(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) - iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); - else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) - iwl_mvm_rx_rx_phy_cmd(mvm, rxb); - else - iwl_mvm_rx_common(mvm, rxb, pkt); -} - -static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) - iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); - else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) - iwl_mvm_rx_rx_phy_cmd(mvm, rxb); - else - iwl_mvm_rx_common(mvm, rxb, pkt); -} - -static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq; - int q; - - spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[queue].hw_queue_to_mac80211; - spin_unlock_bh(&mvm->queue_info_lock); - - if (WARN_ON_ONCE(!mq)) - return; - - for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { - if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) { - IWL_DEBUG_TX_QUEUES(mvm, - "queue %d (mac80211 %d) already stopped\n", - queue, q); - continue; - } - - ieee80211_stop_queue(mvm->hw, q); - } -} - -static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq; - int q; - - spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[queue].hw_queue_to_mac80211; - spin_unlock_bh(&mvm->queue_info_lock); - - if (WARN_ON_ONCE(!mq)) - return; - - for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { - if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) { - IWL_DEBUG_TX_QUEUES(mvm, - "queue %d (mac80211 %d) still stopped\n", - queue, q); - continue; - } - - ieee80211_wake_queue(mvm->hw, q); - } -} - -void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) -{ - if (state) - set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); - else - clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); - - wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); -} - -static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - bool calibrating = ACCESS_ONCE(mvm->calibrating); - - if (state) - set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); - else - clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); - - wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); - - /* iwl_run_init_mvm_ucode is waiting for results, abort it */ - if (calibrating) - iwl_abort_notification_waits(&mvm->notif_wait); - - /* - * Stop the device if we run OPERATIONAL firmware or if we are in the - * middle of the calibrations. 
- */ - return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating); -} - -static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct ieee80211_tx_info *info; - - info = IEEE80211_SKB_CB(skb); - iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); - ieee80211_free_txskb(mvm->hw, skb); -} - -struct iwl_mvm_reprobe { - struct device *dev; - struct work_struct work; -}; - -static void iwl_mvm_reprobe_wk(struct work_struct *wk) -{ - struct iwl_mvm_reprobe *reprobe; - - reprobe = container_of(wk, struct iwl_mvm_reprobe, work); - if (device_reprobe(reprobe->dev)) - dev_err(reprobe->dev, "reprobe failed!\n"); - kfree(reprobe); - module_put(THIS_MODULE); -} - -static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) -{ - struct iwl_mvm *mvm = - container_of(work, struct iwl_mvm, fw_dump_wk.work); - - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT)) - return; - - mutex_lock(&mvm->mutex); - - /* stop recording */ - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); - } else { - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); - /* wait before we collect the data till the DBGC stop */ - udelay(100); - } - - iwl_mvm_fw_error_dump(mvm); - - /* start recording again if the firmware is not crashed */ - WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && - mvm->fw->dbg_dest_tlv && - iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); - - mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); -} - -void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) -{ - iwl_abort_notification_waits(&mvm->notif_wait); - - /* - * This is a bit racy, but worst case we tell mac80211 about - * a stopped/aborted scan when that was already done which - * is not a problem. It is necessary to abort any os scan - * here because mac80211 requires having the scan cleared - * before restarting. - * We'll reset the scan_status to NONE in restart cleanup in - * the next start() call from mac80211. If restart isn't called - * (no fw restart) scan status will stay busy. - */ - iwl_mvm_report_scan_aborted(mvm); - - /* - * If we're restarting already, don't cycle restarts. - * If INIT fw asserted, it will likely fail again. - * If WoWLAN fw asserted, don't restart either, mac80211 - * can't recover this since we're already half suspended. - */ - if (!mvm->restart_fw && fw_error) { - iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, - NULL); - } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, - &mvm->status)) { - struct iwl_mvm_reprobe *reprobe; - - IWL_ERR(mvm, - "Firmware error during reconfiguration - reprobe!\n"); - - /* - * get a module reference to avoid doing this while unloading - * anyway and to avoid scheduling a work with code that's - * being removed. 
- */ - if (!try_module_get(THIS_MODULE)) { - IWL_ERR(mvm, "Module is being unloaded - abort\n"); - return; - } - - reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC); - if (!reprobe) { - module_put(THIS_MODULE); - return; - } - reprobe->dev = mvm->trans->dev; - INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); - schedule_work(&reprobe->work); - } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) { - /* don't let the transport/FW power down */ - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - - if (fw_error && mvm->restart_fw > 0) - mvm->restart_fw--; - ieee80211_restart_hw(mvm->hw); - } -} - -static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - iwl_mvm_dump_nic_error_log(mvm); - - iwl_mvm_nic_restart(mvm, true); -} - -static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - WARN_ON(1); - iwl_mvm_nic_restart(mvm, true); -} - -struct iwl_d0i3_iter_data { - struct iwl_mvm *mvm; - u8 ap_sta_id; - u8 vif_count; - u8 offloading_tid; - bool disable_offloading; -}; - -static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_d0i3_iter_data *iter_data) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_sta *ap_sta; - struct iwl_mvm_sta *mvmsta; - u32 available_tids = 0; - u8 tid; - - if (WARN_ON(vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) - return false; - - ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (IS_ERR_OR_NULL(ap_sta)) - return false; - - mvmsta = iwl_mvm_sta_from_mac80211(ap_sta); - spin_lock_bh(&mvmsta->lock); - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - - /* - * in case of pending tx packets, don't use this tid - * for offloading in order to prevent reuse of the same - * qos seq counters. - */ - if (iwl_mvm_tid_queued(tid_data)) - continue; - - if (tid_data->state != IWL_AGG_OFF) - continue; - - available_tids |= BIT(tid); - } - spin_unlock_bh(&mvmsta->lock); - - /* - * disallow protocol offloading if we have no available tid - * (with no pending frames and no active aggregation, - * as we don't handle "holes" properly - the scheduler needs the - * frame's seq number and TFD index to match) - */ - if (!available_tids) - return true; - - /* for simplicity, just use the first available tid */ - iter_data->offloading_tid = ffs(available_tids) - 1; - return false; -} - -static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_d0i3_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE; - - IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr); - if (vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc) - return; - - /* - * in case of pending tx packets or active aggregations, - * avoid offloading features in order to prevent reuse of - * the same qos seq counters. 
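
The TID selection in iwl_mvm_disallow_offloading() above builds a bitmask of TIDs that have no pending frames and no active aggregation, then simply takes the lowest set bit with ffs(). A tiny standalone illustration of that bitmask/ffs idiom follows; the tid_busy() predicate and the sample busy TIDs are made up for the example:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define MAX_TID 8

static int tid_busy(int tid)
{
	/* pretend TIDs 0 and 3 have pending frames or active aggregation */
	return tid == 0 || tid == 3;
}

int main(void)
{
	unsigned int available_tids = 0;
	int tid;

	for (tid = 0; tid < MAX_TID; tid++)
		if (!tid_busy(tid))
			available_tids |= 1U << tid;

	if (!available_tids) {
		printf("no usable TID - disallow offloading\n");
		return 0;
	}

	/* ffs() is 1-based, hence the -1: the lowest free TID wins */
	printf("offloading TID = %d\n", ffs(available_tids) - 1);	/* prints 1 */
	return 0;
}
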
- */ - if (iwl_mvm_disallow_offloading(mvm, vif, data)) - data->disable_offloading = true; - - iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags); - iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags); - - /* - * on init/association, mvm already configures POWER_TABLE_CMD - * and REPLY_MCAST_FILTER_CMD, so currently don't - * reconfigure them (we might want to use different - * params later on, though). - */ - data->ap_sta_id = mvmvif->ap_sta_id; - data->vif_count++; -} - -static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, - struct iwl_wowlan_config_cmd *cmd, - struct iwl_d0i3_iter_data *iter_data) -{ - struct ieee80211_sta *ap_sta; - struct iwl_mvm_sta *mvm_ap_sta; - - if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - rcu_read_lock(); - - ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]); - if (IS_ERR_OR_NULL(ap_sta)) - goto out; - - mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); - cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; - cmd->offloading_tid = iter_data->offloading_tid; - - /* - * The d0i3 uCode takes care of the nonqos counters, - * so configure only the qos seq ones. - */ - iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd); -out: - rcu_read_unlock(); -} - -int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE; - int ret; - struct iwl_d0i3_iter_data d0i3_iter_data = { - .mvm = mvm, - }; - struct iwl_wowlan_config_cmd wowlan_config_cmd = { - .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | - IWL_WOWLAN_WAKEUP_BEACON_MISS | - IWL_WOWLAN_WAKEUP_LINK_CHANGE | - IWL_WOWLAN_WAKEUP_BCN_FILTERING), - }; - struct iwl_d3_manager_config d3_cfg_cmd = { - .min_sleep_time = cpu_to_le32(1000), - .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR), - }; - - IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); - - set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - - /* - * iwl_mvm_ref_sync takes a reference before checking the flag. 
- * so by checking there is no held reference we prevent a state - * in which iwl_mvm_ref_sync continues successfully while we - * configure the firmware to enter d0i3 - */ - if (iwl_mvm_ref_taken(mvm)) { - IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n"); - clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - wake_up(&mvm->d0i3_exit_waitq); - return 1; - } - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_enter_d0i3_iterator, - &d0i3_iter_data); - if (d0i3_iter_data.vif_count == 1) { - mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id; - mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading; - } else { - WARN_ON_ONCE(d0i3_iter_data.vif_count > 1); - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; - mvm->d0i3_offloading = false; - } - - /* make sure we have no running tx while configuring the seqno */ - synchronize_net(); - - /* configure wowlan configuration only if needed */ - if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { - iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, - &d0i3_iter_data); - - ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags, - sizeof(wowlan_config_cmd), - &wowlan_config_cmd); - if (ret) - return ret; - } - - return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, - flags | CMD_MAKE_TRANS_IDLE, - sizeof(d3_cfg_cmd), &d3_cfg_cmd); -} - -static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = _data; - u32 flags = CMD_ASYNC | CMD_HIGH_PRIO; - - IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr); - if (vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc) - return; - - iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags); -} - -struct iwl_mvm_wakeup_reason_iter_data { - struct iwl_mvm *mvm; - u32 wakeup_reasons; -}; - -static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_wakeup_reason_iter_data *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc && - data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) { - if (data->wakeup_reasons & - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) - iwl_mvm_connection_loss(data->mvm, vif, "D0i3"); - else - ieee80211_beacon_loss(vif); - } -} - -void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) -{ - struct ieee80211_sta *sta = NULL; - struct iwl_mvm_sta *mvm_ap_sta; - int i; - bool wake_queues = false; - - lockdep_assert_held(&mvm->mutex); - - spin_lock_bh(&mvm->d0i3_tx_lock); - - if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT) - goto out; - - IWL_DEBUG_RPM(mvm, "re-enqueue packets\n"); - - /* get the sta in order to update seq numbers and re-enqueue skbs */ - sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id], - lockdep_is_held(&mvm->mutex)); - - if (IS_ERR_OR_NULL(sta)) { - sta = NULL; - goto out; - } - - if (mvm->d0i3_offloading && qos_seq) { - /* update qos seq numbers if offloading was enabled */ - mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta); - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - u16 seq = le16_to_cpu(qos_seq[i]); - /* firmware stores last-used one, we store next one */ - seq += 0x10; - mvm_ap_sta->tid_data[i].seq_number = seq; - } - } -out: - /* re-enqueue (or drop) all packets */ - while (!skb_queue_empty(&mvm->d0i3_tx)) { - struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx); - - if (!sta || iwl_mvm_tx_skb(mvm, skb, sta)) - ieee80211_free_txskb(mvm->hw, skb); - - /* if the skb_queue is not empty, we 
need to wake queues */ - wake_queues = true; - } - clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - wake_up(&mvm->d0i3_exit_waitq); - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; - if (wake_queues) - ieee80211_wake_queues(mvm->hw); - - spin_unlock_bh(&mvm->d0i3_tx_lock); -} - -static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) -{ - struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work); - struct iwl_host_cmd get_status_cmd = { - .id = WOWLAN_GET_STATUSES, - .flags = CMD_HIGH_PRIO | CMD_WANT_SKB, - }; - struct iwl_wowlan_status *status; - int ret; - u32 handled_reasons, wakeup_reasons = 0; - __le16 *qos_seq = NULL; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd(mvm, &get_status_cmd); - if (ret) - goto out; - - if (!get_status_cmd.resp_pkt) - goto out; - - status = (void *)get_status_cmd.resp_pkt->data; - wakeup_reasons = le32_to_cpu(status->wakeup_reasons); - qos_seq = status->qos_seq_ctr; - - IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons); - - handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; - if (wakeup_reasons & handled_reasons) { - struct iwl_mvm_wakeup_reason_iter_data data = { - .mvm = mvm, - .wakeup_reasons = wakeup_reasons, - }; - - ieee80211_iterate_active_interfaces( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d0i3_wakeup_reason_iter, &data); - } -out: - iwl_mvm_d0i3_enable_tx(mvm, qos_seq); - - IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n", - wakeup_reasons); - - /* qos_seq might point inside resp_pkt, so free it only now */ - if (get_status_cmd.resp_pkt) - iwl_free_resp(&get_status_cmd); - - /* the FW might have updated the regdomain */ - iwl_mvm_update_changed_regdom(mvm); - - iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK); - mutex_unlock(&mvm->mutex); -} - -int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm) -{ - u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | - CMD_WAKE_UP_TRANS; - int ret; - - IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); - - mutex_lock(&mvm->d0i3_suspend_mutex); - if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) { - IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n"); - __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags); - mutex_unlock(&mvm->d0i3_suspend_mutex); - return 0; - } - mutex_unlock(&mvm->d0i3_suspend_mutex); - - ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); - if (ret) - goto out; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_exit_d0i3_iterator, - mvm); -out: - schedule_work(&mvm->d0i3_exit_work); - return ret; -} - -int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK); - return _iwl_mvm_exit_d0i3(mvm); -} - -#define IWL_MVM_COMMON_OPS \ - /* these could be differentiated */ \ - .queue_full = iwl_mvm_stop_sw_queue, \ - .queue_not_full = iwl_mvm_wake_sw_queue, \ - .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \ - .free_skb = iwl_mvm_free_skb, \ - .nic_error = iwl_mvm_nic_error, \ - .cmd_queue_full = iwl_mvm_cmd_queue_full, \ - .nic_config = iwl_mvm_nic_config, \ - .enter_d0i3 = iwl_mvm_enter_d0i3, \ - .exit_d0i3 = iwl_mvm_exit_d0i3, \ - /* as we only register one, these MUST be common! 
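
The "seq += 0x10" in iwl_mvm_d0i3_enable_tx() above is less odd than it looks: the values appear to be kept in the IEEE 802.11 sequence-control layout, where the low four bits carry the fragment number and the sequence number proper starts at bit 4, so adding 0x10 turns the firmware's "last used" value into the "next" one that the driver stores. A small arithmetic-only sketch of that encoding (sample value chosen arbitrarily, 4096-wraparound ignored):

#include <stdint.h>
#include <stdio.h>

#define SCTL_SEQ_SHIFT	4	/* bits 0-3: fragment number, bits 4-15: seq number */

int main(void)
{
	uint16_t last_used = 0x02a0;		/* e.g. what the firmware reports */
	uint16_t next = last_used + 0x10;	/* next sequence number, same frag bits */

	printf("last SN = %u, next SN = %u\n",
	       last_used >> SCTL_SEQ_SHIFT, next >> SCTL_SEQ_SHIFT);	/* 42, 43 */
	return 0;
}
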
*/ \ - .start = iwl_op_mode_mvm_start, \ - .stop = iwl_op_mode_mvm_stop - -static const struct iwl_op_mode_ops iwl_mvm_ops = { - IWL_MVM_COMMON_OPS, - .rx = iwl_mvm_rx, -}; - -static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb, - unsigned int queue) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); -} - -static const struct iwl_op_mode_ops iwl_mvm_ops_mq = { - IWL_MVM_COMMON_OPS, - .rx = iwl_mvm_rx_mq, - .rx_rss = iwl_mvm_rx_mq_rss, -}; diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c deleted file mode 100644 index e68a475e3071..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c +++ /dev/null @@ -1,295 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include "fw-api.h" -#include "mvm.h" - -/* Maps the driver specific channel width definition to the fw values */ -u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) -{ - switch (chandef->width) { - case NL80211_CHAN_WIDTH_20_NOHT: - case NL80211_CHAN_WIDTH_20: - return PHY_VHT_CHANNEL_MODE20; - case NL80211_CHAN_WIDTH_40: - return PHY_VHT_CHANNEL_MODE40; - case NL80211_CHAN_WIDTH_80: - return PHY_VHT_CHANNEL_MODE80; - case NL80211_CHAN_WIDTH_160: - return PHY_VHT_CHANNEL_MODE160; - default: - WARN(1, "Invalid channel width=%u", chandef->width); - return PHY_VHT_CHANNEL_MODE20; - } -} - -/* - * Maps the driver specific control channel position (relative to the center - * freq) definitions to the the fw values - */ -u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) -{ - switch (chandef->chan->center_freq - chandef->center_freq1) { - case -70: - return PHY_VHT_CTRL_POS_4_BELOW; - case -50: - return PHY_VHT_CTRL_POS_3_BELOW; - case -30: - return PHY_VHT_CTRL_POS_2_BELOW; - case -10: - return PHY_VHT_CTRL_POS_1_BELOW; - case 10: - return PHY_VHT_CTRL_POS_1_ABOVE; - case 30: - return PHY_VHT_CTRL_POS_2_ABOVE; - case 50: - return PHY_VHT_CTRL_POS_3_ABOVE; - case 70: - return PHY_VHT_CTRL_POS_4_ABOVE; - default: - WARN(1, "Invalid channel definition"); - case 0: - /* - * The FW is expected to check the control channel position only - * when in HT/VHT and the channel width is not 20MHz. Return - * this value as the default one. - */ - return PHY_VHT_CTRL_POS_1_BELOW; - } -} - -/* - * Construct the generic fields of the PHY context command - */ -static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, - struct iwl_phy_context_cmd *cmd, - u32 action, u32 apply_time) -{ - memset(cmd, 0, sizeof(struct iwl_phy_context_cmd)); - - cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, - ctxt->color)); - cmd->action = cpu_to_le32(action); - cmd->apply_time = cpu_to_le32(apply_time); -} - -/* - * Add the phy configuration to the PHY context command - */ -static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, - struct iwl_phy_context_cmd *cmd, - struct cfg80211_chan_def *chandef, - u8 chains_static, u8 chains_dynamic) -{ - u8 active_cnt, idle_cnt; - - /* Set the channel info data */ - cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? - PHY_BAND_24 : PHY_BAND_5); - - cmd->ci.channel = chandef->chan->hw_value; - cmd->ci.width = iwl_mvm_get_channel_width(chandef); - cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); - - /* Set rx the chains */ - idle_cnt = chains_static; - active_cnt = chains_dynamic; - - /* In scenarios where we only ever use a single-stream rates, - * i.e. 
legacy 11b/g/a associations, single-stream APs or even - * static SMPS, enable both chains to get diversity, improving - * the case where we're far enough from the AP that attenuation - * between the two antennas is sufficiently different to impact - * performance. - */ - if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) { - idle_cnt = 2; - active_cnt = 2; - } - - cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << - PHY_RX_CHAIN_VALID_POS); - cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); - cmd->rxchain_info |= cpu_to_le32(active_cnt << - PHY_RX_CHAIN_MIMO_CNT_POS); -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (unlikely(mvm->dbgfs_rx_phyinfo)) - cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); -#endif - - cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); -} - -/* - * Send a command to apply the current phy configuration. The command is send - * only if something in the configuration changed: in case that this is the - * first time that the phy configuration is applied or in case that the phy - * configuration changed from the previous apply. - */ -static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, - struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, - u8 chains_static, u8 chains_dynamic, - u32 action, u32 apply_time) -{ - struct iwl_phy_context_cmd cmd; - int ret; - - /* Set the command header fields */ - iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); - - /* Set the command data */ - iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, - chains_static, chains_dynamic); - - ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, - sizeof(struct iwl_phy_context_cmd), - &cmd); - if (ret) - IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); - return ret; -} - -/* - * Send a command to add a PHY context based on the current HW configuration. - */ -int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, - u8 chains_static, u8 chains_dynamic) -{ - WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - ctxt->ref); - lockdep_assert_held(&mvm->mutex); - - ctxt->channel = chandef->chan; - - return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, - chains_static, chains_dynamic, - FW_CTXT_ACTION_ADD, 0); -} - -/* - * Update the number of references to the given PHY context. This is valid only - * in case the PHY context was already created, i.e., its reference count > 0. - */ -void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) -{ - lockdep_assert_held(&mvm->mutex); - ctxt->ref++; -} - -/* - * Send a command to modify the PHY context based on the current HW - * configuration. Note that the function does not check that the configuration - * changed. 
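
For reference, the control-channel-position lookup in iwl_mvm_get_ctrl_pos() above is purely a function of the signed distance, in MHz, between the primary 20 MHz channel's centre and the centre of the whole 40/80/160 MHz channel: +/-10, +/-30, +/-50 and +/-70 MHz select the first to fourth 20 MHz slot above or below the centre. A small standalone sketch of the same lookup, using my own enum names rather than the firmware's PHY_VHT_CTRL_POS_* constants:

#include <stdio.h>

enum ctrl_pos { POS_4_BELOW, POS_3_BELOW, POS_2_BELOW, POS_1_BELOW,
		POS_1_ABOVE, POS_2_ABOVE, POS_3_ABOVE, POS_4_ABOVE };

static enum ctrl_pos ctrl_pos(int primary_mhz, int center_mhz)
{
	/* each step away from the centre is one 20 MHz slot */
	switch (primary_mhz - center_mhz) {
	case -70: return POS_4_BELOW;
	case -50: return POS_3_BELOW;
	case -30: return POS_2_BELOW;
	case -10: return POS_1_BELOW;
	case  10: return POS_1_ABOVE;
	case  30: return POS_2_ABOVE;
	case  50: return POS_3_ABOVE;
	case  70: return POS_4_ABOVE;
	default:  return POS_1_BELOW;	/* 20 MHz / unknown: position is not used */
	}
}

int main(void)
{
	/* 80 MHz channel spanning ch 36-48: centre 5210 MHz, primary ch 36 at 5180 MHz */
	printf("pos = %d (expect %d, i.e. second slot below centre)\n",
	       ctrl_pos(5180, 5210), POS_2_BELOW);
	return 0;
}
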
- */ -int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, - u8 chains_static, u8 chains_dynamic) -{ - lockdep_assert_held(&mvm->mutex); - - ctxt->channel = chandef->chan; - return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, - chains_static, chains_dynamic, - FW_CTXT_ACTION_MODIFY, 0); -} - -void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) -{ - lockdep_assert_held(&mvm->mutex); - - if (WARN_ON_ONCE(!ctxt)) - return; - - ctxt->ref--; -} - -static void iwl_mvm_binding_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - unsigned long *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (!mvmvif->phy_ctxt) - return; - - if (vif->type == NL80211_IFTYPE_STATION || - vif->type == NL80211_IFTYPE_AP) - __set_bit(mvmvif->phy_ctxt->id, data); -} - -int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm) -{ - unsigned long phy_ctxt_counter = 0; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_binding_iterator, - &phy_ctxt_counter); - - return hweight8(phy_ctxt_counter); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c deleted file mode 100644 index bed9696ee410..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ /dev/null @@ -1,1040 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include -#include -#include - -#include - -#include "iwl-debug.h" -#include "mvm.h" -#include "iwl-modparams.h" -#include "fw-api-power.h" - -#define POWER_KEEP_ALIVE_PERIOD_SEC 25 - -static -int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, - struct iwl_beacon_filter_cmd *cmd, - u32 flags) -{ - IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", - le32_to_cpu(cmd->ba_enable_beacon_abort)); - IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", - le32_to_cpu(cmd->ba_escape_timer)); - IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", - le32_to_cpu(cmd->bf_debug_flag)); - IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", - le32_to_cpu(cmd->bf_enable_beacon_filter)); - IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", - le32_to_cpu(cmd->bf_energy_delta)); - IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", - le32_to_cpu(cmd->bf_escape_timer)); - IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", - le32_to_cpu(cmd->bf_roaming_energy_delta)); - IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", - le32_to_cpu(cmd->bf_roaming_state)); - IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", - le32_to_cpu(cmd->bf_temp_threshold)); - IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", - le32_to_cpu(cmd->bf_temp_fast_filter)); - IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", - le32_to_cpu(cmd->bf_temp_slow_filter)); - - return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags, - sizeof(struct iwl_beacon_filter_cmd), cmd); -} - -static -void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_beacon_filter_cmd *cmd, - bool d0i3) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (vif->bss_conf.cqm_rssi_thold && !d0i3) { - cmd->bf_energy_delta = - cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); - /* fw uses an absolute value for this */ - cmd->bf_roaming_state = - cpu_to_le32(-vif->bss_conf.cqm_rssi_thold); - } - cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); -} - -static void iwl_mvm_power_log(struct iwl_mvm *mvm, - struct iwl_mac_power_cmd *cmd) -{ - IWL_DEBUG_POWER(mvm, - "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n", - cmd->id_and_color, iwlmvm_mod_params.power_scheme, - le16_to_cpu(cmd->flags)); - IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", - le16_to_cpu(cmd->keep_alive_seconds)); - - if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) { - 
IWL_DEBUG_POWER(mvm, "Disable power management\n"); - return; - } - - IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", - le32_to_cpu(cmd->rx_data_timeout)); - IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", - le32_to_cpu(cmd->tx_data_timeout)); - if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) - IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", - cmd->skip_dtim_periods); - if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) - IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", - cmd->lprx_rssi_threshold); - if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { - IWL_DEBUG_POWER(mvm, "uAPSD enabled\n"); - IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n", - le32_to_cpu(cmd->rx_data_timeout_uapsd)); - IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n", - le32_to_cpu(cmd->tx_data_timeout_uapsd)); - IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid); - IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags); - IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp); - } -} - -static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_power_cmd *cmd) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - enum ieee80211_ac_numbers ac; - bool tid_found = false; - - for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { - if (!mvmvif->queue_params[ac].uapsd) - continue; - - if (mvm->cur_ucode != IWL_UCODE_WOWLAN) - cmd->flags |= - cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); - - cmd->uapsd_ac_flags |= BIT(ac); - - /* QNDP TID - the highest TID with no admission control */ - if (!tid_found && !mvmvif->queue_params[ac].acm) { - tid_found = true; - switch (ac) { - case IEEE80211_AC_VO: - cmd->qndp_tid = 6; - break; - case IEEE80211_AC_VI: - cmd->qndp_tid = 5; - break; - case IEEE80211_AC_BE: - cmd->qndp_tid = 0; - break; - case IEEE80211_AC_BK: - cmd->qndp_tid = 1; - break; - } - } - } - - if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* set advanced pm flag with no uapsd ACs to enable ps-poll */ - if (mvmvif->dbgfs_pm.use_ps_poll) - cmd->flags |= - cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); -#endif - return; - } - - cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); - - if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | - BIT(IEEE80211_AC_VI) | - BIT(IEEE80211_AC_BE) | - BIT(IEEE80211_AC_BK))) { - cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); - cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); - cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ? 
- cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : - cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); - } - - cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; - - if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags & - cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { - cmd->rx_data_timeout_uapsd = - cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); - cmd->tx_data_timeout_uapsd = - cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); - } else { - cmd->rx_data_timeout_uapsd = - cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); - cmd->tx_data_timeout_uapsd = - cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); - } - - if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { - cmd->heavy_tx_thld_packets = - IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS; - cmd->heavy_rx_thld_packets = - IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS; - } else { - cmd->heavy_tx_thld_packets = - IWL_MVM_PS_HEAVY_TX_THLD_PACKETS; - cmd->heavy_rx_thld_packets = - IWL_MVM_PS_HEAVY_RX_THLD_PACKETS; - } - cmd->heavy_tx_thld_percentage = - IWL_MVM_PS_HEAVY_TX_THLD_PERCENT; - cmd->heavy_rx_thld_percentage = - IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; -} - -static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, - ETH_ALEN)) - return false; - - if (vif->p2p && - !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD)) - return false; - /* - * Avoid using uAPSD if P2P client is associated to GO that uses - * opportunistic power save. This is due to current FW limitation. - */ - if (vif->p2p && - (vif->bss_conf.p2p_noa_attr.oppps_ctwindow & - IEEE80211_P2P_OPPPS_ENABLE_BIT)) - return false; - - /* - * Avoid using uAPSD if client is in DCM - - * low latency issue in Miracast - */ - if (iwl_mvm_phy_ctx_count(mvm) >= 2) - return false; - - return true; -} - -static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) -{ - struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_channel *chan; - bool radar_detect = false; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - WARN_ON(!chanctx_conf); - if (chanctx_conf) { - chan = chanctx_conf->def.chan; - radar_detect = chan->flags & IEEE80211_CHAN_RADAR; - } - rcu_read_unlock(); - - return radar_detect; -} - -static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_power_cmd *cmd, - bool host_awake) -{ - int dtimper = vif->bss_conf.dtim_period ?: 1; - int skip; - - /* disable, in case we're supposed to override */ - cmd->skip_dtim_periods = 0; - cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); - - if (iwl_mvm_power_is_radar(vif)) - return; - - if (dtimper >= 10) - return; - - /* TODO: check that multicast wake lock is off */ - - if (host_awake) { - if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) - return; - skip = 2; - } else { - int dtimper_tu = dtimper * vif->bss_conf.beacon_int; - - if (WARN_ON(!dtimper_tu)) - return; - /* configure skip over dtim up to 306TU - 314 msec */ - skip = max_t(u8, 1, 306 / dtimper_tu); - } - - /* the firmware really expects "look at every X DTIMs", so add 1 */ - cmd->skip_dtim_periods = 1 + skip; - cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); -} - -static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_power_cmd *cmd, - bool host_awake) -{ - int dtimper, bi; - int keep_alive; - struct iwl_mvm_vif *mvmvif __maybe_unused = - iwl_mvm_vif_from_mac80211(vif); - - 
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)); - dtimper = vif->bss_conf.dtim_period; - bi = vif->bss_conf.beacon_int; - - /* - * Regardless of power management state the driver must set - * keep alive period. FW will use it for sending keep alive NDPs - * immediately after association. Check that keep alive period - * is at least 3 * DTIM - */ - keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi), - USEC_PER_SEC); - keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC); - cmd->keep_alive_seconds = cpu_to_le16(keep_alive); - - if (mvm->ps_disabled) - return; - - cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); - - if (!vif->bss_conf.ps || !mvmvif->pm_enabled) - return; - - if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && - (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) || - !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE)) - return; - - cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); - - if (vif->bss_conf.beacon_rate && - (vif->bss_conf.beacon_rate->bitrate == 10 || - vif->bss_conf.beacon_rate->bitrate == 60)) { - cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); - cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; - } - - iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake); - - if (!host_awake) { - cmd->rx_data_timeout = - cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); - cmd->tx_data_timeout = - cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); - } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) { - cmd->tx_data_timeout = - cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT); - cmd->rx_data_timeout = - cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT); - } else { - cmd->rx_data_timeout = - cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT); - cmd->tx_data_timeout = - cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT); - } - - if (iwl_mvm_power_allow_uapsd(mvm, vif)) - iwl_mvm_power_configure_uapsd(mvm, vif, cmd); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) - cmd->keep_alive_seconds = - cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds); - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { - if (mvmvif->dbgfs_pm.skip_over_dtim) - cmd->flags |= - cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); - else - cmd->flags &= - cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK); - } - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) - cmd->rx_data_timeout = - cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout); - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) - cmd->tx_data_timeout = - cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) - cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods; - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) { - if (mvmvif->dbgfs_pm.lprx_ena) - cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); - else - cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK); - } - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) - cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold; - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) { - if (mvmvif->dbgfs_pm.snooze_ena) - cmd->flags |= - cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); - else - cmd->flags &= - cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK); - } - if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) { - u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK; - if 
(mvmvif->dbgfs_pm.uapsd_misbehaving) - cmd->flags |= cpu_to_le16(flag); - else - cmd->flags &= cpu_to_le16(flag); - } -#endif /* CONFIG_IWLWIFI_DEBUGFS */ -} - -static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_mac_power_cmd cmd = {}; - - iwl_mvm_power_build_cmd(mvm, vif, &cmd, - mvm->cur_ucode != IWL_UCODE_WOWLAN); - iwl_mvm_power_log(mvm, &cmd); -#ifdef CONFIG_IWLWIFI_DEBUGFS - memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); -#endif - - return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0, - sizeof(cmd), &cmd); -} - -int iwl_mvm_power_update_device(struct iwl_mvm *mvm) -{ - struct iwl_device_power_cmd cmd = { - .flags = 0, - }; - - if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) - mvm->ps_disabled = true; - - if (!mvm->ps_disabled) - cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 : - mvm->disable_power_off) - cmd.flags &= - cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); -#endif - IWL_DEBUG_POWER(mvm, - "Sending device power command with flags = 0x%X\n", - cmd.flags); - - return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd), - &cmd); -} - -void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, - ETH_ALEN)) - eth_zero_addr(mvmvif->uapsd_misbehaving_bssid); -} - -static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - u8 *ap_sta_id = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - /* The ap_sta_id is not expected to change during current association - * so no explicit protection is needed - */ - if (mvmvif->ap_sta_id == *ap_sta_id) - memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, - ETH_ALEN); -} - -void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data; - u8 ap_sta_id = le32_to_cpu(notif->sta_id); - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id); -} - -struct iwl_power_vifs { - struct iwl_mvm *mvm; - struct ieee80211_vif *bf_vif; - struct ieee80211_vif *bss_vif; - struct ieee80211_vif *p2p_vif; - struct ieee80211_vif *ap_vif; - struct ieee80211_vif *monitor_vif; - bool p2p_active; - bool bss_active; - bool ap_active; - bool monitor_active; -}; - -static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->pm_enabled = false; -} - -static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - bool *disable_ps = _data; - - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - *disable_ps |= mvmvif->ps_disabled; -} - -static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_power_vifs *power_iterator = _data; - - switch (ieee80211_vif_type_p2p(vif)) { - case NL80211_IFTYPE_P2P_DEVICE: - break; - - case NL80211_IFTYPE_P2P_GO: - case 
NL80211_IFTYPE_AP: - /* only a single MAC of the same type */ - WARN_ON(power_iterator->ap_vif); - power_iterator->ap_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->ap_active = true; - break; - - case NL80211_IFTYPE_MONITOR: - /* only a single MAC of the same type */ - WARN_ON(power_iterator->monitor_vif); - power_iterator->monitor_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->monitor_active = true; - break; - - case NL80211_IFTYPE_P2P_CLIENT: - /* only a single MAC of the same type */ - WARN_ON(power_iterator->p2p_vif); - power_iterator->p2p_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->p2p_active = true; - break; - - case NL80211_IFTYPE_STATION: - /* only a single MAC of the same type */ - WARN_ON(power_iterator->bss_vif); - power_iterator->bss_vif = vif; - if (mvmvif->phy_ctxt) - if (mvmvif->phy_ctxt->id < MAX_PHYS) - power_iterator->bss_active = true; - - if (mvmvif->bf_data.bf_enabled && - !WARN_ON(power_iterator->bf_vif)) - power_iterator->bf_vif = vif; - - break; - - default: - break; - } -} - -static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, - struct iwl_power_vifs *vifs) -{ - struct iwl_mvm_vif *bss_mvmvif = NULL; - struct iwl_mvm_vif *p2p_mvmvif = NULL; - struct iwl_mvm_vif *ap_mvmvif = NULL; - bool client_same_channel = false; - bool ap_same_channel = false; - - lockdep_assert_held(&mvm->mutex); - - /* set pm_enable to false */ - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_power_disable_pm_iterator, - NULL); - - if (vifs->bss_vif) - bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif); - - if (vifs->p2p_vif) - p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif); - - if (vifs->ap_vif) - ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif); - - /* don't allow PM if any TDLS stations exist */ - if (iwl_mvm_tdls_sta_count(mvm, NULL)) - return; - - /* enable PM on bss if bss stand alone */ - if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) { - bss_mvmvif->pm_enabled = true; - return; - } - - /* enable PM on p2p if p2p stand alone */ - if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) { - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM) - p2p_mvmvif->pm_enabled = true; - return; - } - - if (vifs->bss_active && vifs->p2p_active) - client_same_channel = (bss_mvmvif->phy_ctxt->id == - p2p_mvmvif->phy_ctxt->id); - if (vifs->bss_active && vifs->ap_active) - ap_same_channel = (bss_mvmvif->phy_ctxt->id == - ap_mvmvif->phy_ctxt->id); - - /* clients are not stand alone: enable PM if DCM */ - if (!(client_same_channel || ap_same_channel) && - (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) { - if (vifs->bss_active) - bss_mvmvif->pm_enabled = true; - if (vifs->p2p_active && - (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)) - p2p_mvmvif->pm_enabled = true; - return; - } - - /* - * There is only one channel in the system and there are only - * bss and p2p clients that share it - */ - if (client_same_channel && !vifs->ap_active && - (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) { - /* share same channel*/ - bss_mvmvif->pm_enabled = true; - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM) - p2p_mvmvif->pm_enabled = true; - } -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, char *buf, - int bufsz) -{ - struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); - struct iwl_mac_power_cmd cmd = {}; - int pos = 0; - - mutex_lock(&mvm->mutex); - memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd)); - mutex_unlock(&mvm->mutex); - - pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", - iwlmvm_mod_params.power_scheme); - pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", - le16_to_cpu(cmd.flags)); - pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", - le16_to_cpu(cmd.keep_alive_seconds)); - - if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) - return pos; - - pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", - (cmd.flags & - cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0); - pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", - cmd.skip_dtim_periods); - if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { - pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", - le32_to_cpu(cmd.rx_data_timeout)); - pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", - le32_to_cpu(cmd.tx_data_timeout)); - } - if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) - pos += scnprintf(buf+pos, bufsz-pos, - "lprx_rssi_threshold = %d\n", - cmd.lprx_rssi_threshold); - - if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) - return pos; - - pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n", - le32_to_cpu(cmd.rx_data_timeout_uapsd)); - pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n", - le32_to_cpu(cmd.tx_data_timeout_uapsd)); - pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid); - pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n", - cmd.uapsd_ac_flags); - pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n", - cmd.uapsd_max_sp); - pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n", - cmd.heavy_tx_thld_packets); - pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n", - cmd.heavy_rx_thld_packets); - pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n", - cmd.heavy_tx_thld_percentage); - pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n", - cmd.heavy_rx_thld_percentage); - pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n", - (cmd.flags & - cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ? 
- 1 : 0); - - if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK))) - return pos; - - pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n", - cmd.snooze_interval); - pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n", - cmd.snooze_window); - - return pos; -} - -void -iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, - struct iwl_beacon_filter_cmd *cmd) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; - - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) - cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta); - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) - cmd->bf_roaming_energy_delta = - cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta); - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) - cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state); - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD) - cmd->bf_temp_threshold = - cpu_to_le32(dbgfs_bf->bf_temp_threshold); - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER) - cmd->bf_temp_fast_filter = - cpu_to_le32(dbgfs_bf->bf_temp_fast_filter); - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER) - cmd->bf_temp_slow_filter = - cpu_to_le32(dbgfs_bf->bf_temp_slow_filter); - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) - cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag); - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) - cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer); - if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) - cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer); - if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) - cmd->ba_enable_beacon_abort = - cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort); -} -#endif - -static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_beacon_filter_cmd *cmd, - u32 cmd_flags, - bool d0i3) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period || - vif->type != NL80211_IFTYPE_STATION || vif->p2p) - return 0; - - iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3); - if (!d0i3) - iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); - ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags); - - /* don't change bf_enabled in case of temporary d0i3 configuration */ - if (!ret && !d0i3) - mvmvif->bf_data.bf_enabled = true; - - return ret; -} - -int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags) -{ - struct iwl_beacon_filter_cmd cmd = { - IWL_BF_CMD_CONFIG_DEFAULTS, - .bf_enable_beacon_filter = cpu_to_le32(1), - }; - - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); -} - -static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_beacon_filter_cmd cmd = { - IWL_BF_CMD_CONFIG_DEFAULTS, - .bf_enable_beacon_filter = cpu_to_le32(1), - }; - - if (!mvmvif->bf_data.bf_enabled) - return 0; - - if (mvm->cur_ucode == IWL_UCODE_WOWLAN) - cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); - - mvmvif->bf_data.ba_enabled = enable; - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); -} - -int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags) -{ - struct iwl_beacon_filter_cmd cmd = {}; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 
- int ret; - - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) - return 0; - - ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); - - if (!ret) - mvmvif->bf_data.bf_enabled = false; - - return ret; -} - -static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) -{ - bool disable_ps; - int ret; - - /* disable PS if CAM */ - disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); - /* ...or if any of the vifs require PS to be off */ - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_power_ps_disabled_iterator, - &disable_ps); - - /* update device power state if it has changed */ - if (mvm->ps_disabled != disable_ps) { - bool old_ps_disabled = mvm->ps_disabled; - - mvm->ps_disabled = disable_ps; - ret = iwl_mvm_power_update_device(mvm); - if (ret) { - mvm->ps_disabled = old_ps_disabled; - return ret; - } - } - - return 0; -} - -static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, - struct iwl_power_vifs *vifs) -{ - struct iwl_mvm_vif *mvmvif; - bool ba_enable; - - if (!vifs->bf_vif) - return 0; - - mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif); - - ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vifs->bf_vif->bss_conf.ps || - iwl_mvm_vif_low_latency(mvmvif)); - - return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable); -} - -int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) -{ - struct iwl_power_vifs vifs = { - .mvm = mvm, - }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* get vifs info */ - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_power_get_vifs_iterator, &vifs); - - ret = iwl_mvm_power_set_ps(mvm); - if (ret) - return ret; - - return iwl_mvm_power_set_ba(mvm, &vifs); -} - -int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) -{ - struct iwl_power_vifs vifs = { - .mvm = mvm, - }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* get vifs info */ - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_power_get_vifs_iterator, &vifs); - - iwl_mvm_power_set_pm(mvm, &vifs); - - ret = iwl_mvm_power_set_ps(mvm); - if (ret) - return ret; - - if (vifs.bss_vif) { - ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif); - if (ret) - return ret; - } - - if (vifs.p2p_vif) { - ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif); - if (ret) - return ret; - } - - return iwl_mvm_power_set_ba(mvm, &vifs); -} - -int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable, u32 flags) -{ - int ret; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mac_power_cmd cmd = {}; - - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) - return 0; - - if (!vif->bss_conf.assoc) - return 0; - - iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable); - - iwl_mvm_power_log(mvm, &cmd); -#ifdef CONFIG_IWLWIFI_DEBUGFS - memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd)); -#endif - ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags, - sizeof(cmd), &cmd); - if (ret) - return ret; - - /* configure beacon filtering */ - if (mvmvif != mvm->bf_allowed_vif) - return 0; - - if (enable) { - struct iwl_beacon_filter_cmd cmd_bf = { - IWL_BF_CMD_CONFIG_D0I3, - .bf_enable_beacon_filter = cpu_to_le32(1), - }; - ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, - flags, true); - } else { - if (mvmvif->bf_data.bf_enabled) - ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags); - else - ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags); - } - - return ret; -} diff --git 
a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c deleted file mode 100644 index 509a66d05245..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/quota.c +++ /dev/null @@ -1,328 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#include -#include "fw-api.h" -#include "mvm.h" - -#define QUOTA_100 IWL_MVM_MAX_QUOTA -#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100) - -struct iwl_mvm_quota_iterator_data { - int n_interfaces[MAX_BINDINGS]; - int colors[MAX_BINDINGS]; - int low_latency[MAX_BINDINGS]; - int n_low_latency_bindings; - struct ieee80211_vif *disabled_vif; -}; - -static void iwl_mvm_quota_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_quota_iterator_data *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u16 id; - - /* skip disabled interfaces here immediately */ - if (vif == data->disabled_vif) - return; - - if (!mvmvif->phy_ctxt) - return; - - /* currently, PHY ID == binding ID */ - id = mvmvif->phy_ctxt->id; - - /* need at least one binding per PHY */ - BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS); - - if (WARN_ON_ONCE(id >= MAX_BINDINGS)) - return; - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - if (vif->bss_conf.assoc) - break; - return; - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_ADHOC: - if (mvmvif->ap_ibss_active) - break; - return; - case NL80211_IFTYPE_MONITOR: - if (mvmvif->monitor_active) - break; - return; - case NL80211_IFTYPE_P2P_DEVICE: - return; - default: - WARN_ON_ONCE(1); - return; - } - - if (data->colors[id] < 0) - data->colors[id] = mvmvif->phy_ctxt->color; - else - WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color); - - data->n_interfaces[id]++; - - if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) { - data->n_low_latency_bindings++; - data->low_latency[id] = true; - } -} - -static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm, - struct iwl_time_quota_cmd *cmd) -{ -#ifdef CONFIG_NL80211_TESTMODE - struct iwl_mvm_vif *mvmvif; - int i, phy_id = -1, beacon_int = 0; - - if (!mvm->noa_duration || !mvm->noa_vif) - return; - - mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif); - if (!mvmvif->ap_ibss_active) - return; - - phy_id = mvmvif->phy_ctxt->id; - beacon_int = mvm->noa_vif->bss_conf.beacon_int; - - for (i = 0; i < MAX_BINDINGS; i++) { - u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color); - u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS; - u32 quota = le32_to_cpu(cmd->quotas[i].quota); - - if (id != phy_id) - continue; - - quota *= (beacon_int - mvm->noa_duration); - quota /= beacon_int; - - IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", - le32_to_cpu(cmd->quotas[i].quota), quota); - - cmd->quotas[i].quota = cpu_to_le32(quota); - } -#endif -} - -int iwl_mvm_update_quotas(struct iwl_mvm *mvm, - bool force_update, - struct ieee80211_vif *disabled_vif) -{ - struct iwl_time_quota_cmd cmd = {}; - int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat; - struct iwl_mvm_quota_iterator_data data = { - .n_interfaces = {}, - .colors = { -1, -1, -1, -1 }, - .disabled_vif = disabled_vif, - }; - struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd; - bool send = false; - - lockdep_assert_held(&mvm->mutex); - - /* update all upon completion */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - return 0; - - /* iterator data above must match */ - BUILD_BUG_ON(MAX_BINDINGS != 4); - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_quota_iterator, &data); - - /* - * The FW's scheduling session consists of - * IWL_MVM_MAX_QUOTA fragments. 
Divide these fragments - * equally between all the bindings that require quota - */ - num_active_macs = 0; - for (i = 0; i < MAX_BINDINGS; i++) { - cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); - num_active_macs += data.n_interfaces[i]; - } - - n_non_lowlat = num_active_macs; - - if (data.n_low_latency_bindings == 1) { - for (i = 0; i < MAX_BINDINGS; i++) { - if (data.low_latency[i]) { - n_non_lowlat -= data.n_interfaces[i]; - break; - } - } - } - - if (data.n_low_latency_bindings == 1 && n_non_lowlat) { - /* - * Reserve quota for the low latency binding in case that - * there are several data bindings but only a single - * low latency one. Split the rest of the quota equally - * between the other data interfaces. - */ - quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat; - quota_rem = QUOTA_100 - n_non_lowlat * quota - - QUOTA_LOWLAT_MIN; - IWL_DEBUG_QUOTA(mvm, - "quota: low-latency binding active, remaining quota per other binding: %d\n", - quota); - } else if (num_active_macs) { - /* - * There are 0 or more than 1 low latency bindings, or all the - * data interfaces belong to the single low latency binding. - * Split the quota equally between the data interfaces. - */ - quota = QUOTA_100 / num_active_macs; - quota_rem = QUOTA_100 % num_active_macs; - IWL_DEBUG_QUOTA(mvm, - "quota: splitting evenly per binding: %d\n", - quota); - } else { - /* values don't really matter - won't be used */ - quota = 0; - quota_rem = 0; - } - - for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { - if (data.colors[i] < 0) - continue; - - cmd.quotas[idx].id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i])); - - if (data.n_interfaces[i] <= 0) - cmd.quotas[idx].quota = cpu_to_le32(0); - else if (data.n_low_latency_bindings == 1 && n_non_lowlat && - data.low_latency[i]) - /* - * There is more than one binding, but only one of the - * bindings is in low latency. For this case, allocate - * the minimal required quota for the low latency - * binding. 
- */ - cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN); - else - cmd.quotas[idx].quota = - cpu_to_le32(quota * data.n_interfaces[i]); - - WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100, - "Binding=%d, quota=%u > max=%u\n", - idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100); - - cmd.quotas[idx].max_duration = cpu_to_le32(0); - - idx++; - } - - /* Give the remainder of the session to the first data binding */ - for (i = 0; i < MAX_BINDINGS; i++) { - if (le32_to_cpu(cmd.quotas[i].quota) != 0) { - le32_add_cpu(&cmd.quotas[i].quota, quota_rem); - IWL_DEBUG_QUOTA(mvm, - "quota: giving remainder of %d to binding %d\n", - quota_rem, i); - break; - } - } - - iwl_mvm_adjust_quota_for_noa(mvm, &cmd); - - /* check that we have non-zero quota for all valid bindings */ - for (i = 0; i < MAX_BINDINGS; i++) { - if (cmd.quotas[i].id_and_color != last->quotas[i].id_and_color) - send = true; - if (cmd.quotas[i].max_duration != last->quotas[i].max_duration) - send = true; - if (abs((int)le32_to_cpu(cmd.quotas[i].quota) - - (int)le32_to_cpu(last->quotas[i].quota)) - > IWL_MVM_QUOTA_THRESHOLD) - send = true; - if (cmd.quotas[i].id_and_color == cpu_to_le32(FW_CTXT_INVALID)) - continue; - WARN_ONCE(cmd.quotas[i].quota == 0, - "zero quota on binding %d\n", i); - } - - if (!send && !force_update) { - /* don't send a practically unchanged command, the firmware has - * to re-initialize a lot of state and that can have an adverse - * impact on it - */ - return 0; - } - - err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd); - - if (err) - IWL_ERR(mvm, "Failed to send quota: %d\n", err); - else - mvm->last_quota_cmd = cmd; - return err; -} diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c deleted file mode 100644 index d1ad10391b47..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ /dev/null @@ -1,3983 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include -#include - -#include -#include -#include - -#include -#include "rs.h" -#include "fw-api.h" -#include "sta.h" -#include "iwl-op-mode.h" -#include "mvm.h" -#include "debugfs.h" - -#define RS_NAME "iwl-mvm-rs" - -#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ - -/* Calculations of success ratio are done in fixed point where 12800 is 100%. 
- * Use this macro when dealing with thresholds consts set as a percentage - */ -#define RS_PERCENT(x) (128 * x) - -static u8 rs_ht_to_legacy[] = { - [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX, - [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX, - [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX, - [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX, - [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX, - [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX, - [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX, - [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX, - [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX, - [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX, -}; - -static const u8 ant_toggle_lookup[] = { - [ANT_NONE] = ANT_NONE, - [ANT_A] = ANT_B, - [ANT_B] = ANT_C, - [ANT_AB] = ANT_BC, - [ANT_C] = ANT_A, - [ANT_AC] = ANT_AB, - [ANT_BC] = ANT_AC, - [ANT_ABC] = ANT_ABC, -}; - -#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \ - [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ - IWL_RATE_HT_SISO_MCS_##s##_PLCP, \ - IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \ - IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \ - IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\ - IWL_RATE_##rp##M_INDEX, \ - IWL_RATE_##rn##M_INDEX } - -#define IWL_DECLARE_MCS_RATE(s) \ - [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \ - IWL_RATE_HT_SISO_MCS_##s##_PLCP, \ - IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \ - IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \ - IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \ - IWL_RATE_INVM_INDEX, \ - IWL_RATE_INVM_INDEX } - -/* - * Parameter order: - * rate, ht rate, prev rate, next rate - * - * If there isn't a valid next or previous rate then INV is used which - * maps to IWL_RATE_INVALID - * - */ -static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = { - IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */ - IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */ - IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */ - IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */ - IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */ - IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */ - IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */ - IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */ - IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */ - IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */ - IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */ - IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */ - IWL_DECLARE_MCS_RATE(7), /* MCS 7 */ - IWL_DECLARE_MCS_RATE(8), /* MCS 8 */ - IWL_DECLARE_MCS_RATE(9), /* MCS 9 */ -}; - -enum rs_action { - RS_ACTION_STAY = 0, - RS_ACTION_DOWNSCALE = -1, - RS_ACTION_UPSCALE = 1, -}; - -enum rs_column_mode { - RS_INVALID = 0, - RS_LEGACY, - RS_SISO, - RS_MIMO2, -}; - -#define MAX_NEXT_COLUMNS 7 -#define MAX_COLUMN_CHECKS 3 - -struct rs_tx_column; - -typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct rs_rate *rate, - const struct rs_tx_column *next_col); - -struct rs_tx_column { - enum rs_column_mode mode; - u8 ant; - bool sgi; - enum rs_column next_columns[MAX_NEXT_COLUMNS]; - allow_column_func_t checks[MAX_COLUMN_CHECKS]; -}; - -static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct rs_rate *rate, - const struct rs_tx_column *next_col) -{ - return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); -} - -static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct rs_rate *rate, - const struct rs_tx_column *next_col) -{ - struct iwl_mvm_sta *mvmsta; - struct iwl_mvm_vif *mvmvif; - - if (!sta->ht_cap.ht_supported) - return false; - - 
if (sta->smps_mode == IEEE80211_SMPS_STATIC) - return false; - - if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2) - return false; - - if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) - return false; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - - if (mvm->nvm_data->sku_cap_mimo_disabled) - return false; - - return true; -} - -static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct rs_rate *rate, - const struct rs_tx_column *next_col) -{ - if (!sta->ht_cap.ht_supported) - return false; - - return true; -} - -static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct rs_rate *rate, - const struct rs_tx_column *next_col) -{ - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - - if (is_ht20(rate) && (ht_cap->cap & - IEEE80211_HT_CAP_SGI_20)) - return true; - if (is_ht40(rate) && (ht_cap->cap & - IEEE80211_HT_CAP_SGI_40)) - return true; - if (is_ht80(rate) && (vht_cap->cap & - IEEE80211_VHT_CAP_SHORT_GI_80)) - return true; - - return false; -} - -static const struct rs_tx_column rs_tx_columns[] = { - [RS_COLUMN_LEGACY_ANT_A] = { - .mode = RS_LEGACY, - .ant = ANT_A, - .next_columns = { - RS_COLUMN_LEGACY_ANT_B, - RS_COLUMN_SISO_ANT_A, - RS_COLUMN_MIMO2, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - }, - .checks = { - rs_ant_allow, - }, - }, - [RS_COLUMN_LEGACY_ANT_B] = { - .mode = RS_LEGACY, - .ant = ANT_B, - .next_columns = { - RS_COLUMN_LEGACY_ANT_A, - RS_COLUMN_SISO_ANT_B, - RS_COLUMN_MIMO2, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - }, - .checks = { - rs_ant_allow, - }, - }, - [RS_COLUMN_SISO_ANT_A] = { - .mode = RS_SISO, - .ant = ANT_A, - .next_columns = { - RS_COLUMN_SISO_ANT_B, - RS_COLUMN_MIMO2, - RS_COLUMN_SISO_ANT_A_SGI, - RS_COLUMN_LEGACY_ANT_A, - RS_COLUMN_LEGACY_ANT_B, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - }, - .checks = { - rs_siso_allow, - rs_ant_allow, - }, - }, - [RS_COLUMN_SISO_ANT_B] = { - .mode = RS_SISO, - .ant = ANT_B, - .next_columns = { - RS_COLUMN_SISO_ANT_A, - RS_COLUMN_MIMO2, - RS_COLUMN_SISO_ANT_B_SGI, - RS_COLUMN_LEGACY_ANT_A, - RS_COLUMN_LEGACY_ANT_B, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - }, - .checks = { - rs_siso_allow, - rs_ant_allow, - }, - }, - [RS_COLUMN_SISO_ANT_A_SGI] = { - .mode = RS_SISO, - .ant = ANT_A, - .sgi = true, - .next_columns = { - RS_COLUMN_SISO_ANT_B_SGI, - RS_COLUMN_MIMO2_SGI, - RS_COLUMN_SISO_ANT_A, - RS_COLUMN_LEGACY_ANT_A, - RS_COLUMN_LEGACY_ANT_B, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - }, - .checks = { - rs_siso_allow, - rs_ant_allow, - rs_sgi_allow, - }, - }, - [RS_COLUMN_SISO_ANT_B_SGI] = { - .mode = RS_SISO, - .ant = ANT_B, - .sgi = true, - .next_columns = { - RS_COLUMN_SISO_ANT_A_SGI, - RS_COLUMN_MIMO2_SGI, - RS_COLUMN_SISO_ANT_B, - RS_COLUMN_LEGACY_ANT_A, - RS_COLUMN_LEGACY_ANT_B, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - }, - .checks = { - rs_siso_allow, - rs_ant_allow, - rs_sgi_allow, - }, - }, - [RS_COLUMN_MIMO2] = { - .mode = RS_MIMO2, - .ant = ANT_AB, - .next_columns = { - RS_COLUMN_SISO_ANT_A, - RS_COLUMN_MIMO2_SGI, - RS_COLUMN_LEGACY_ANT_A, - RS_COLUMN_LEGACY_ANT_B, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - }, - .checks = { - rs_mimo_allow, - }, - }, - [RS_COLUMN_MIMO2_SGI] = { - .mode = RS_MIMO2, - .ant = ANT_AB, - .sgi = true, - .next_columns = { - RS_COLUMN_SISO_ANT_A_SGI, - RS_COLUMN_MIMO2, - RS_COLUMN_LEGACY_ANT_A, - RS_COLUMN_LEGACY_ANT_B, 
- RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - RS_COLUMN_INVALID, - }, - .checks = { - rs_mimo_allow, - rs_sgi_allow, - }, - }, -}; - -static inline u8 rs_extract_rate(u32 rate_n_flags) -{ - /* also works for HT because bits 7:6 are zero there */ - return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK); -} - -static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) -{ - int idx = 0; - - if (rate_n_flags & RATE_MCS_HT_MSK) { - idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK; - idx += IWL_RATE_MCS_0_INDEX; - - /* skip 9M not supported in HT*/ - if (idx >= IWL_RATE_9M_INDEX) - idx += 1; - if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE)) - return idx; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { - idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; - idx += IWL_RATE_MCS_0_INDEX; - - /* skip 9M not supported in VHT*/ - if (idx >= IWL_RATE_9M_INDEX) - idx++; - if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE)) - return idx; - } else { - /* legacy rate format, search for match in table */ - - u8 legacy_rate = rs_extract_rate(rate_n_flags); - for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) - if (iwl_rates[idx].plcp == legacy_rate) - return idx; - } - - return IWL_RATE_INVALID; -} - -static void rs_rate_scale_perform(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - int tid); -static void rs_fill_lq_cmd(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - const struct rs_rate *initial_rate); -static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); - -/** - * The following tables contain the expected throughput metrics for all rates - * - * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits - * - * where invalid entries are zeros. - * - * CCK rates are only valid in legacy table and will only be used in G - * (2.4 GHz) band. - */ - -static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = { - 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0 -}; - -/* Expected TpT tables. 
4 indexes: - * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI - */ -static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0}, - {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0}, - {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0}, - {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0}, -}; - -static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275}, - {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280}, - {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173}, - {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284}, -}; - -static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308}, - {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312}, - {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466}, - {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691}, -}; - -static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0}, - {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0}, - {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0}, - {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0}, -}; - -static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300}, - {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303}, - {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053}, - {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221}, -}; - -static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319}, - {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320}, - {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219}, - {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545}, -}; - -/* mbps, mcs */ -static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { - { "1", "BPSK DSSS"}, - { "2", "QPSK DSSS"}, - {"5.5", "BPSK CCK"}, - { "11", "QPSK CCK"}, - { "6", "BPSK 1/2"}, - { "9", "BPSK 1/2"}, - { "12", "QPSK 1/2"}, - { "18", "QPSK 3/4"}, - { "24", "16QAM 1/2"}, - { "36", "16QAM 3/4"}, - { "48", "64QAM 2/3"}, - { "54", "64QAM 3/4"}, - { "60", "64QAM 5/6"}, -}; - -#define MCS_INDEX_PER_STREAM (8) - -static const char *rs_pretty_ant(u8 ant) -{ - static const char * const ant_name[] = { - [ANT_NONE] = "None", - [ANT_A] = "A", - [ANT_B] = "B", - [ANT_AB] = "AB", - [ANT_C] = "C", - [ANT_AC] = "AC", - [ANT_BC] = "BC", - [ANT_ABC] = "ABC", - }; - - if (ant > ANT_ABC) - return "UNKNOWN"; - - return ant_name[ant]; -} - -static const char *rs_pretty_lq_type(enum iwl_table_type type) -{ - static const char * const lq_types[] = { - [LQ_NONE] = "NONE", - [LQ_LEGACY_A] = "LEGACY_A", - [LQ_LEGACY_G] = "LEGACY_G", - [LQ_HT_SISO] = "HT SISO", - [LQ_HT_MIMO2] = "HT MIMO", - [LQ_VHT_SISO] = "VHT SISO", - [LQ_VHT_MIMO2] = "VHT MIMO", - }; - - if (type < LQ_NONE || type >= LQ_MAX) - return "UNKNOWN"; - - return lq_types[type]; -} - -static char *rs_pretty_rate(const struct rs_rate *rate) -{ - static char buf[40]; - static const char * const legacy_rates[] = { - [IWL_RATE_1M_INDEX] = "1M", - [IWL_RATE_2M_INDEX] = "2M", - [IWL_RATE_5M_INDEX] = "5.5M", - 
[IWL_RATE_11M_INDEX] = "11M", - [IWL_RATE_6M_INDEX] = "6M", - [IWL_RATE_9M_INDEX] = "9M", - [IWL_RATE_12M_INDEX] = "12M", - [IWL_RATE_18M_INDEX] = "18M", - [IWL_RATE_24M_INDEX] = "24M", - [IWL_RATE_36M_INDEX] = "36M", - [IWL_RATE_48M_INDEX] = "48M", - [IWL_RATE_54M_INDEX] = "54M", - }; - static const char *const ht_vht_rates[] = { - [IWL_RATE_MCS_0_INDEX] = "MCS0", - [IWL_RATE_MCS_1_INDEX] = "MCS1", - [IWL_RATE_MCS_2_INDEX] = "MCS2", - [IWL_RATE_MCS_3_INDEX] = "MCS3", - [IWL_RATE_MCS_4_INDEX] = "MCS4", - [IWL_RATE_MCS_5_INDEX] = "MCS5", - [IWL_RATE_MCS_6_INDEX] = "MCS6", - [IWL_RATE_MCS_7_INDEX] = "MCS7", - [IWL_RATE_MCS_8_INDEX] = "MCS8", - [IWL_RATE_MCS_9_INDEX] = "MCS9", - }; - const char *rate_str; - - if (is_type_legacy(rate->type)) - rate_str = legacy_rates[rate->index]; - else if (is_type_ht(rate->type) || is_type_vht(rate->type)) - rate_str = ht_vht_rates[rate->index]; - else - rate_str = "BAD_RATE"; - - sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type), - rs_pretty_ant(rate->ant), rate_str); - return buf; -} - -static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate, - const char *prefix) -{ - IWL_DEBUG_RATE(mvm, - "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n", - prefix, rs_pretty_rate(rate), rate->bw, - rate->sgi, rate->ldpc, rate->stbc); -} - -static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) -{ - window->data = 0; - window->success_counter = 0; - window->success_ratio = IWL_INVALID_VALUE; - window->counter = 0; - window->average_tpt = IWL_INVALID_VALUE; -} - -static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm, - struct iwl_scale_tbl_info *tbl) -{ - int i; - - IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); - for (i = 0; i < IWL_RATE_COUNT; i++) - rs_rate_scale_clear_window(&tbl->win[i]); - - for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++) - rs_rate_scale_clear_window(&tbl->tpc_win[i]); -} - -static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) -{ - return (ant_type & valid_antenna) == ant_type; -} - -static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_data, u8 tid, - struct ieee80211_sta *sta) -{ - int ret = -EAGAIN; - - IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", - sta->addr, tid); - ret = ieee80211_start_tx_ba_session(sta, tid, 5000); - if (ret == -EAGAIN) { - /* - * driver and mac80211 is out of sync - * this might be cause by reloading firmware - * stop the tx ba session here - */ - IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n", - tid); - ieee80211_stop_tx_ba_session(sta, tid); - } - return ret; -} - -static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid, - struct iwl_lq_sta *lq_data, - struct ieee80211_sta *sta) -{ - if (tid < IWL_MAX_TID_COUNT) - rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta); - else - IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n", - tid, IWL_MAX_TID_COUNT); -} - -static inline int get_num_of_ant_from_rate(u32 rate_n_flags) -{ - return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_C_MSK); -} - -/* - * Static function to get the expected throughput from an iwl_scale_tbl_info - * that wraps a NULL pointer check - */ -static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) -{ - if (tbl->expected_tpt) - return tbl->expected_tpt[rs_index]; - return 0; -} - -/** - * rs_collect_tx_data - Update the success/failure sliding window - * - * We keep a sliding window of the last 62 packets transmitted - * at this rate. 
window->data contains the bitmask of successful - * packets. - */ -static int _rs_collect_tx_data(struct iwl_mvm *mvm, - struct iwl_scale_tbl_info *tbl, - int scale_index, int attempts, int successes, - struct iwl_rate_scale_data *window) -{ - static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); - s32 fail_count, tpt; - - /* Get expected throughput */ - tpt = get_expected_tpt(tbl, scale_index); - - /* - * Keep track of only the latest 62 tx frame attempts in this rate's - * history window; anything older isn't really relevant any more. - * If we have filled up the sliding window, drop the oldest attempt; - * if the oldest attempt (highest bit in bitmap) shows "success", - * subtract "1" from the success counter (this is the main reason - * we keep these bitmaps!). - */ - while (attempts > 0) { - if (window->counter >= IWL_RATE_MAX_WINDOW) { - /* remove earliest */ - window->counter = IWL_RATE_MAX_WINDOW - 1; - - if (window->data & mask) { - window->data &= ~mask; - window->success_counter--; - } - } - - /* Increment frames-attempted counter */ - window->counter++; - - /* Shift bitmap by one frame to throw away oldest history */ - window->data <<= 1; - - /* Mark the most recent #successes attempts as successful */ - if (successes > 0) { - window->success_counter++; - window->data |= 0x1; - successes--; - } - - attempts--; - } - - /* Calculate current success ratio, avoid divide-by-0! */ - if (window->counter > 0) - window->success_ratio = 128 * (100 * window->success_counter) - / window->counter; - else - window->success_ratio = IWL_INVALID_VALUE; - - fail_count = window->counter - window->success_counter; - - /* Calculate average throughput, if we have enough history. */ - if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) || - (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) - window->average_tpt = (window->success_ratio * tpt + 64) / 128; - else - window->average_tpt = IWL_INVALID_VALUE; - - return 0; -} - -static int rs_collect_tx_data(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - int scale_index, int attempts, int successes, - u8 reduced_txp) -{ - struct iwl_rate_scale_data *window = NULL; - int ret; - - if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) - return -EINVAL; - - if (tbl->column != RS_COLUMN_INVALID) { - struct lq_sta_pers *pers = &lq_sta->pers; - - pers->tx_stats[tbl->column][scale_index].total += attempts; - pers->tx_stats[tbl->column][scale_index].success += successes; - } - - /* Select window for current tx bit rate */ - window = &(tbl->win[scale_index]); - - ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, - window); - if (ret) - return ret; - - if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION)) - return -EINVAL; - - window = &tbl->tpc_win[reduced_txp]; - return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, - window); -} - -/* Convert rs_rate object into ucode rate bitmask */ -static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, - struct rs_rate *rate) -{ - u32 ucode_rate = 0; - int index = rate->index; - - ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) & - RATE_MCS_ANT_ABC_MSK); - - if (is_legacy(rate)) { - ucode_rate |= iwl_rates[index].plcp; - if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) - ucode_rate |= RATE_MCS_CCK_MSK; - return ucode_rate; - } - - if (is_ht(rate)) { - if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) { - IWL_ERR(mvm, "Invalid HT rate index %d\n", index); - index = IWL_LAST_HT_RATE; - } - ucode_rate |= 
RATE_MCS_HT_MSK; - - if (is_ht_siso(rate)) - ucode_rate |= iwl_rates[index].plcp_ht_siso; - else if (is_ht_mimo2(rate)) - ucode_rate |= iwl_rates[index].plcp_ht_mimo2; - else - WARN_ON_ONCE(1); - } else if (is_vht(rate)) { - if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) { - IWL_ERR(mvm, "Invalid VHT rate index %d\n", index); - index = IWL_LAST_VHT_RATE; - } - ucode_rate |= RATE_MCS_VHT_MSK; - if (is_vht_siso(rate)) - ucode_rate |= iwl_rates[index].plcp_vht_siso; - else if (is_vht_mimo2(rate)) - ucode_rate |= iwl_rates[index].plcp_vht_mimo2; - else - WARN_ON_ONCE(1); - - } else { - IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type); - } - - if (is_siso(rate) && rate->stbc) { - /* To enable STBC we need to set both a flag and ANT_AB */ - ucode_rate |= RATE_MCS_ANT_AB_MSK; - ucode_rate |= RATE_MCS_VHT_STBC_MSK; - } - - ucode_rate |= rate->bw; - if (rate->sgi) - ucode_rate |= RATE_MCS_SGI_MSK; - if (rate->ldpc) - ucode_rate |= RATE_MCS_LDPC_MSK; - - return ucode_rate; -} - -/* Convert a ucode rate into an rs_rate object */ -static int rs_rate_from_ucode_rate(const u32 ucode_rate, - enum ieee80211_band band, - struct rs_rate *rate) -{ - u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK; - u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate); - u8 nss; - - memset(rate, 0, sizeof(*rate)); - rate->index = iwl_hwrate_to_plcp_idx(ucode_rate); - - if (rate->index == IWL_RATE_INVALID) - return -EINVAL; - - rate->ant = (ant_msk >> RATE_MCS_ANT_POS); - - /* Legacy */ - if (!(ucode_rate & RATE_MCS_HT_MSK) && - !(ucode_rate & RATE_MCS_VHT_MSK)) { - if (num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) - rate->type = LQ_LEGACY_A; - else - rate->type = LQ_LEGACY_G; - } - - return 0; - } - - /* HT or VHT */ - if (ucode_rate & RATE_MCS_SGI_MSK) - rate->sgi = true; - if (ucode_rate & RATE_MCS_LDPC_MSK) - rate->ldpc = true; - if (ucode_rate & RATE_MCS_VHT_STBC_MSK) - rate->stbc = true; - if (ucode_rate & RATE_MCS_BF_MSK) - rate->bfer = true; - - rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK; - - if (ucode_rate & RATE_MCS_HT_MSK) { - nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >> - RATE_HT_MCS_NSS_POS) + 1; - - if (nss == 1) { - rate->type = LQ_HT_SISO; - WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, - "stbc %d bfer %d", - rate->stbc, rate->bfer); - } else if (nss == 2) { - rate->type = LQ_HT_MIMO2; - WARN_ON_ONCE(num_of_ant != 2); - } else { - WARN_ON_ONCE(1); - } - } else if (ucode_rate & RATE_MCS_VHT_MSK) { - nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; - - if (nss == 1) { - rate->type = LQ_VHT_SISO; - WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, - "stbc %d bfer %d", - rate->stbc, rate->bfer); - } else if (nss == 2) { - rate->type = LQ_VHT_MIMO2; - WARN_ON_ONCE(num_of_ant != 2); - } else { - WARN_ON_ONCE(1); - } - } - - WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160); - WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 && - !is_vht(rate)); - - return 0; -} - -/* switch to another antenna/antennas and return 1 */ -/* if no other valid antenna found, return 0 */ -static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate) -{ - u8 new_ant_type; - - if (!rate->ant || rate->ant > ANT_ABC) - return 0; - - if (!rs_is_valid_ant(valid_ant, rate->ant)) - return 0; - - new_ant_type = ant_toggle_lookup[rate->ant]; - - while ((new_ant_type != rate->ant) && - !rs_is_valid_ant(valid_ant, new_ant_type)) - new_ant_type = ant_toggle_lookup[new_ant_type]; - - if (new_ant_type == rate->ant) - return 0; - - rate->ant = new_ant_type; - - return 1; -} - 
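
The rate-scaling history handling above (rs_collect_tx_data/_rs_collect_tx_data) keeps, per rate, a 62-frame sliding window: a bitmask of ack results plus attempt and success counters, from which a fixed-point success ratio is derived (12800 == 100%, i.e. 128 * percent, matching the RS_PERCENT macro). A minimal standalone sketch of that bookkeeping follows; it assumes nothing beyond the constants visible in the code above, and the demo_* names are hypothetical illustrations, not part of the driver or of this patch.

#include <stdint.h>
#include <stdio.h>

#define DEMO_RATE_MAX_WINDOW 62          /* frames of history kept per rate */
#define DEMO_INVALID_VALUE   (-1)

struct demo_window {
	uint64_t data;            /* bitmask of successful frames            */
	int counter;              /* frames attempted in the window          */
	int success_counter;      /* frames acked in the window              */
	int success_ratio;        /* fixed point: 128 * percent              */
};

static void demo_collect(struct demo_window *w, int attempts, int successes)
{
	const uint64_t oldest = 1ULL << (DEMO_RATE_MAX_WINDOW - 1);

	while (attempts-- > 0) {
		if (w->counter >= DEMO_RATE_MAX_WINDOW) {
			/* window is full: drop the oldest attempt first */
			w->counter = DEMO_RATE_MAX_WINDOW - 1;
			if (w->data & oldest) {
				w->data &= ~oldest;
				w->success_counter--;
			}
		}
		w->counter++;
		w->data <<= 1;            /* shift history, newest frame is bit 0 */
		if (successes > 0) {
			w->success_counter++;
			w->data |= 0x1;
			successes--;
		}
	}

	if (w->counter > 0)
		w->success_ratio =
			128 * (100 * w->success_counter) / w->counter;
	else
		w->success_ratio = DEMO_INVALID_VALUE;
}

int main(void)
{
	struct demo_window w = { 0 };

	demo_collect(&w, 10, 8);  /* 10 attempts, 8 acked */
	printf("success_ratio = %d (12800 == 100%%)\n", w.success_ratio);
	return 0;
}
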
-static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta, - struct rs_rate *rate) -{ - if (is_legacy(rate)) - return lq_sta->active_legacy_rate; - else if (is_siso(rate)) - return lq_sta->active_siso_rate; - else if (is_mimo2(rate)) - return lq_sta->active_mimo2_rate; - - WARN_ON_ONCE(1); - return 0; -} - -static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, - int rate_type) -{ - u8 high = IWL_RATE_INVALID; - u8 low = IWL_RATE_INVALID; - - /* 802.11A or ht walks to the next literal adjacent rate in - * the rate table */ - if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) { - int i; - u32 mask; - - /* Find the previous rate that is in the rate mask */ - i = index - 1; - for (mask = (1 << i); i >= 0; i--, mask >>= 1) { - if (rate_mask & mask) { - low = i; - break; - } - } - - /* Find the next rate that is in the rate mask */ - i = index + 1; - for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { - if (rate_mask & mask) { - high = i; - break; - } - } - - return (high << 8) | low; - } - - low = index; - while (low != IWL_RATE_INVALID) { - low = iwl_rates[low].prev_rs; - if (low == IWL_RATE_INVALID) - break; - if (rate_mask & (1 << low)) - break; - } - - high = index; - while (high != IWL_RATE_INVALID) { - high = iwl_rates[high].next_rs; - if (high == IWL_RATE_INVALID) - break; - if (rate_mask & (1 << high)) - break; - } - - return (high << 8) | low; -} - -static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta, - struct rs_rate *rate) -{ - return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate); -} - -/* Get the next supported lower rate in the current column. - * Return true if bottom rate in the current column was reached - */ -static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta, - struct rs_rate *rate) -{ - u8 low; - u16 high_low; - u16 rate_mask; - struct iwl_mvm *mvm = lq_sta->pers.drv; - - rate_mask = rs_get_supported_rates(lq_sta, rate); - high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask, - rate->type); - low = high_low & 0xff; - - /* Bottom rate of column reached */ - if (low == IWL_RATE_INVALID) - return true; - - rate->index = low; - return false; -} - -/* Get the next rate to use following a column downgrade */ -static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, - struct rs_rate *rate) -{ - struct iwl_mvm *mvm = lq_sta->pers.drv; - - if (is_legacy(rate)) { - /* No column to downgrade from Legacy */ - return; - } else if (is_siso(rate)) { - /* Downgrade to Legacy if we were in SISO */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) - rate->type = LQ_LEGACY_A; - else - rate->type = LQ_LEGACY_G; - - rate->bw = RATE_MCS_CHAN_WIDTH_20; - - WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX || - rate->index > IWL_RATE_MCS_9_INDEX); - - rate->index = rs_ht_to_legacy[rate->index]; - rate->ldpc = false; - } else { - /* Downgrade to SISO with same MCS if in MIMO */ - rate->type = is_vht_mimo2(rate) ? - LQ_VHT_SISO : LQ_HT_SISO; - } - - if (num_of_ant(rate->ant) > 1) - rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm)); - - /* Relevant in both switching to SISO or Legacy */ - rate->sgi = false; - - if (!rs_rate_supported(lq_sta, rate)) - rs_get_lower_rate_in_column(lq_sta, rate); -} - -/* Check if both rates are identical - * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B - * with a rate indicating STBC/BFER and ANT_AB. 
- */ -static inline bool rs_rate_equal(struct rs_rate *a, - struct rs_rate *b, - bool allow_ant_mismatch) - -{ - bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) && - (a->bfer == b->bfer); - - if (allow_ant_mismatch) { - if (a->stbc || a->bfer) { - WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d", - a->stbc, a->bfer, a->ant); - ant_match |= (b->ant == ANT_A || b->ant == ANT_B); - } else if (b->stbc || b->bfer) { - WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d", - b->stbc, b->bfer, b->ant); - ant_match |= (a->ant == ANT_A || a->ant == ANT_B); - } - } - - return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) && - (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match; -} - -/* Check if both rates share the same column */ -static inline bool rs_rate_column_match(struct rs_rate *a, - struct rs_rate *b) -{ - bool ant_match; - - if (a->stbc || a->bfer) - ant_match = (b->ant == ANT_A || b->ant == ANT_B); - else - ant_match = (a->ant == b->ant); - - return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) - && ant_match; -} - -static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate) -{ - if (is_legacy(rate)) { - if (rate->ant == ANT_A) - return RS_COLUMN_LEGACY_ANT_A; - - if (rate->ant == ANT_B) - return RS_COLUMN_LEGACY_ANT_B; - - goto err; - } - - if (is_siso(rate)) { - if (rate->ant == ANT_A || rate->stbc || rate->bfer) - return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI : - RS_COLUMN_SISO_ANT_A; - - if (rate->ant == ANT_B) - return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI : - RS_COLUMN_SISO_ANT_B; - - goto err; - } - - if (is_mimo(rate)) - return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2; - -err: - return RS_COLUMN_INVALID; -} - -static u8 rs_get_tid(struct ieee80211_hdr *hdr) -{ - u8 tid = IWL_MAX_TID_COUNT; - - if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & 0xf; - } - - if (unlikely(tid > IWL_MAX_TID_COUNT)) - tid = IWL_MAX_TID_COUNT; - - return tid; -} - -void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, struct ieee80211_tx_info *info) -{ - int legacy_success; - int retries; - int i; - struct iwl_lq_cmd *table; - u32 lq_hwrate; - struct rs_rate lq_rate, tx_resp_rate; - struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; - u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; - u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; - bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_LQ_SS_PARAMS); - - /* Treat uninitialized rate scaling data same as non-existing. 
*/ - if (!lq_sta) { - IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n"); - return; - } else if (!lq_sta->pers.drv) { - IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); - return; - } - - /* This packet was aggregated but doesn't carry status info */ - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && - !(info->flags & IEEE80211_TX_STAT_AMPDU)) - return; - - rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); - -#ifdef CONFIG_MAC80211_DEBUGFS - /* Disable last tx check if we are debugging with fixed rate but - * update tx stats */ - if (lq_sta->pers.dbg_fixed_rate) { - int index = tx_resp_rate.index; - enum rs_column column; - int attempts, success; - - column = rs_get_column_from_rate(&tx_resp_rate); - if (WARN_ONCE(column == RS_COLUMN_INVALID, - "Can't map rate 0x%x to column", - tx_resp_hwrate)) - return; - - if (info->flags & IEEE80211_TX_STAT_AMPDU) { - attempts = info->status.ampdu_len; - success = info->status.ampdu_ack_len; - } else { - attempts = info->status.rates[0].count; - success = !!(info->flags & IEEE80211_TX_STAT_ACK); - } - - lq_sta->pers.tx_stats[column][index].total += attempts; - lq_sta->pers.tx_stats[column][index].success += success; - - IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n", - tx_resp_hwrate, success, attempts); - return; - } -#endif - - if (time_after(jiffies, - (unsigned long)(lq_sta->last_tx + - (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { - int t; - - IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); - for (t = 0; t < IWL_MAX_TID_COUNT; t++) - ieee80211_stop_tx_ba_session(sta, t); - - iwl_mvm_rs_rate_init(mvm, sta, info->band, false); - return; - } - lq_sta->last_tx = jiffies; - - /* Ignore this Tx frame response if its initial rate doesn't match - * that of latest Link Quality command. There may be stragglers - * from a previous Link Quality command, but we're no longer interested - * in those; they're either from the "active" mode while we're trying - * to check "search" mode, or a prior "search" mode after we've moved - * to a new "search" mode (which might become the new "active" mode). - */ - table = &lq_sta->lq; - lq_hwrate = le32_to_cpu(table->rs_table[0]); - rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); - - /* Here we actually compare this rate to the latest LQ command */ - if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { - IWL_DEBUG_RATE(mvm, - "initial tx resp rate 0x%x does not match 0x%x\n", - tx_resp_hwrate, lq_hwrate); - - /* - * Since rates mis-match, the last LQ command may have failed. - * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with - * ... driver. - */ - lq_sta->missed_rate_counter++; - if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) { - lq_sta->missed_rate_counter = 0; - IWL_DEBUG_RATE(mvm, - "Too many rates mismatch. Send sync LQ. 
rs_state %d\n", - lq_sta->rs_state); - iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); - } - /* Regardless, ignore this status info for outdated rate */ - return; - } else - /* Rate did match, so reset the missed_rate_counter */ - lq_sta->missed_rate_counter = 0; - - if (!lq_sta->search_better_tbl) { - curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - } else { - curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - } - - if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) { - IWL_DEBUG_RATE(mvm, - "Neither active nor search matches tx rate\n"); - tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE"); - tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH"); - rs_dump_rate(mvm, &lq_rate, "ACTUAL"); - - /* - * no matching table found, let's by-pass the data collection - * and continue to perform rate scale to find the rate table - */ - rs_stay_in_table(lq_sta, true); - goto done; - } - - /* - * Updating the frame history depends on whether packets were - * aggregated. - * - * For aggregation, all packets were transmitted at the same rate, the - * first index into rate scale table. - */ - if (info->flags & IEEE80211_TX_STAT_AMPDU) { - /* ampdu_ack_len = 0 marks no BA was received. In this case - * treat it as a single frame loss as we don't want the success - * ratio to dip too quickly because a BA wasn't received - */ - if (info->status.ampdu_ack_len == 0) - info->status.ampdu_len = 1; - - rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index, - info->status.ampdu_len, - info->status.ampdu_ack_len, - reduced_txp); - - /* Update success/fail counts if not searching for new mode */ - if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { - lq_sta->total_success += info->status.ampdu_ack_len; - lq_sta->total_failed += (info->status.ampdu_len - - info->status.ampdu_ack_len); - } - } else { - /* For legacy, update frame history with for each Tx retry. */ - retries = info->status.rates[0].count - 1; - /* HW doesn't send more than 15 retries */ - retries = min(retries, 15); - - /* The last transmission may have been successful */ - legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); - /* Collect data for each rate used during failed TX attempts */ - for (i = 0; i <= retries; ++i) { - lq_hwrate = le32_to_cpu(table->rs_table[i]); - rs_rate_from_ucode_rate(lq_hwrate, info->band, - &lq_rate); - /* - * Only collect stats if retried rate is in the same RS - * table as active/search. - */ - if (rs_rate_column_match(&lq_rate, &curr_tbl->rate)) - tmp_tbl = curr_tbl; - else if (rs_rate_column_match(&lq_rate, - &other_tbl->rate)) - tmp_tbl = other_tbl; - else - continue; - - rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index, - 1, i < retries ? 0 : legacy_success, - reduced_txp); - } - - /* Update success/fail counts if not searching for new mode */ - if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { - lq_sta->total_success += legacy_success; - lq_sta->total_failed += retries + (1 - legacy_success); - } - } - /* The last TX rate is cached in lq_sta; it's set in if/else above */ - lq_sta->last_rate_n_flags = lq_hwrate; - IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); -done: - /* See if there's a better rate or modulation mode to try. 
*/ - if (sta->supp_rates[info->band]) - rs_rate_scale_perform(mvm, sta, lq_sta, tid); -} - -/* - * mac80211 sends us Tx status - */ -static void rs_mac80211_tx_status(void *mvm_r, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r; - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (!iwl_mvm_sta_from_mac80211(sta)->vif) - return; - - if (!ieee80211_is_data(hdr->frame_control) || - info->flags & IEEE80211_TX_CTL_NO_ACK) - return; - - iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info); -} - -/* - * Begin a period of staying with a selected modulation mode. - * Set "stay_in_tbl" flag to prevent any mode switches. - * Set frame tx success limits according to legacy vs. high-throughput, - * and reset overall (spanning all rates) tx success history statistics. - * These control how long we stay using same modulation mode before - * searching for a new mode. - */ -static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy, - struct iwl_lq_sta *lq_sta) -{ - IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n"); - lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN; - if (is_legacy) { - lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT; - lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT; - lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT; - } else { - lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT; - lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT; - lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT; - } - lq_sta->table_count = 0; - lq_sta->total_failed = 0; - lq_sta->total_success = 0; - lq_sta->flush_timer = jiffies; - lq_sta->visited_columns = 0; -} - -static inline int rs_get_max_rate_from_mask(unsigned long rate_mask) -{ - if (rate_mask) - return find_last_bit(&rate_mask, BITS_PER_LONG); - return IWL_RATE_INVALID; -} - -static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta, - const struct rs_tx_column *column) -{ - switch (column->mode) { - case RS_LEGACY: - return lq_sta->max_legacy_rate_idx; - case RS_SISO: - return lq_sta->max_siso_rate_idx; - case RS_MIMO2: - return lq_sta->max_mimo2_rate_idx; - default: - WARN_ON_ONCE(1); - } - - return lq_sta->max_legacy_rate_idx; -} - -static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, - const struct rs_tx_column *column, - u32 bw) -{ - /* Used to choose among HT tables */ - const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT]; - - if (WARN_ON_ONCE(column->mode != RS_LEGACY && - column->mode != RS_SISO && - column->mode != RS_MIMO2)) - return expected_tpt_legacy; - - /* Legacy rates have only one table */ - if (column->mode == RS_LEGACY) - return expected_tpt_legacy; - - ht_tbl_pointer = expected_tpt_mimo2_20MHz; - /* Choose among many HT tables depending on number of streams - * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation - * status */ - if (column->mode == RS_SISO) { - switch (bw) { - case RATE_MCS_CHAN_WIDTH_20: - ht_tbl_pointer = expected_tpt_siso_20MHz; - break; - case RATE_MCS_CHAN_WIDTH_40: - ht_tbl_pointer = expected_tpt_siso_40MHz; - break; - case RATE_MCS_CHAN_WIDTH_80: - ht_tbl_pointer = expected_tpt_siso_80MHz; - break; - default: - WARN_ON_ONCE(1); - } - } else if (column->mode == RS_MIMO2) { - switch (bw) { - case RATE_MCS_CHAN_WIDTH_20: - ht_tbl_pointer = 
expected_tpt_mimo2_20MHz; - break; - case RATE_MCS_CHAN_WIDTH_40: - ht_tbl_pointer = expected_tpt_mimo2_40MHz; - break; - case RATE_MCS_CHAN_WIDTH_80: - ht_tbl_pointer = expected_tpt_mimo2_80MHz; - break; - default: - WARN_ON_ONCE(1); - } - } else { - WARN_ON_ONCE(1); - } - - if (!column->sgi && !lq_sta->is_agg) /* Normal */ - return ht_tbl_pointer[0]; - else if (column->sgi && !lq_sta->is_agg) /* SGI */ - return ht_tbl_pointer[1]; - else if (!column->sgi && lq_sta->is_agg) /* AGG */ - return ht_tbl_pointer[2]; - else /* AGG+SGI */ - return ht_tbl_pointer[3]; -} - -static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl) -{ - struct rs_rate *rate = &tbl->rate; - const struct rs_tx_column *column = &rs_tx_columns[tbl->column]; - - tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw); -} - -static s32 rs_get_best_rate(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, /* "search" */ - unsigned long rate_mask, s8 index) -{ - struct iwl_scale_tbl_info *active_tbl = - &(lq_sta->lq_info[lq_sta->active_tbl]); - s32 success_ratio = active_tbl->win[index].success_ratio; - u16 expected_current_tpt = active_tbl->expected_tpt[index]; - const u16 *tpt_tbl = tbl->expected_tpt; - u16 high_low; - u32 target_tpt; - int rate_idx; - - if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) { - target_tpt = 100 * expected_current_tpt; - IWL_DEBUG_RATE(mvm, - "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n", - success_ratio, target_tpt); - } else { - target_tpt = lq_sta->last_tpt; - IWL_DEBUG_RATE(mvm, - "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n", - success_ratio, target_tpt); - } - - rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG); - - while (rate_idx != IWL_RATE_INVALID) { - if (target_tpt < (100 * tpt_tbl[rate_idx])) - break; - - high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask, - tbl->rate.type); - - rate_idx = (high_low >> 8) & 0xff; - } - - IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n", - rate_idx, target_tpt, - rate_idx != IWL_RATE_INVALID ? - 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE); - - return rate_idx; -} - -static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) -{ - if (sta->bandwidth >= IEEE80211_STA_RX_BW_80) - return RATE_MCS_CHAN_WIDTH_80; - else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) - return RATE_MCS_CHAN_WIDTH_40; - - return RATE_MCS_CHAN_WIDTH_20; -} - -/* - * Check whether we should continue using same modulation mode, or - * begin search for a new mode, based on: - * 1) # tx successes or failures while using this mode - * 2) # times calling this function - * 3) elapsed time in this mode (not used, for now) - */ -static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) -{ - struct iwl_scale_tbl_info *tbl; - int active_tbl; - int flush_interval_passed = 0; - struct iwl_mvm *mvm; - - mvm = lq_sta->pers.drv; - active_tbl = lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - - /* If we've been disallowing search, see if we should now allow it */ - if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { - /* Elapsed time using current modulation mode */ - if (lq_sta->flush_timer) - flush_interval_passed = - time_after(jiffies, - (unsigned long)(lq_sta->flush_timer + - (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ))); - - /* - * Check if we should allow search for new modulation mode. 
- * If many frames have failed or succeeded, or we've used - * this same modulation for a long time, allow search, and - * reset history stats that keep track of whether we should - * allow a new search. Also (below) reset all bitmaps and - * stats in active history. - */ - if (force_search || - (lq_sta->total_failed > lq_sta->max_failure_limit) || - (lq_sta->total_success > lq_sta->max_success_limit) || - ((!lq_sta->search_better_tbl) && - (lq_sta->flush_timer) && (flush_interval_passed))) { - IWL_DEBUG_RATE(mvm, - "LQ: stay is expired %d %d %d\n", - lq_sta->total_failed, - lq_sta->total_success, - flush_interval_passed); - - /* Allow search for new mode */ - lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED; - IWL_DEBUG_RATE(mvm, - "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n"); - lq_sta->total_failed = 0; - lq_sta->total_success = 0; - lq_sta->flush_timer = 0; - /* mark the current column as visited */ - lq_sta->visited_columns = BIT(tbl->column); - /* - * Else if we've used this modulation mode enough repetitions - * (regardless of elapsed time or success/failure), reset - * history bitmaps and rate-specific stats for all rates in - * active table. - */ - } else { - lq_sta->table_count++; - if (lq_sta->table_count >= - lq_sta->table_count_limit) { - lq_sta->table_count = 0; - - IWL_DEBUG_RATE(mvm, - "LQ: stay in table clear win\n"); - rs_rate_scale_clear_tbl_windows(mvm, tbl); - } - } - - /* If transitioning to allow "search", reset all history - * bitmaps and stats in active table (this will become the new - * "search" table). */ - if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) { - rs_rate_scale_clear_tbl_windows(mvm, tbl); - } - } -} - -/* - * setup rate table in uCode - */ -static void rs_update_rate_tbl(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl) -{ - rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate); - iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); -} - -static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - enum rs_action scale_action) -{ - if (sta->bandwidth != IEEE80211_STA_RX_BW_80) - return false; - - if (!is_vht_siso(&tbl->rate)) - return false; - - if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) && - (tbl->rate.index == IWL_RATE_MCS_0_INDEX) && - (scale_action == RS_ACTION_DOWNSCALE)) { - tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20; - tbl->rate.index = IWL_RATE_MCS_4_INDEX; - IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n"); - goto tweaked; - } - - /* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is - * sustainable, i.e. we're past the test window. We can't go back - * if MCS5 is just tested as this will happen always after switching - * to 20Mhz MCS4 because the rate stats are cleared. 
- */ - if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) && - (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) && - (scale_action == RS_ACTION_STAY)) || - ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) && - (scale_action == RS_ACTION_UPSCALE)))) { - tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80; - tbl->rate.index = IWL_RATE_MCS_1_INDEX; - IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n"); - goto tweaked; - } - - return false; - -tweaked: - rs_set_expected_tpt_table(lq_sta, tbl); - rs_rate_scale_clear_tbl_windows(mvm, tbl); - return true; -} - -static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) -{ - int i, j, max_rate; - enum rs_column next_col_id; - const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column]; - const struct rs_tx_column *next_col; - allow_column_func_t allow_func; - u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm); - const u16 *expected_tpt_tbl; - u16 tpt, max_expected_tpt; - - for (i = 0; i < MAX_NEXT_COLUMNS; i++) { - next_col_id = curr_col->next_columns[i]; - - if (next_col_id == RS_COLUMN_INVALID) - continue; - - if (lq_sta->visited_columns & BIT(next_col_id)) { - IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n", - next_col_id); - continue; - } - - next_col = &rs_tx_columns[next_col_id]; - - if (!rs_is_valid_ant(valid_ants, next_col->ant)) { - IWL_DEBUG_RATE(mvm, - "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n", - next_col_id, valid_ants, next_col->ant); - continue; - } - - for (j = 0; j < MAX_COLUMN_CHECKS; j++) { - allow_func = next_col->checks[j]; - if (allow_func && !allow_func(mvm, sta, &tbl->rate, - next_col)) - break; - } - - if (j != MAX_COLUMN_CHECKS) { - IWL_DEBUG_RATE(mvm, - "Skip column %d: not allowed (check %d failed)\n", - next_col_id, j); - - continue; - } - - tpt = lq_sta->last_tpt / 100; - expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col, - rs_bw_from_sta_bw(sta)); - if (WARN_ON_ONCE(!expected_tpt_tbl)) - continue; - - max_rate = rs_get_max_allowed_rate(lq_sta, next_col); - if (max_rate == IWL_RATE_INVALID) { - IWL_DEBUG_RATE(mvm, - "Skip column %d: no rate is allowed in this column\n", - next_col_id); - continue; - } - - max_expected_tpt = expected_tpt_tbl[max_rate]; - if (tpt >= max_expected_tpt) { - IWL_DEBUG_RATE(mvm, - "Skip column %d: can't beat current TPT. Max expected %d current %d\n", - next_col_id, max_expected_tpt, tpt); - continue; - } - - IWL_DEBUG_RATE(mvm, - "Found potential column %d. 
Max expected %d current %d\n", - next_col_id, max_expected_tpt, tpt); - break; - } - - if (i == MAX_NEXT_COLUMNS) - return RS_COLUMN_INVALID; - - return next_col_id; -} - -static int rs_switch_to_column(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct ieee80211_sta *sta, - enum rs_column col_id) -{ - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct rs_rate *rate = &search_tbl->rate; - const struct rs_tx_column *column = &rs_tx_columns[col_id]; - const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - unsigned long rate_mask = 0; - u32 rate_idx = 0; - - memcpy(search_tbl, tbl, sz); - - rate->sgi = column->sgi; - rate->ant = column->ant; - - if (column->mode == RS_LEGACY) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) - rate->type = LQ_LEGACY_A; - else - rate->type = LQ_LEGACY_G; - - rate->bw = RATE_MCS_CHAN_WIDTH_20; - rate->ldpc = false; - rate_mask = lq_sta->active_legacy_rate; - } else if (column->mode == RS_SISO) { - rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO; - rate_mask = lq_sta->active_siso_rate; - } else if (column->mode == RS_MIMO2) { - rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; - rate_mask = lq_sta->active_mimo2_rate; - } else { - WARN_ON_ONCE("Bad column mode"); - } - - if (column->mode != RS_LEGACY) { - rate->bw = rs_bw_from_sta_bw(sta); - rate->ldpc = lq_sta->ldpc; - } - - search_tbl->column = col_id; - rs_set_expected_tpt_table(lq_sta, search_tbl); - - lq_sta->visited_columns |= BIT(col_id); - - /* Get the best matching rate if we're changing modes. e.g. - * SISO->MIMO, LEGACY->SISO, MIMO->SISO - */ - if (curr_column->mode != column->mode) { - rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl, - rate_mask, rate->index); - - if ((rate_idx == IWL_RATE_INVALID) || - !(BIT(rate_idx) & rate_mask)) { - IWL_DEBUG_RATE(mvm, - "can not switch with index %d" - " rate mask %lx\n", - rate_idx, rate_mask); - - goto err; - } - - rate->index = rate_idx; - } - - IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n", - col_id, rate->index); - - return 0; - -err: - rate->type = LQ_NONE; - return -1; -} - -static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm, - struct iwl_scale_tbl_info *tbl, - s32 sr, int low, int high, - int current_tpt, - int low_tpt, int high_tpt) -{ - enum rs_action action = RS_ACTION_STAY; - - if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) || - (current_tpt == 0)) { - IWL_DEBUG_RATE(mvm, - "Decrease rate because of low SR\n"); - return RS_ACTION_DOWNSCALE; - } - - if ((low_tpt == IWL_INVALID_VALUE) && - (high_tpt == IWL_INVALID_VALUE) && - (high != IWL_RATE_INVALID)) { - IWL_DEBUG_RATE(mvm, - "No data about high/low rates. Increase rate\n"); - return RS_ACTION_UPSCALE; - } - - if ((high_tpt == IWL_INVALID_VALUE) && - (high != IWL_RATE_INVALID) && - (low_tpt != IWL_INVALID_VALUE) && - (low_tpt < current_tpt)) { - IWL_DEBUG_RATE(mvm, - "No data about high rate and low rate is worse. Increase rate\n"); - return RS_ACTION_UPSCALE; - } - - if ((high_tpt != IWL_INVALID_VALUE) && - (high_tpt > current_tpt)) { - IWL_DEBUG_RATE(mvm, - "Higher rate is better. 
Increate rate\n"); - return RS_ACTION_UPSCALE; - } - - if ((low_tpt != IWL_INVALID_VALUE) && - (high_tpt != IWL_INVALID_VALUE) && - (low_tpt < current_tpt) && - (high_tpt < current_tpt)) { - IWL_DEBUG_RATE(mvm, - "Both high and low are worse. Maintain rate\n"); - return RS_ACTION_STAY; - } - - if ((low_tpt != IWL_INVALID_VALUE) && - (low_tpt > current_tpt)) { - IWL_DEBUG_RATE(mvm, - "Lower rate is better\n"); - action = RS_ACTION_DOWNSCALE; - goto out; - } - - if ((low_tpt == IWL_INVALID_VALUE) && - (low != IWL_RATE_INVALID)) { - IWL_DEBUG_RATE(mvm, - "No data about lower rate\n"); - action = RS_ACTION_DOWNSCALE; - goto out; - } - - IWL_DEBUG_RATE(mvm, "Maintain rate\n"); - -out: - if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) { - if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) { - IWL_DEBUG_RATE(mvm, - "SR is above NO DECREASE. Avoid downscale\n"); - action = RS_ACTION_STAY; - } else if (current_tpt > (100 * tbl->expected_tpt[low])) { - IWL_DEBUG_RATE(mvm, - "Current TPT is higher than max expected in low rate. Avoid downscale\n"); - action = RS_ACTION_STAY; - } else { - IWL_DEBUG_RATE(mvm, "Decrease rate\n"); - } - } - - return action; -} - -static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta) -{ - /* Our chip supports Tx STBC and the peer is an HT/VHT STA which - * supports STBC of at least 1*SS - */ - if (!lq_sta->stbc_capable) - return false; - - if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) - return false; - - return true; -} - -static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index, - int *weaker, int *stronger) -{ - *weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP; - if (*weaker > TPC_MAX_REDUCTION) - *weaker = TPC_INVALID; - - *stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP; - if (*stronger < 0) - *stronger = TPC_INVALID; -} - -static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct rs_rate *rate, enum ieee80211_band band) -{ - int index = rate->index; - bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); - bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION && - !vif->bss_conf.ps); - - IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n", - cam, sta_ps_disabled); - /* - * allow tpc only if power management is enabled, or bt coex - * activity grade allows it and we are on 2.4Ghz. - */ - if ((cam || sta_ps_disabled) && - !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band)) - return false; - - IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type); - if (is_legacy(rate)) - return index == IWL_RATE_54M_INDEX; - if (is_ht(rate)) - return index == IWL_RATE_MCS_7_INDEX; - if (is_vht(rate)) - return index == IWL_RATE_MCS_7_INDEX || - index == IWL_RATE_MCS_8_INDEX || - index == IWL_RATE_MCS_9_INDEX; - - WARN_ON_ONCE(1); - return false; -} - -enum tpc_action { - TPC_ACTION_STAY, - TPC_ACTION_DECREASE, - TPC_ACTION_INCREASE, - TPC_ACTION_NO_RESTIRCTION, -}; - -static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm, - s32 sr, int weak, int strong, - int current_tpt, - int weak_tpt, int strong_tpt) -{ - /* stay until we have valid tpt */ - if (current_tpt == IWL_INVALID_VALUE) { - IWL_DEBUG_RATE(mvm, "no current tpt. 
stay.\n"); - return TPC_ACTION_STAY; - } - - /* Too many failures, increase txp */ - if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) || - current_tpt == 0) { - IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n"); - return TPC_ACTION_NO_RESTIRCTION; - } - - /* try decreasing first if applicable */ - if (weak != TPC_INVALID) { - if (weak_tpt == IWL_INVALID_VALUE && - (strong_tpt == IWL_INVALID_VALUE || - current_tpt >= strong_tpt)) { - IWL_DEBUG_RATE(mvm, - "no weak txp measurement. decrease txp\n"); - return TPC_ACTION_DECREASE; - } - - if (weak_tpt > current_tpt) { - IWL_DEBUG_RATE(mvm, - "lower txp has better tpt. decrease txp\n"); - return TPC_ACTION_DECREASE; - } - } - - /* next, increase if needed */ - if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) && - strong != TPC_INVALID) { - if (weak_tpt == IWL_INVALID_VALUE && - strong_tpt != IWL_INVALID_VALUE && - current_tpt < strong_tpt) { - IWL_DEBUG_RATE(mvm, - "higher txp has better tpt. increase txp\n"); - return TPC_ACTION_INCREASE; - } - - if (weak_tpt < current_tpt && - (strong_tpt == IWL_INVALID_VALUE || - strong_tpt > current_tpt)) { - IWL_DEBUG_RATE(mvm, - "lower txp has worse tpt. increase txp\n"); - return TPC_ACTION_INCREASE; - } - } - - IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n"); - return TPC_ACTION_STAY; -} - -static bool rs_tpc_perform(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl) -{ - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - struct ieee80211_vif *vif = mvm_sta->vif; - struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_band band; - struct iwl_rate_scale_data *window; - struct rs_rate *rate = &tbl->rate; - enum tpc_action action; - s32 sr; - u8 cur = lq_sta->lq.reduced_tpc; - int current_tpt; - int weak, strong; - int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE; - -#ifdef CONFIG_MAC80211_DEBUGFS - if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) { - IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n", - lq_sta->pers.dbg_fixed_txp_reduction); - lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction; - return cur != lq_sta->pers.dbg_fixed_txp_reduction; - } -#endif - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - if (WARN_ON(!chanctx_conf)) - band = IEEE80211_NUM_BANDS; - else - band = chanctx_conf->def.chan->band; - rcu_read_unlock(); - - if (!rs_tpc_allowed(mvm, vif, rate, band)) { - IWL_DEBUG_RATE(mvm, - "tpc is not allowed. 
remove txp restrictions\n"); - lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION; - return cur != TPC_NO_REDUCTION; - } - - rs_get_adjacent_txp(mvm, cur, &weak, &strong); - - /* Collect measured throughputs for current and adjacent rates */ - window = tbl->tpc_win; - sr = window[cur].success_ratio; - current_tpt = window[cur].average_tpt; - if (weak != TPC_INVALID) - weak_tpt = window[weak].average_tpt; - if (strong != TPC_INVALID) - strong_tpt = window[strong].average_tpt; - - IWL_DEBUG_RATE(mvm, - "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n", - cur, current_tpt, sr, weak, strong, - weak_tpt, strong_tpt); - - action = rs_get_tpc_action(mvm, sr, weak, strong, - current_tpt, weak_tpt, strong_tpt); - - /* override actions if we are on the edge */ - if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) { - IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n"); - action = TPC_ACTION_STAY; - } else if (strong == TPC_INVALID && - (action == TPC_ACTION_INCREASE || - action == TPC_ACTION_NO_RESTIRCTION)) { - IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n"); - action = TPC_ACTION_STAY; - } - - switch (action) { - case TPC_ACTION_DECREASE: - lq_sta->lq.reduced_tpc = weak; - return true; - case TPC_ACTION_INCREASE: - lq_sta->lq.reduced_tpc = strong; - return true; - case TPC_ACTION_NO_RESTIRCTION: - lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION; - return true; - case TPC_ACTION_STAY: - /* do nothing */ - break; - } - return false; -} - -/* - * Do rate scaling and search for new modulation mode. - */ -static void rs_rate_scale_perform(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - int tid) -{ - int low = IWL_RATE_INVALID; - int high = IWL_RATE_INVALID; - int index; - struct iwl_rate_scale_data *window = NULL; - int current_tpt = IWL_INVALID_VALUE; - int low_tpt = IWL_INVALID_VALUE; - int high_tpt = IWL_INVALID_VALUE; - u32 fail_count; - enum rs_action scale_action = RS_ACTION_STAY; - u16 rate_mask; - u8 update_lq = 0; - struct iwl_scale_tbl_info *tbl, *tbl1; - u8 active_tbl = 0; - u8 done_search = 0; - u16 high_low; - s32 sr; - u8 prev_agg = lq_sta->is_agg; - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data; - struct rs_rate *rate; - - lq_sta->is_agg = !!sta_priv->agg_tids; - - /* - * Select rate-scale / modulation-mode table to work with in - * the rest of this function: "search" if searching for better - * modulation mode, or "active" if doing rate scaling within a mode. - */ - if (!lq_sta->search_better_tbl) - active_tbl = lq_sta->active_tbl; - else - active_tbl = 1 - lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - rate = &tbl->rate; - - if (prev_agg != lq_sta->is_agg) { - IWL_DEBUG_RATE(mvm, - "Aggregation changed: prev %d current %d. 
Update expected TPT table\n", - prev_agg, lq_sta->is_agg); - rs_set_expected_tpt_table(lq_sta, tbl); - rs_rate_scale_clear_tbl_windows(mvm, tbl); - } - - /* current tx rate */ - index = rate->index; - - /* rates available for this association, and for modulation mode */ - rate_mask = rs_get_supported_rates(lq_sta, rate); - - if (!(BIT(index) & rate_mask)) { - IWL_ERR(mvm, "Current Rate is not valid\n"); - if (lq_sta->search_better_tbl) { - /* revert to active table if search table is not valid*/ - rate->type = LQ_NONE; - lq_sta->search_better_tbl = 0; - tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - rs_update_rate_tbl(mvm, sta, lq_sta, tbl); - } - return; - } - - /* Get expected throughput table and history window for current rate */ - if (!tbl->expected_tpt) { - IWL_ERR(mvm, "tbl->expected_tpt is NULL\n"); - return; - } - - /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ - window = &(tbl->win[index]); - - /* - * If there is not enough history to calculate actual average - * throughput, keep analyzing results of more tx frames, without - * changing rate or mode (bypass most of the rest of this function). - * Set up new rate table in uCode only if old rate is not supported - * in current association (use new rate found above). - */ - fail_count = window->counter - window->success_counter; - if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) && - (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) { - IWL_DEBUG_RATE(mvm, - "%s: Test Window: succ %d total %d\n", - rs_pretty_rate(rate), - window->success_counter, window->counter); - - /* Can't calculate this yet; not enough history */ - window->average_tpt = IWL_INVALID_VALUE; - - /* Should we stay with this modulation mode, - * or search for a new one? */ - rs_stay_in_table(lq_sta, false); - - return; - } - - /* If we are searching for better modulation mode, check success. */ - if (lq_sta->search_better_tbl) { - /* If good success, continue using the "search" mode; - * no need to send new link quality command, since we're - * continuing to use the setup that we've been trying. */ - if (window->average_tpt > lq_sta->last_tpt) { - IWL_DEBUG_RATE(mvm, - "SWITCHING TO NEW TABLE SR: %d " - "cur-tpt %d old-tpt %d\n", - window->success_ratio, - window->average_tpt, - lq_sta->last_tpt); - - /* Swap tables; "search" becomes "active" */ - lq_sta->active_tbl = active_tbl; - current_tpt = window->average_tpt; - /* Else poor success; go back to mode in "active" table */ - } else { - IWL_DEBUG_RATE(mvm, - "GOING BACK TO THE OLD TABLE: SR %d " - "cur-tpt %d old-tpt %d\n", - window->success_ratio, - window->average_tpt, - lq_sta->last_tpt); - - /* Nullify "search" table */ - rate->type = LQ_NONE; - - /* Revert to "active" table */ - active_tbl = lq_sta->active_tbl; - tbl = &(lq_sta->lq_info[active_tbl]); - - /* Revert to "active" rate and throughput info */ - index = tbl->rate.index; - current_tpt = lq_sta->last_tpt; - - /* Need to set up a new rate table in uCode */ - update_lq = 1; - } - - /* Either way, we've made a decision; modulation mode - * search is done, allow rate adjustment next time. */ - lq_sta->search_better_tbl = 0; - done_search = 1; /* Don't switch modes below! */ - goto lq_update; - } - - /* (Else) not in search of better modulation mode, try for better - * starting rate, while staying in this mode. 
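The search-table verdict above is a single comparison: the "search" configuration is adopted only if its measured average throughput beat the throughput remembered from the "active" configuration; otherwise the driver reverts and reprograms the old rate. A hypothetical helper expressing just that decision:

#include <stdbool.h>
#include <stdio.h>

#define TPT_INVALID (-1)

/* Sketch of the adopt-or-revert verdict taken at the end of a search cycle. */
static bool adopt_search_config(int search_avg_tpt, int active_last_tpt)
{
        if (search_avg_tpt == TPT_INVALID)
                return false;   /* not enough history to judge */

        return search_avg_tpt > active_last_tpt;
}

int main(void)
{
        printf("%d\n", adopt_search_config(5200, 4800)); /* 1: switch tables */
        printf("%d\n", adopt_search_config(4100, 4800)); /* 0: go back */
        return 0;
}
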
*/ - high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type); - low = high_low & 0xff; - high = (high_low >> 8) & 0xff; - - /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ - - sr = window->success_ratio; - - /* Collect measured throughputs for current and adjacent rates */ - current_tpt = window->average_tpt; - if (low != IWL_RATE_INVALID) - low_tpt = tbl->win[low].average_tpt; - if (high != IWL_RATE_INVALID) - high_tpt = tbl->win[high].average_tpt; - - IWL_DEBUG_RATE(mvm, - "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n", - rs_pretty_rate(rate), current_tpt, sr, - low, high, low_tpt, high_tpt); - - scale_action = rs_get_rate_action(mvm, tbl, sr, low, high, - current_tpt, low_tpt, high_tpt); - - /* Force a search in case BT doesn't like us being in MIMO */ - if (is_mimo(rate) && - !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) { - IWL_DEBUG_RATE(mvm, - "BT Coex forbids MIMO. Search for new config\n"); - rs_stay_in_table(lq_sta, true); - goto lq_update; - } - - switch (scale_action) { - case RS_ACTION_DOWNSCALE: - /* Decrease starting rate, update uCode's rate table */ - if (low != IWL_RATE_INVALID) { - update_lq = 1; - index = low; - } else { - IWL_DEBUG_RATE(mvm, - "At the bottom rate. Can't decrease\n"); - } - - break; - case RS_ACTION_UPSCALE: - /* Increase starting rate, update uCode's rate table */ - if (high != IWL_RATE_INVALID) { - update_lq = 1; - index = high; - } else { - IWL_DEBUG_RATE(mvm, - "At the top rate. Can't increase\n"); - } - - break; - case RS_ACTION_STAY: - /* No change */ - if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) - update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl); - break; - default: - break; - } - -lq_update: - /* Replace uCode's rate table for the destination station. */ - if (update_lq) { - tbl->rate.index = index; - if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK) - rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action); - rs_update_rate_tbl(mvm, sta, lq_sta, tbl); - } - - rs_stay_in_table(lq_sta, false); - - /* - * Search for new modulation mode if we're: - * 1) Not changing rates right now - * 2) Not just finishing up a search - * 3) Allowing a new search - */ - if (!update_lq && !done_search && - lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED - && window->counter) { - enum rs_column next_column; - - /* Save current throughput to compare with "search" throughput*/ - lq_sta->last_tpt = current_tpt; - - IWL_DEBUG_RATE(mvm, - "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n", - update_lq, done_search, lq_sta->rs_state, - window->counter); - - next_column = rs_get_next_column(mvm, lq_sta, sta, tbl); - if (next_column != RS_COLUMN_INVALID) { - int ret = rs_switch_to_column(mvm, lq_sta, sta, - next_column); - if (!ret) - lq_sta->search_better_tbl = 1; - } else { - IWL_DEBUG_RATE(mvm, - "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n"); - lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED; - } - - /* If new "search" mode was selected, set up in uCode table */ - if (lq_sta->search_better_tbl) { - /* Access the "search" table, clear its history. 
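For reference, the upscale/downscale choice applied above (rs_get_rate_action) can be summarised as a small decision tree over the success ratio and the throughputs measured at the current, lower and higher rates. The sketch below mirrors that structure with an illustrative threshold rather than the driver's tuned constants, and omits the final "avoid downscale" overrides:

#include <stdio.h>

#define TPT_INVALID (-1)

enum scale_action { ACTION_STAY, ACTION_DOWNSCALE, ACTION_UPSCALE };

/* Sketch of the core scaling decision: force a downscale on a very poor
 * success ratio, climb while the higher neighbour looks better (or is still
 * unexplored and the lower one is worse), otherwise prefer the lower
 * neighbour when it outperforms the current rate. */
static enum scale_action rate_action(int success_ratio_pct, int current_tpt,
                                     int low_tpt, int high_tpt,
                                     int have_low, int have_high)
{
        if (success_ratio_pct <= 15 || current_tpt == 0)
                return ACTION_DOWNSCALE;

        if (low_tpt == TPT_INVALID && high_tpt == TPT_INVALID && have_high)
                return ACTION_UPSCALE;

        if (high_tpt == TPT_INVALID && have_high &&
            low_tpt != TPT_INVALID && low_tpt < current_tpt)
                return ACTION_UPSCALE;

        if (high_tpt != TPT_INVALID && high_tpt > current_tpt)
                return ACTION_UPSCALE;

        if (low_tpt != TPT_INVALID && high_tpt != TPT_INVALID &&
            low_tpt < current_tpt && high_tpt < current_tpt)
                return ACTION_STAY;

        if ((low_tpt != TPT_INVALID && low_tpt > current_tpt) ||
            (low_tpt == TPT_INVALID && have_low))
                return ACTION_DOWNSCALE;

        return ACTION_STAY;
}

int main(void)
{
        /* higher neighbour measured better than current: expect 2 (upscale) */
        printf("%d\n", rate_action(85, 4000, 3500, 4600, 1, 1));
        return 0;
}
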
*/ - tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - rs_rate_scale_clear_tbl_windows(mvm, tbl); - - /* Use new "search" start rate */ - index = tbl->rate.index; - - rs_dump_rate(mvm, &tbl->rate, - "Switch to SEARCH TABLE:"); - rs_update_rate_tbl(mvm, sta, lq_sta, tbl); - } else { - done_search = 1; - } - } - - if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) { - /* If the "active" (non-search) mode was legacy, - * and we've tried switching antennas, - * but we haven't been able to try HT modes (not available), - * stay with best antenna legacy modulation for a while - * before next round of mode comparisons. */ - tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); - if (is_legacy(&tbl1->rate)) { - IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); - - if (tid != IWL_MAX_TID_COUNT) { - tid_data = &sta_priv->tid_data[tid]; - if (tid_data->state != IWL_AGG_OFF) { - IWL_DEBUG_RATE(mvm, - "Stop aggregation on tid %d\n", - tid); - ieee80211_stop_tx_ba_session(sta, tid); - } - } - rs_set_stay_in_table(mvm, 1, lq_sta); - } else { - /* If we're in an HT mode, and all 3 mode switch actions - * have been tried and compared, stay in this best modulation - * mode for a while before next round of mode comparisons. */ - if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && - (lq_sta->tx_agg_tid_en & (1 << tid)) && - (tid != IWL_MAX_TID_COUNT)) { - tid_data = &sta_priv->tid_data[tid]; - if (tid_data->state == IWL_AGG_OFF) { - IWL_DEBUG_RATE(mvm, - "try to aggregate tid %d\n", - tid); - rs_tl_turn_on_agg(mvm, tid, - lq_sta, sta); - } - } - rs_set_stay_in_table(mvm, 0, lq_sta); - } - } -} - -struct rs_init_rate_info { - s8 rssi; - u8 rate_idx; -}; - -static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = { - { -60, IWL_RATE_54M_INDEX }, - { -64, IWL_RATE_48M_INDEX }, - { -68, IWL_RATE_36M_INDEX }, - { -80, IWL_RATE_24M_INDEX }, - { -84, IWL_RATE_18M_INDEX }, - { -85, IWL_RATE_12M_INDEX }, - { -86, IWL_RATE_11M_INDEX }, - { -88, IWL_RATE_5M_INDEX }, - { -90, IWL_RATE_2M_INDEX }, - { S8_MIN, IWL_RATE_1M_INDEX }, -}; - -static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = { - { -60, IWL_RATE_54M_INDEX }, - { -64, IWL_RATE_48M_INDEX }, - { -72, IWL_RATE_36M_INDEX }, - { -80, IWL_RATE_24M_INDEX }, - { -84, IWL_RATE_18M_INDEX }, - { -85, IWL_RATE_12M_INDEX }, - { -87, IWL_RATE_9M_INDEX }, - { S8_MIN, IWL_RATE_6M_INDEX }, -}; - -static const struct rs_init_rate_info rs_optimal_rates_ht[] = { - { -60, IWL_RATE_MCS_7_INDEX }, - { -64, IWL_RATE_MCS_6_INDEX }, - { -68, IWL_RATE_MCS_5_INDEX }, - { -72, IWL_RATE_MCS_4_INDEX }, - { -80, IWL_RATE_MCS_3_INDEX }, - { -84, IWL_RATE_MCS_2_INDEX }, - { -85, IWL_RATE_MCS_1_INDEX }, - { S8_MIN, IWL_RATE_MCS_0_INDEX}, -}; - -static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = { - { -60, IWL_RATE_MCS_8_INDEX }, - { -64, IWL_RATE_MCS_7_INDEX }, - { -68, IWL_RATE_MCS_6_INDEX }, - { -72, IWL_RATE_MCS_5_INDEX }, - { -80, IWL_RATE_MCS_4_INDEX }, - { -84, IWL_RATE_MCS_3_INDEX }, - { -85, IWL_RATE_MCS_2_INDEX }, - { -87, IWL_RATE_MCS_1_INDEX }, - { S8_MIN, IWL_RATE_MCS_0_INDEX}, -}; - -static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = { - { -60, IWL_RATE_MCS_9_INDEX }, - { -64, IWL_RATE_MCS_8_INDEX }, - { -68, IWL_RATE_MCS_7_INDEX }, - { -72, IWL_RATE_MCS_6_INDEX }, - { -80, IWL_RATE_MCS_5_INDEX }, - { -84, IWL_RATE_MCS_4_INDEX }, - { -85, IWL_RATE_MCS_3_INDEX }, - { -87, IWL_RATE_MCS_2_INDEX }, - { -88, IWL_RATE_MCS_1_INDEX }, - { S8_MIN, IWL_RATE_MCS_0_INDEX }, -}; - -/* Init the optimal rate based 
on STA caps - * This combined with rssi is used to report the last tx rate - * to userspace when we haven't transmitted enough frames. - */ -static void rs_init_optimal_rate(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta) -{ - struct rs_rate *rate = &lq_sta->optimal_rate; - - if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID) - rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; - else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID) - rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO; - else if (lq_sta->band == IEEE80211_BAND_5GHZ) - rate->type = LQ_LEGACY_A; - else - rate->type = LQ_LEGACY_G; - - rate->bw = rs_bw_from_sta_bw(sta); - rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL); - - /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */ - - if (is_mimo(rate)) { - lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate; - } else if (is_siso(rate)) { - lq_sta->optimal_rate_mask = lq_sta->active_siso_rate; - } else { - lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate; - - if (lq_sta->band == IEEE80211_BAND_5GHZ) { - lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy; - lq_sta->optimal_nentries = - ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); - } else { - lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy; - lq_sta->optimal_nentries = - ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); - } - } - - if (is_vht(rate)) { - if (rate->bw == RATE_MCS_CHAN_WIDTH_20) { - lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz; - lq_sta->optimal_nentries = - ARRAY_SIZE(rs_optimal_rates_vht_20mhz); - } else { - lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz; - lq_sta->optimal_nentries = - ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz); - } - } else if (is_ht(rate)) { - lq_sta->optimal_rates = rs_optimal_rates_ht; - lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht); - } -} - -/* Compute the optimal rate index based on RSSI */ -static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta) -{ - struct rs_rate *rate = &lq_sta->optimal_rate; - int i; - - rate->index = find_first_bit(&lq_sta->optimal_rate_mask, - BITS_PER_LONG); - - for (i = 0; i < lq_sta->optimal_nentries; i++) { - int rate_idx = lq_sta->optimal_rates[i].rate_idx; - - if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) && - (BIT(rate_idx) & lq_sta->optimal_rate_mask)) { - rate->index = rate_idx; - break; - } - } - - return rate; -} - -/* Choose an initial legacy rate and antenna to use based on the RSSI - * of last Rx - */ -static void rs_get_initial_rate(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - enum ieee80211_band band, - struct rs_rate *rate) -{ - int i, nentries; - s8 best_rssi = S8_MIN; - u8 best_ant = ANT_NONE; - u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); - const struct rs_init_rate_info *initial_rates; - - for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { - if (!(lq_sta->pers.chains & BIT(i))) - continue; - - if (lq_sta->pers.chain_signal[i] > best_rssi) { - best_rssi = lq_sta->pers.chain_signal[i]; - best_ant = BIT(i); - } - } - - IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n", - rs_pretty_ant(best_ant), best_rssi); - - if (best_ant != ANT_A && best_ant != ANT_B) - rate->ant = first_antenna(valid_tx_ant); - else - rate->ant = best_ant; - - rate->sgi = false; - rate->ldpc = false; - rate->bw = RATE_MCS_CHAN_WIDTH_20; - - rate->index = find_first_bit(&lq_sta->active_legacy_rate, - BITS_PER_LONG); - - if (band == IEEE80211_BAND_5GHZ) { - rate->type = LQ_LEGACY_A; - initial_rates = 
rs_optimal_rates_5ghz_legacy; - nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); - } else { - rate->type = LQ_LEGACY_G; - initial_rates = rs_optimal_rates_24ghz_legacy; - nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); - } - - if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) { - for (i = 0; i < nentries; i++) { - int rate_idx = initial_rates[i].rate_idx; - if ((best_rssi >= initial_rates[i].rssi) && - (BIT(rate_idx) & lq_sta->active_legacy_rate)) { - rate->index = rate_idx; - break; - } - } - } - - IWL_DEBUG_RATE(mvm, "rate_idx %d ANT %s\n", rate->index, - rs_pretty_ant(rate->ant)); -} - -/* Save info about RSSI of last Rx */ -void rs_update_last_rssi(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct ieee80211_rx_status *rx_status) -{ - int i; - - lq_sta->pers.chains = rx_status->chains; - lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0]; - lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1]; - lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2]; - lq_sta->pers.last_rssi = S8_MIN; - - for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { - if (!(lq_sta->pers.chains & BIT(i))) - continue; - - if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi) - lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i]; - } -} - -/** - * rs_initialize_lq - Initialize a station's hardware rate table - * - * The uCode's station table contains a table of fallback rates - * for automatic fallback during transmission. - * - * NOTE: This sets up a default set of values. These will be replaced later - * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of - * rc80211_simple. - * - * NOTE: Run REPLY_ADD_STA command to set up station table entry, before - * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, - * which requires station table entry to exist). - */ -static void rs_initialize_lq(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - enum ieee80211_band band, - bool init) -{ - struct iwl_scale_tbl_info *tbl; - struct rs_rate *rate; - u8 active_tbl = 0; - - if (!sta || !lq_sta) - return; - - if (!lq_sta->search_better_tbl) - active_tbl = lq_sta->active_tbl; - else - active_tbl = 1 - lq_sta->active_tbl; - - tbl = &(lq_sta->lq_info[active_tbl]); - rate = &tbl->rate; - - rs_get_initial_rate(mvm, lq_sta, band, rate); - rs_init_optimal_rate(mvm, sta, lq_sta); - - WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B); - if (rate->ant == ANT_A) - tbl->column = RS_COLUMN_LEGACY_ANT_A; - else - tbl->column = RS_COLUMN_LEGACY_ANT_B; - - rs_set_expected_tpt_table(lq_sta, tbl); - rs_fill_lq_cmd(mvm, sta, lq_sta, rate); - /* TODO restore station should remember the lq cmd */ - iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init); -} - -static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, - struct ieee80211_tx_rate_control *txrc) -{ - struct sk_buff *skb = txrc->skb; - struct iwl_op_mode *op_mode __maybe_unused = - (struct iwl_op_mode *)mvm_r; - struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_lq_sta *lq_sta = mvm_sta; - struct rs_rate *optimal_rate; - u32 last_ucode_rate; - - if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) { - /* if vif isn't initialized mvm doesn't know about - * this station, so don't do anything with the it - */ - sta = NULL; - mvm_sta = NULL; - } - - /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ - - /* Treat uninitialized rate scaling data same as non-existing. 
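The initial-rate lookup above is a first-match walk of an RSSI-ordered table: entries run from the highest rate (needing the strongest signal) down to the most robust one, and the first entry whose RSSI threshold is met and whose rate is present in the active legacy mask wins. A standalone sketch with made-up table values, not the driver's thresholds:

#include <stdio.h>

struct init_rate_entry { signed char rssi; unsigned char rate_idx; };

/* Sketch of the RSSI-based initial-rate lookup. */
static int pick_initial_rate(const struct init_rate_entry *tbl, int n,
                             signed char best_rssi, unsigned long allowed_mask,
                             int fallback_idx)
{
        int i;

        for (i = 0; i < n; i++) {
                if (best_rssi >= tbl[i].rssi &&
                    (allowed_mask & (1UL << tbl[i].rate_idx)))
                        return tbl[i].rate_idx;
        }

        return fallback_idx;    /* lowest allowed rate if nothing matched */
}

int main(void)
{
        const struct init_rate_entry tbl[] = {
                { -60, 7 }, { -64, 6 }, { -72, 5 }, { -80, 4 }, { -128, 0 },
        };

        /* -70 dBm clears the -72 threshold first: expect index 5 */
        printf("idx = %d\n", pick_initial_rate(tbl, 5, -70, 0xff, 0));
        return 0;
}
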
*/ - if (lq_sta && !lq_sta->pers.drv) { - IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); - mvm_sta = NULL; - } - - /* Send management frames and NO_ACK data using lowest rate. */ - if (rate_control_send_low(sta, mvm_sta, txrc)) - return; - - iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags, - info->band, &info->control.rates[0]); - info->control.rates[0].count = 1; - - /* Report the optimal rate based on rssi and STA caps if we haven't - * converged yet (too little traffic) or exploring other modulations - */ - if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) { - optimal_rate = rs_get_optimal_rate(mvm, lq_sta); - last_ucode_rate = ucode_rate_from_rs_rate(mvm, - optimal_rate); - iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band, - &txrc->reported_rate); - } -} - -static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, - gfp_t gfp) -{ - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); - struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate; - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; - - IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); - - lq_sta->pers.drv = mvm; -#ifdef CONFIG_MAC80211_DEBUGFS - lq_sta->pers.dbg_fixed_rate = 0; - lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID; - lq_sta->pers.ss_force = RS_SS_FORCE_NONE; -#endif - lq_sta->pers.chains = 0; - memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); - lq_sta->pers.last_rssi = S8_MIN; - - return &sta_priv->lq_sta; -} - -static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, - int nss) -{ - u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & - (0x3 << (2 * (nss - 1))); - rx_mcs >>= (2 * (nss - 1)); - - if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7) - return IWL_RATE_MCS_7_INDEX; - else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8) - return IWL_RATE_MCS_8_INDEX; - else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9) - return IWL_RATE_MCS_9_INDEX; - - WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED); - return -1; -} - -static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta, - struct ieee80211_sta_vht_cap *vht_cap, - struct iwl_lq_sta *lq_sta) -{ - int i; - int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1); - - if (highest_mcs >= IWL_RATE_MCS_0_INDEX) { - for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) { - if (i == IWL_RATE_9M_INDEX) - continue; - - /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ - if (i == IWL_RATE_MCS_9_INDEX && - sta->bandwidth == IEEE80211_STA_RX_BW_20) - continue; - - lq_sta->active_siso_rate |= BIT(i); - } - } - - if (sta->rx_nss < 2) - return; - - highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2); - if (highest_mcs >= IWL_RATE_MCS_0_INDEX) { - for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) { - if (i == IWL_RATE_9M_INDEX) - continue; - - /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ - if (i == IWL_RATE_MCS_9_INDEX && - sta->bandwidth == IEEE80211_STA_RX_BW_20) - continue; - - lq_sta->active_mimo2_rate |= BIT(i); - } - } -} - -static void rs_ht_init(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - struct ieee80211_sta_ht_cap *ht_cap) -{ - /* active_siso_rate mask includes 9 MBits (bit 5), - * and CCK (bits 0-3), supp_rates[] does not; - * shift to convert format, force 9 MBits off. 
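rs_vht_highest_rx_mcs_index above decodes the 16-bit VHT RX MCS map, which carries two bits per spatial stream (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported). A small sketch of the same decoding, returning plain MCS numbers instead of the driver's IWL_RATE_MCS_*_INDEX values:

#include <stdio.h>

/* Sketch of decoding a VHT RX MCS map: two bits per spatial stream. */
static int highest_vht_mcs(unsigned short rx_mcs_map, int nss)
{
        unsigned int val = (rx_mcs_map >> (2 * (nss - 1))) & 0x3;

        switch (val) {
        case 0: return 7;
        case 1: return 8;
        case 2: return 9;
        default: return -1;     /* this stream count isn't supported */
        }
}

int main(void)
{
        /* NSS1 up to MCS 9, NSS2 up to MCS 8, higher stream counts unsupported */
        unsigned short map = 0xfff6;

        printf("nss1 max mcs %d, nss2 max mcs %d\n",
               highest_vht_mcs(map, 1), highest_vht_mcs(map, 2));
        return 0;
}
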
- */ - lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; - lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; - lq_sta->active_siso_rate &= ~((u16)0x2); - lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; - - lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; - lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; - lq_sta->active_mimo2_rate &= ~((u16)0x2); - lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; - - if (mvm->cfg->ht_params->ldpc && - (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) - lq_sta->ldpc = true; - - if (mvm->cfg->ht_params->stbc && - (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && - (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) - lq_sta->stbc_capable = true; - - lq_sta->is_vht = false; -} - -static void rs_vht_init(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - struct ieee80211_sta_vht_cap *vht_cap) -{ - rs_vht_set_enabled_rates(sta, vht_cap, lq_sta); - - if (mvm->cfg->ht_params->ldpc && - (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)) - lq_sta->ldpc = true; - - if (mvm->cfg->ht_params->stbc && - (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && - (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)) - lq_sta->stbc_capable = true; - - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && - (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && - (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) - lq_sta->bfer_capable = true; - - lq_sta->is_vht = true; -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) -{ - spin_lock_bh(&mvm->drv_stats_lock); - memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats)); - spin_unlock_bh(&mvm->drv_stats_lock); -} - -void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) -{ - u8 nss = 0, mcs = 0; - - spin_lock(&mvm->drv_stats_lock); - - if (agg) - mvm->drv_rx_stats.agg_frames++; - - mvm->drv_rx_stats.success_frames++; - - switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - mvm->drv_rx_stats.bw_20_frames++; - break; - case RATE_MCS_CHAN_WIDTH_40: - mvm->drv_rx_stats.bw_40_frames++; - break; - case RATE_MCS_CHAN_WIDTH_80: - mvm->drv_rx_stats.bw_80_frames++; - break; - default: - WARN_ONCE(1, "bad BW. 
rate 0x%x", rate); - } - - if (rate & RATE_MCS_HT_MSK) { - mvm->drv_rx_stats.ht_frames++; - mcs = rate & RATE_HT_MCS_RATE_CODE_MSK; - nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; - } else if (rate & RATE_MCS_VHT_MSK) { - mvm->drv_rx_stats.vht_frames++; - mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; - nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; - } else { - mvm->drv_rx_stats.legacy_frames++; - } - - if (nss == 1) - mvm->drv_rx_stats.siso_frames++; - else if (nss == 2) - mvm->drv_rx_stats.mimo2_frames++; - - if (rate & RATE_MCS_SGI_MSK) - mvm->drv_rx_stats.sgi_frames++; - else - mvm->drv_rx_stats.ngi_frames++; - - mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate; - mvm->drv_rx_stats.last_frame_idx = - (mvm->drv_rx_stats.last_frame_idx + 1) % - ARRAY_SIZE(mvm->drv_rx_stats.last_rates); - - spin_unlock(&mvm->drv_stats_lock); -} -#endif - -/* - * Called after adding a new station to initialize rate scaling - */ -void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum ieee80211_band band, bool init) -{ - int i, j; - struct ieee80211_hw *hw = mvm->hw; - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; - struct ieee80211_supported_band *sband; - unsigned long supp; /* must be unsigned long for for_each_set_bit */ - - /* clear all non-persistent lq data */ - memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); - - sband = hw->wiphy->bands[band]; - - lq_sta->lq.sta_id = sta_priv->sta_id; - - for (j = 0; j < LQ_SIZE; j++) - rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); - - lq_sta->flush_timer = 0; - lq_sta->last_tx = jiffies; - - IWL_DEBUG_RATE(mvm, - "LQ: *** rate scale station global init for station %d ***\n", - sta_priv->sta_id); - /* TODO: what is a good starting rate for STA? About middle? Maybe not - * the lowest or the highest rate.. Could consider using RSSI from - * previous packets? Need to have IEEE 802.1X auth succeed immediately - * after assoc.. 
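The statistics bucketing in iwl_mvm_update_frame_stats above boils down to classifying each received frame by bandwidth, modulation (legacy/HT/VHT), stream count and guard interval. The sketch below does the same bookkeeping over a hypothetical, already-decoded rate descriptor rather than the packed rate_n_flags word:

#include <stdio.h>

/* Sketch only: a decoded rate descriptor stands in for rate_n_flags. */
struct rx_rate_desc { int is_ht, is_vht, sgi, nss, bw_mhz; };

struct rx_stats {
        unsigned int legacy, ht, vht, siso, mimo2, sgi, ngi;
        unsigned int bw20, bw40, bw80;
};

static void account_frame(struct rx_stats *st, const struct rx_rate_desc *r)
{
        if (r->bw_mhz == 20)
                st->bw20++;
        else if (r->bw_mhz == 40)
                st->bw40++;
        else if (r->bw_mhz == 80)
                st->bw80++;

        if (r->is_vht)
                st->vht++;
        else if (r->is_ht)
                st->ht++;
        else
                st->legacy++;

        if (r->nss == 1)
                st->siso++;
        else if (r->nss == 2)
                st->mimo2++;

        if (r->sgi)
                st->sgi++;
        else
                st->ngi++;
}

int main(void)
{
        struct rx_stats st = { 0 };
        struct rx_rate_desc r = { .is_vht = 1, .sgi = 1, .nss = 2, .bw_mhz = 80 };

        account_frame(&st, &r);
        printf("vht=%u mimo2=%u sgi=%u bw80=%u\n", st.vht, st.mimo2, st.sgi, st.bw80);
        return 0;
}
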
*/ - - lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX; - lq_sta->band = sband->band; - /* - * active legacy rates as per supported rates bitmap - */ - supp = sta->supp_rates[sband->band]; - lq_sta->active_legacy_rate = 0; - for_each_set_bit(i, &supp, BITS_PER_LONG) - lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); - - /* TODO: should probably account for rx_highest for both HT/VHT */ - if (!vht_cap || !vht_cap->vht_supported) - rs_ht_init(mvm, sta, lq_sta, ht_cap); - else - rs_vht_init(mvm, sta, lq_sta, vht_cap); - - lq_sta->max_legacy_rate_idx = - rs_get_max_rate_from_mask(lq_sta->active_legacy_rate); - lq_sta->max_siso_rate_idx = - rs_get_max_rate_from_mask(lq_sta->active_siso_rate); - lq_sta->max_mimo2_rate_idx = - rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate); - - IWL_DEBUG_RATE(mvm, - "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n", - lq_sta->active_legacy_rate, - lq_sta->active_siso_rate, - lq_sta->active_mimo2_rate, - lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable, - lq_sta->bfer_capable); - IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n", - lq_sta->max_legacy_rate_idx, - lq_sta->max_siso_rate_idx, - lq_sta->max_mimo2_rate_idx); - - /* These values will be overridden later */ - lq_sta->lq.single_stream_ant_msk = - first_antenna(iwl_mvm_get_valid_tx_ant(mvm)); - lq_sta->lq.dual_stream_ant_msk = ANT_AB; - - /* as default allow aggregation for all tids */ - lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; - lq_sta->is_agg = 0; -#ifdef CONFIG_IWLWIFI_DEBUGFS - iwl_mvm_reset_frame_stats(mvm); -#endif - rs_initialize_lq(mvm, sta, lq_sta, band, init); -} - -static void rs_rate_update(void *mvm_r, - struct ieee80211_supported_band *sband, - struct cfg80211_chan_def *chandef, - struct ieee80211_sta *sta, void *priv_sta, - u32 changed) -{ - u8 tid; - struct iwl_op_mode *op_mode = - (struct iwl_op_mode *)mvm_r; - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - if (!iwl_mvm_sta_from_mac80211(sta)->vif) - return; - - /* Stop any ongoing aggregations as rs starts off assuming no agg */ - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) - ieee80211_stop_tx_ba_session(sta, tid); - - iwl_mvm_rs_rate_init(mvm, sta, sband->band, false); -} - -#ifdef CONFIG_MAC80211_DEBUGFS -static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, - struct iwl_lq_cmd *lq_cmd, - enum ieee80211_band band, - u32 ucode_rate) -{ - struct rs_rate rate; - int i; - int num_rates = ARRAY_SIZE(lq_cmd->rs_table); - __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate); - u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; - - for (i = 0; i < num_rates; i++) - lq_cmd->rs_table[i] = ucode_rate_le32; - - rs_rate_from_ucode_rate(ucode_rate, band, &rate); - - if (is_mimo(&rate)) - lq_cmd->mimo_delim = num_rates - 1; - else - lq_cmd->mimo_delim = 0; - - lq_cmd->reduced_tpc = 0; - - if (num_of_ant(ant) == 1) - lq_cmd->single_stream_ant_msk = ant; - - lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; -} -#endif /* CONFIG_MAC80211_DEBUGFS */ - -static void rs_fill_rates_for_column(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct rs_rate *rate, - __le32 *rs_table, int *rs_table_index, - int num_rates, int num_retries, - u8 valid_tx_ant, bool toggle_ant) -{ - int i, j; - __le32 ucode_rate; - bool bottom_reached = false; - int prev_rate_idx = rate->index; - int end = LINK_QUAL_MAX_RETRY_NUM; - int index = *rs_table_index; - - for (i = 0; i < num_rates && index < end; i++) { - for (j = 0; j < num_retries && index < end; j++, index++) { - 
ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, - rate)); - rs_table[index] = ucode_rate; - if (toggle_ant) - rs_toggle_antenna(valid_tx_ant, rate); - } - - prev_rate_idx = rate->index; - bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate); - if (bottom_reached && !is_legacy(rate)) - break; - } - - if (!bottom_reached && !is_legacy(rate)) - rate->index = prev_rate_idx; - - *rs_table_index = index; -} - -/* Building the rate table is non trivial. When we're in MIMO2/VHT/80Mhz/SGI - * column the rate table should look like this: - * - * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI - * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI - * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI - * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI - * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI - * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI - * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI - * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI - * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI - * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps - * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps - * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps - * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps - * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps - * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps - * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps - */ -static void rs_build_rates_table(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - const struct rs_rate *initial_rate) -{ - struct rs_rate rate; - int num_rates, num_retries, index = 0; - u8 valid_tx_ant = 0; - struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; - bool toggle_ant = false; - - memcpy(&rate, initial_rate, sizeof(rate)); - - valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); - - /* TODO: remove old API when min FW API hits 14 */ - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) && - rs_stbc_allow(mvm, sta, lq_sta)) - rate.stbc = true; - - if (is_siso(&rate)) { - num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES; - num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE; - } else if (is_mimo(&rate)) { - num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES; - num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE; - } else { - num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES; - num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES; - toggle_ant = true; - } - - rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, - num_rates, num_retries, valid_tx_ant, - toggle_ant); - - rs_get_lower_rate_down_column(lq_sta, &rate); - - if (is_siso(&rate)) { - num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES; - num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES; - lq_cmd->mimo_delim = index; - } else if (is_legacy(&rate)) { - num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES; - num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES; - } else { - WARN_ON_ONCE(1); - } - - toggle_ant = true; - - rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, - num_rates, num_retries, valid_tx_ant, - toggle_ant); - - rs_get_lower_rate_down_column(lq_sta, &rate); - - num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES; - num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES; - - rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, - num_rates, num_retries, valid_tx_ant, - toggle_ant); - -} - -struct rs_bfer_active_iter_data { - struct ieee80211_sta *exclude_sta; - struct iwl_mvm_sta *bfer_mvmsta; -}; - -static void 
rs_bfer_active_iter(void *_data, - struct ieee80211_sta *sta) -{ - struct rs_bfer_active_iter_data *data = _data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq; - u32 ss_params = le32_to_cpu(lq_cmd->ss_params); - - if (sta == data->exclude_sta) - return; - - /* The current sta has BFER allowed */ - if (ss_params & LQ_SS_BFER_ALLOWED) { - WARN_ON_ONCE(data->bfer_mvmsta != NULL); - - data->bfer_mvmsta = mvmsta; - } -} - -static int rs_bfer_priority(struct iwl_mvm_sta *sta) -{ - int prio = -1; - enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif); - - switch (viftype) { - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_P2P_GO: - prio = 3; - break; - case NL80211_IFTYPE_P2P_CLIENT: - prio = 2; - break; - case NL80211_IFTYPE_STATION: - prio = 1; - break; - default: - WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id); - prio = -1; - } - - return prio; -} - -/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */ -static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1, - struct iwl_mvm_sta *sta2) -{ - int prio1 = rs_bfer_priority(sta1); - int prio2 = rs_bfer_priority(sta2); - - if (prio1 > prio2) - return 1; - if (prio1 < prio2) - return -1; - return 0; -} - -static void rs_set_lq_ss_params(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - const struct rs_rate *initial_rate) -{ - struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct rs_bfer_active_iter_data data = { - .exclude_sta = sta, - .bfer_mvmsta = NULL, - }; - struct iwl_mvm_sta *bfer_mvmsta = NULL; - u32 ss_params = LQ_SS_PARAMS_VALID; - - if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) - goto out; - -#ifdef CONFIG_MAC80211_DEBUGFS - /* Check if forcing the decision is configured. - * Note that SISO is forced by not allowing STBC or BFER - */ - if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC) - ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE); - else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER) - ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE); - - if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) { - IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n", - lq_sta->pers.ss_force); - goto out; - } -#endif - - if (lq_sta->stbc_capable) - ss_params |= LQ_SS_STBC_1SS_ALLOWED; - - if (!lq_sta->bfer_capable) - goto out; - - ieee80211_iterate_stations_atomic(mvm->hw, - rs_bfer_active_iter, - &data); - bfer_mvmsta = data.bfer_mvmsta; - - /* This code is safe as it doesn't run concurrently for different - * stations. This is guaranteed by the fact that calls to - * ieee80211_tx_status wouldn't run concurrently for a single HW. - */ - if (!bfer_mvmsta) { - IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n"); - - ss_params |= LQ_SS_BFER_ALLOWED; - goto out; - } - - IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n", - bfer_mvmsta->sta_id); - - /* Disallow BFER on another STA if active and we're a higher priority */ - if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) { - struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq; - u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params); - - bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED; - bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params); - iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false); - - ss_params |= LQ_SS_BFER_ALLOWED; - IWL_DEBUG_RATE(mvm, - "Lower priority BFER sta found (%d). 
Switch BFER\n", - bfer_mvmsta->sta_id); - } -out: - lq_cmd->ss_params = cpu_to_le32(ss_params); -} - -static void rs_fill_lq_cmd(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta, - const struct rs_rate *initial_rate) -{ - struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; - struct iwl_mvm_sta *mvmsta; - struct iwl_mvm_vif *mvmvif; - - lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START; - lq_cmd->agg_time_limit = - cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT); - -#ifdef CONFIG_MAC80211_DEBUGFS - if (lq_sta->pers.dbg_fixed_rate) { - rs_build_rates_table_from_fixed(mvm, lq_cmd, - lq_sta->band, - lq_sta->pers.dbg_fixed_rate); - return; - } -#endif - if (WARN_ON_ONCE(!sta || !initial_rate)) - return; - - rs_build_rates_table(mvm, sta, lq_sta, initial_rate); - - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) - rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate); - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - - if (num_of_ant(initial_rate->ant) == 1) - lq_cmd->single_stream_ant_msk = initial_rate->ant; - - lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize; - - /* - * In case of low latency, tell the firmware to leave a frame in the - * Tx Fifo so that it can start a transaction in the same TxOP. This - * basically allows the firmware to send bursts. - */ - if (iwl_mvm_vif_low_latency(mvmvif)) { - lq_cmd->agg_frame_cnt_limit--; - - if (mvm->low_latency_agg_frame_limit) - lq_cmd->agg_frame_cnt_limit = - min(lq_cmd->agg_frame_cnt_limit, - mvm->low_latency_agg_frame_limit); - } - - if (mvmsta->vif->p2p) - lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK; - - lq_cmd->agg_time_limit = - cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta)); -} - -static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) -{ - return hw->priv; -} -/* rate scale requires free function to be implemented */ -static void rs_free(void *mvm_rate) -{ - return; -} - -static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, - void *mvm_sta) -{ - struct iwl_op_mode *op_mode __maybe_unused = mvm_r; - struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); - - IWL_DEBUG_RATE(mvm, "enter\n"); - IWL_DEBUG_RATE(mvm, "leave\n"); -} - -#ifdef CONFIG_MAC80211_DEBUGFS -int rs_pretty_print_rate(char *buf, const u32 rate) -{ - - char *type, *bw; - u8 mcs = 0, nss = 0; - u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; - - if (!(rate & RATE_MCS_HT_MSK) && - !(rate & RATE_MCS_VHT_MSK)) { - int index = iwl_hwrate_to_plcp_idx(rate); - - return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n", - rs_pretty_ant(ant), - index == IWL_RATE_INVALID ? "BAD" : - iwl_rate_mcs[index].mbps); - } - - if (rate & RATE_MCS_VHT_MSK) { - type = "VHT"; - mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; - nss = ((rate & RATE_VHT_MCS_NSS_MSK) - >> RATE_VHT_MCS_NSS_POS) + 1; - } else if (rate & RATE_MCS_HT_MSK) { - type = "HT"; - mcs = rate & RATE_HT_MCS_INDEX_MSK; - } else { - type = "Unknown"; /* shouldn't happen */ - } - - switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - bw = "20Mhz"; - break; - case RATE_MCS_CHAN_WIDTH_40: - bw = "40Mhz"; - break; - case RATE_MCS_CHAN_WIDTH_80: - bw = "80Mhz"; - break; - case RATE_MCS_CHAN_WIDTH_160: - bw = "160Mhz"; - break; - default: - bw = "BAD BW"; - } - - return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n", - type, rs_pretty_ant(ant), bw, mcs, nss, - (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", - (rate & RATE_MCS_HT_STBC_MSK) ? 
"STBC " : "", - (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", - (rate & RATE_MCS_BF_MSK) ? "BF " : "", - (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : ""); -} - -/** - * Program the device to use fixed rate for frame transmit - * This is for debugging/testing only - * once the device start use fixed rate, we need to reload the module - * to being back the normal operation. - */ -static void rs_program_fix_rate(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta) -{ - lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ - lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - - IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n", - lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate); - - if (lq_sta->pers.dbg_fixed_rate) { - rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL); - iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false); - } -} - -static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_mvm *mvm; - char buf[64]; - size_t buf_size; - u32 parsed_rate; - - mvm = lq_sta->pers.drv; - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - if (sscanf(buf, "%x", &parsed_rate) == 1) - lq_sta->pers.dbg_fixed_rate = parsed_rate; - else - lq_sta->pers.dbg_fixed_rate = 0; - - rs_program_fix_rate(mvm, lq_sta); - - return count; -} - -static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int i = 0; - ssize_t ret; - - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_mvm *mvm; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct rs_rate *rate = &tbl->rate; - u32 ss_params; - mvm = lq_sta->pers.drv; - buff = kmalloc(2048, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); - desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n", - lq_sta->total_failed, lq_sta->total_success, - lq_sta->active_legacy_rate); - desc += sprintf(buff+desc, "fixed rate 0x%X\n", - lq_sta->pers.dbg_fixed_rate); - desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", - (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", - (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "", - (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : ""); - desc += sprintf(buff+desc, "lq type %s\n", - (is_legacy(rate)) ? "legacy" : - is_vht(rate) ? "VHT" : "HT"); - if (!is_legacy(rate)) { - desc += sprintf(buff + desc, " %s", - (is_siso(rate)) ? "SISO" : "MIMO2"); - desc += sprintf(buff + desc, " %s", - (is_ht20(rate)) ? "20MHz" : - (is_ht40(rate)) ? "40MHz" : - (is_ht80(rate)) ? "80Mhz" : "BAD BW"); - desc += sprintf(buff + desc, " %s %s %s\n", - (rate->sgi) ? "SGI" : "NGI", - (rate->ldpc) ? "LDPC" : "BCC", - (lq_sta->is_agg) ? 
"AGG on" : ""); - } - desc += sprintf(buff+desc, "last tx rate=0x%X\n", - lq_sta->last_rate_n_flags); - desc += sprintf(buff+desc, - "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n", - lq_sta->lq.flags, - lq_sta->lq.mimo_delim, - lq_sta->lq.single_stream_ant_msk, - lq_sta->lq.dual_stream_ant_msk); - - desc += sprintf(buff+desc, - "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", - le16_to_cpu(lq_sta->lq.agg_time_limit), - lq_sta->lq.agg_disable_start_th, - lq_sta->lq.agg_frame_cnt_limit); - - desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc); - ss_params = le32_to_cpu(lq_sta->lq.ss_params); - desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n", - (ss_params & LQ_SS_PARAMS_VALID) ? - "VALID" : "INVALID", - (ss_params & LQ_SS_BFER_ALLOWED) ? - ", BFER" : "", - (ss_params & LQ_SS_STBC_1SS_ALLOWED) ? - ", STBC" : "", - (ss_params & LQ_SS_FORCE) ? - ", FORCE" : ""); - desc += sprintf(buff+desc, - "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", - lq_sta->lq.initial_rate_index[0], - lq_sta->lq.initial_rate_index[1], - lq_sta->lq.initial_rate_index[2], - lq_sta->lq.initial_rate_index[3]); - - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { - u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]); - - desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r); - desc += rs_pretty_print_rate(buff+desc, r); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_scale_table_ops = { - .write = rs_sta_dbgfs_scale_table_write, - .read = rs_sta_dbgfs_scale_table_read, - .open = simple_open, - .llseek = default_llseek, -}; -static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - char *buff; - int desc = 0; - int i, j; - ssize_t ret; - struct iwl_scale_tbl_info *tbl; - struct rs_rate *rate; - struct iwl_lq_sta *lq_sta = file->private_data; - - buff = kmalloc(1024, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - for (i = 0; i < LQ_SIZE; i++) { - tbl = &(lq_sta->lq_info[i]); - rate = &tbl->rate; - desc += sprintf(buff+desc, - "%s type=%d SGI=%d BW=%s DUP=0\n" - "index=%d\n", - lq_sta->active_tbl == i ? "*" : "x", - rate->type, - rate->sgi, - is_ht20(rate) ? "20Mhz" : - is_ht40(rate) ? "40Mhz" : - is_ht80(rate) ? 
"80Mhz" : "ERR", - rate->index); - for (j = 0; j < IWL_RATE_COUNT; j++) { - desc += sprintf(buff+desc, - "counter=%d success=%d %%=%d\n", - tbl->win[j].counter, - tbl->win[j].success_counter, - tbl->win[j].success_ratio); - } - } - ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); - kfree(buff); - return ret; -} - -static const struct file_operations rs_sta_dbgfs_stats_table_ops = { - .read = rs_sta_dbgfs_stats_table_read, - .open = simple_open, - .llseek = default_llseek, -}; - -static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - static const char * const column_name[] = { - [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A", - [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B", - [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A", - [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B", - [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI", - [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI", - [RS_COLUMN_MIMO2] = "MIMO2", - [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI", - }; - - static const char * const rate_name[] = { - [IWL_RATE_1M_INDEX] = "1M", - [IWL_RATE_2M_INDEX] = "2M", - [IWL_RATE_5M_INDEX] = "5.5M", - [IWL_RATE_11M_INDEX] = "11M", - [IWL_RATE_6M_INDEX] = "6M|MCS0", - [IWL_RATE_9M_INDEX] = "9M", - [IWL_RATE_12M_INDEX] = "12M|MCS1", - [IWL_RATE_18M_INDEX] = "18M|MCS2", - [IWL_RATE_24M_INDEX] = "24M|MCS3", - [IWL_RATE_36M_INDEX] = "36M|MCS4", - [IWL_RATE_48M_INDEX] = "48M|MCS5", - [IWL_RATE_54M_INDEX] = "54M|MCS6", - [IWL_RATE_MCS_7_INDEX] = "MCS7", - [IWL_RATE_MCS_8_INDEX] = "MCS8", - [IWL_RATE_MCS_9_INDEX] = "MCS9", - }; - - char *buff, *pos, *endpos; - int col, rate; - ssize_t ret; - struct iwl_lq_sta *lq_sta = file->private_data; - struct rs_rate_stats *stats; - static const size_t bufsz = 1024; - - buff = kmalloc(bufsz, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - pos = buff; - endpos = pos + bufsz; - - pos += scnprintf(pos, endpos - pos, "COLUMN,"); - for (rate = 0; rate < IWL_RATE_COUNT; rate++) - pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]); - pos += scnprintf(pos, endpos - pos, "\n"); - - for (col = 0; col < RS_COLUMN_COUNT; col++) { - pos += scnprintf(pos, endpos - pos, - "%s,", column_name[col]); - - for (rate = 0; rate < IWL_RATE_COUNT; rate++) { - stats = &(lq_sta->pers.tx_stats[col][rate]); - pos += scnprintf(pos, endpos - pos, - "%llu/%llu,", - stats->success, - stats->total); - } - pos += scnprintf(pos, endpos - pos, "\n"); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); - kfree(buff); - return ret; -} - -static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_lq_sta *lq_sta = file->private_data; - memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats)); - - return count; -} - -static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = { - .read = rs_sta_dbgfs_drv_tx_stats_read, - .write = rs_sta_dbgfs_drv_tx_stats_write, - .open = simple_open, - .llseek = default_llseek, -}; - -static ssize_t iwl_dbgfs_ss_force_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_lq_sta *lq_sta = file->private_data; - char buf[12]; - int bufsz = sizeof(buf); - int pos = 0; - static const char * const ss_force_name[] = { - [RS_SS_FORCE_NONE] = "none", - [RS_SS_FORCE_STBC] = "stbc", - [RS_SS_FORCE_BFER] = "bfer", - [RS_SS_FORCE_SISO] = "siso", - }; - - pos += scnprintf(buf+pos, bufsz-pos, "%s\n", - ss_force_name[lq_sta->pers.ss_force]); - return 
simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = lq_sta->pers.drv; - int ret = 0; - - if (!strncmp("none", buf, 4)) { - lq_sta->pers.ss_force = RS_SS_FORCE_NONE; - } else if (!strncmp("siso", buf, 4)) { - lq_sta->pers.ss_force = RS_SS_FORCE_SISO; - } else if (!strncmp("stbc", buf, 4)) { - if (lq_sta->stbc_capable) { - lq_sta->pers.ss_force = RS_SS_FORCE_STBC; - } else { - IWL_ERR(mvm, - "can't force STBC. peer doesn't support\n"); - ret = -EINVAL; - } - } else if (!strncmp("bfer", buf, 4)) { - if (lq_sta->bfer_capable) { - lq_sta->pers.ss_force = RS_SS_FORCE_BFER; - } else { - IWL_ERR(mvm, - "can't force BFER. peer doesn't support\n"); - ret = -EINVAL; - } - } else { - IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n"); - ret = -EINVAL; - } - return ret ?: count; -} - -#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta) -#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, lq_sta, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ - } while (0) - -MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); - -static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir) -{ - struct iwl_lq_sta *lq_sta = priv_sta; - struct iwl_mvm_sta *mvmsta; - - mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta); - - if (!mvmsta->vif) - return; - - debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, - lq_sta, &rs_sta_dbgfs_scale_table_ops); - debugfs_create_file("rate_stats_table", S_IRUSR, dir, - lq_sta, &rs_sta_dbgfs_stats_table_ops); - debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir, - lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops); - debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, - &lq_sta->tx_agg_tid_en); - debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir, - &lq_sta->pers.dbg_fixed_txp_reduction); - - MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, S_IRUSR | S_IWUSR); - return; -err: - IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n"); -} - -static void rs_remove_debugfs(void *mvm, void *mvm_sta) -{ -} -#endif - -/* - * Initialization of rate scaling information is done by driver after - * the station is added. Since mac80211 calls this function before a - * station is added we ignore it. - */ -static void rs_rate_init_stub(void *mvm_r, - struct ieee80211_supported_band *sband, - struct cfg80211_chan_def *chandef, - struct ieee80211_sta *sta, void *mvm_sta) -{ -} - -static const struct rate_control_ops rs_mvm_ops = { - .name = RS_NAME, - .tx_status = rs_mac80211_tx_status, - .get_rate = rs_get_rate, - .rate_init = rs_rate_init_stub, - .alloc = rs_alloc, - .free = rs_free, - .alloc_sta = rs_alloc_sta, - .free_sta = rs_free_sta, - .rate_update = rs_rate_update, -#ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = rs_add_debugfs, - .remove_sta_debugfs = rs_remove_debugfs, -#endif -}; - -int iwl_mvm_rate_control_register(void) -{ - return ieee80211_rate_control_register(&rs_mvm_ops); -} - -void iwl_mvm_rate_control_unregister(void) -{ - ieee80211_rate_control_unregister(&rs_mvm_ops); -} - -/** - * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable - * Tx protection, according to this request and previous requests, - * and send the LQ command. - * @mvmsta: The station - * @enable: Enable Tx protection? 
- */ -int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, - bool enable) -{ - struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq; - - lockdep_assert_held(&mvm->mutex); - - if (enable) { - if (mvmsta->tx_protection == 0) - lq->flags |= LQ_FLAG_USE_RTS_MSK; - mvmsta->tx_protection++; - } else { - mvmsta->tx_protection--; - if (mvmsta->tx_protection == 0) - lq->flags &= ~LQ_FLAG_USE_RTS_MSK; - } - - return iwl_mvm_send_lq_cmd(mvm, lq, false); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h deleted file mode 100644 index 81314ad9ebe0..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ /dev/null @@ -1,392 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#ifndef __rs_h__ -#define __rs_h__ - -#include - -#include "iwl-config.h" - -#include "fw-api.h" -#include "iwl-trans.h" - -struct iwl_rs_rate_info { - u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ - u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ - u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ - u8 plcp_vht_siso; - u8 plcp_vht_mimo2; - u8 prev_rs; /* previous rate used in rs algo */ - u8 next_rs; /* next rate used in rs algo */ -}; - -#define IWL_RATE_60M_PLCP 3 - -enum { - IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, - IWL_RATE_INVALID = IWL_RATE_COUNT, -}; - -#define LINK_QUAL_MAX_RETRY_NUM 16 - -enum { - IWL_RATE_6M_INDEX_TABLE = 0, - IWL_RATE_9M_INDEX_TABLE, - IWL_RATE_12M_INDEX_TABLE, - IWL_RATE_18M_INDEX_TABLE, - IWL_RATE_24M_INDEX_TABLE, - IWL_RATE_36M_INDEX_TABLE, - IWL_RATE_48M_INDEX_TABLE, - IWL_RATE_54M_INDEX_TABLE, - IWL_RATE_1M_INDEX_TABLE, - IWL_RATE_2M_INDEX_TABLE, - IWL_RATE_5M_INDEX_TABLE, - IWL_RATE_11M_INDEX_TABLE, - IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, -}; - -/* #define vs. 
enum to keep from defaulting to 'large integer' */ -#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) -#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) -#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) -#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) -#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) -#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) -#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) -#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) -#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) -#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) -#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) -#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) -#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) - - -/* uCode API values for HT/VHT bit rates */ -enum { - IWL_RATE_HT_SISO_MCS_0_PLCP = 0, - IWL_RATE_HT_SISO_MCS_1_PLCP = 1, - IWL_RATE_HT_SISO_MCS_2_PLCP = 2, - IWL_RATE_HT_SISO_MCS_3_PLCP = 3, - IWL_RATE_HT_SISO_MCS_4_PLCP = 4, - IWL_RATE_HT_SISO_MCS_5_PLCP = 5, - IWL_RATE_HT_SISO_MCS_6_PLCP = 6, - IWL_RATE_HT_SISO_MCS_7_PLCP = 7, - IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8, - IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9, - IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA, - IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB, - IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC, - IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD, - IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE, - IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF, - IWL_RATE_VHT_SISO_MCS_0_PLCP = 0, - IWL_RATE_VHT_SISO_MCS_1_PLCP = 1, - IWL_RATE_VHT_SISO_MCS_2_PLCP = 2, - IWL_RATE_VHT_SISO_MCS_3_PLCP = 3, - IWL_RATE_VHT_SISO_MCS_4_PLCP = 4, - IWL_RATE_VHT_SISO_MCS_5_PLCP = 5, - IWL_RATE_VHT_SISO_MCS_6_PLCP = 6, - IWL_RATE_VHT_SISO_MCS_7_PLCP = 7, - IWL_RATE_VHT_SISO_MCS_8_PLCP = 8, - IWL_RATE_VHT_SISO_MCS_9_PLCP = 9, - IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10, - IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11, - IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12, - IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13, - IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14, - IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15, - IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16, - IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17, - IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18, - IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19, - IWL_RATE_HT_SISO_MCS_INV_PLCP, - IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, - IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, - IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, - IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, - IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, - IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, - IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP, -}; - -#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) - -#define IWL_INVALID_VALUE -1 - -#define TPC_MAX_REDUCTION 15 -#define TPC_NO_REDUCTION 0 -#define TPC_INVALID 0xff - -#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) -#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) -#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) - -#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ - -/* load per tid defines for A-MPDU activation */ -#define IWL_AGG_TPT_THREHOLD 0 -#define IWL_AGG_ALL_TID 0xff - -enum iwl_table_type { - LQ_NONE, - LQ_LEGACY_G, /* legacy types */ - LQ_LEGACY_A, - LQ_HT_SISO, /* HT types */ - LQ_HT_MIMO2, - LQ_VHT_SISO, /* VHT types */ - LQ_VHT_MIMO2, - LQ_MAX, -}; - -struct rs_rate { - int index; - enum iwl_table_type type; - u8 ant; - u32 bw; - bool sgi; - bool ldpc; - bool stbc; - bool bfer; -}; - - -#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \ - ((type) == LQ_LEGACY_A)) -#define is_type_ht_siso(type) 
((type) == LQ_HT_SISO) -#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2) -#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO) -#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2) -#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type)) -#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type)) -#define is_type_mimo(type) (is_type_mimo2(type)) -#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type)) -#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type)) -#define is_type_a_band(type) ((type) == LQ_LEGACY_A) -#define is_type_g_band(type) ((type) == LQ_LEGACY_G) - -#define is_legacy(rate) is_type_legacy((rate)->type) -#define is_ht_siso(rate) is_type_ht_siso((rate)->type) -#define is_ht_mimo2(rate) is_type_ht_mimo2((rate)->type) -#define is_vht_siso(rate) is_type_vht_siso((rate)->type) -#define is_vht_mimo2(rate) is_type_vht_mimo2((rate)->type) -#define is_siso(rate) is_type_siso((rate)->type) -#define is_mimo2(rate) is_type_mimo2((rate)->type) -#define is_mimo(rate) is_type_mimo((rate)->type) -#define is_ht(rate) is_type_ht((rate)->type) -#define is_vht(rate) is_type_vht((rate)->type) -#define is_a_band(rate) is_type_a_band((rate)->type) -#define is_g_band(rate) is_type_g_band((rate)->type) - -#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20) -#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40) -#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80) - -#define IWL_MAX_MCS_DISPLAY_SIZE 12 - -struct iwl_rate_mcs_info { - char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; - char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; -}; - -/** - * struct iwl_rate_scale_data -- tx success history for one rate - */ -struct iwl_rate_scale_data { - u64 data; /* bitmap of successful frames */ - s32 success_counter; /* number of frames successful */ - s32 success_ratio; /* per-cent * 128 */ - s32 counter; /* number of frames attempted */ - s32 average_tpt; /* success ratio * expected throughput */ -}; - -/* Possible Tx columns - * Tx Column = a combo of legacy/siso/mimo x antenna x SGI - */ -enum rs_column { - RS_COLUMN_LEGACY_ANT_A = 0, - RS_COLUMN_LEGACY_ANT_B, - RS_COLUMN_SISO_ANT_A, - RS_COLUMN_SISO_ANT_B, - RS_COLUMN_SISO_ANT_A_SGI, - RS_COLUMN_SISO_ANT_B_SGI, - RS_COLUMN_MIMO2, - RS_COLUMN_MIMO2_SGI, - - RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI, - RS_COLUMN_COUNT = RS_COLUMN_LAST + 1, - RS_COLUMN_INVALID, -}; - -enum rs_ss_force_opt { - RS_SS_FORCE_NONE = 0, - RS_SS_FORCE_STBC, - RS_SS_FORCE_BFER, - RS_SS_FORCE_SISO, -}; - -/* Packet stats per rate */ -struct rs_rate_stats { - u64 success; - u64 total; -}; - -/** - * struct iwl_scale_tbl_info -- tx params and success history for all rates - * - * There are two of these in struct iwl_lq_sta, - * one for "active", and one for "search". - */ -struct iwl_scale_tbl_info { - struct rs_rate rate; - enum rs_column column; - const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ - struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ - /* per txpower-reduction history */ - struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1]; -}; - -enum { - RS_STATE_SEARCH_CYCLE_STARTED, - RS_STATE_SEARCH_CYCLE_ENDED, - RS_STATE_STAY_IN_COLUMN, -}; - -/** - * struct iwl_lq_sta -- driver's rate scaling private structure - * - * Pointer to this gets passed back and forth between driver and mac80211. 
- */ -struct iwl_lq_sta { - u8 active_tbl; /* index of active table, range 0-1 */ - u8 rs_state; /* RS_STATE_* */ - u8 search_better_tbl; /* 1: currently trying alternate mode */ - s32 last_tpt; - - /* The following determine when to search for a new mode */ - u32 table_count_limit; - u32 max_failure_limit; /* # failed frames before new search */ - u32 max_success_limit; /* # successful frames before new search */ - u32 table_count; - u32 total_failed; /* total failed frames, any/all rates */ - u32 total_success; /* total successful frames, any/all rates */ - u64 flush_timer; /* time staying in mode before new search */ - - u32 visited_columns; /* Bitmask marking which Tx columns were - * explored during a search cycle - */ - u64 last_tx; - bool is_vht; - bool ldpc; /* LDPC Rx is supported by the STA */ - bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */ - bool bfer_capable; /* Remote supports beamformee and we BFer */ - - enum ieee80211_band band; - - /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ - unsigned long active_legacy_rate; - unsigned long active_siso_rate; - unsigned long active_mimo2_rate; - - /* Highest rate per Tx mode */ - u8 max_legacy_rate_idx; - u8 max_siso_rate_idx; - u8 max_mimo2_rate_idx; - - /* Optimal rate based on RSSI and STA caps. - * Used only to reflect link speed to userspace. - */ - struct rs_rate optimal_rate; - unsigned long optimal_rate_mask; - const struct rs_init_rate_info *optimal_rates; - int optimal_nentries; - - u8 missed_rate_counter; - - struct iwl_lq_cmd lq; - struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ - u8 tx_agg_tid_en; - - /* last tx rate_n_flags */ - u32 last_rate_n_flags; - /* packets destined for this STA are aggregated */ - u8 is_agg; - - /* tx power reduce for this sta */ - int tpc_reduce; - - /* persistent fields - initialized only once - keep last! */ - struct lq_sta_pers { -#ifdef CONFIG_MAC80211_DEBUGFS - u32 dbg_fixed_rate; - u8 dbg_fixed_txp_reduction; - - /* force STBC/BFER/SISO for testing */ - enum rs_ss_force_opt ss_force; -#endif - u8 chains; - s8 chain_signal[IEEE80211_MAX_CHAINS]; - s8 last_rssi; - struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; - struct iwl_mvm *drv; - } pers; -}; - -/* Initialize station's rate scaling information after adding station */ -void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum ieee80211_band band, bool init); - -/* Notify RS about Tx status */ -void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, struct ieee80211_tx_info *info); - -/** - * iwl_rate_control_register - Register the rate control algorithm callbacks - * - * Since the rate control algorithm is hardware specific, there is no need - * or reason to place it as a stand alone module. The driver can call - * iwl_rate_control_register in order to register the rate control callbacks - * with the mac80211 subsystem. This should be performed prior to calling - * ieee80211_register_hw - * - */ -int iwl_mvm_rate_control_register(void); - -/** - * iwl_rate_control_unregister - Unregister the rate control callbacks - * - * This should be called after calling ieee80211_unregister_hw, but before - * the driver is unloaded. 
- */ -void iwl_mvm_rate_control_unregister(void); - -struct iwl_mvm_sta; - -int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, - bool enable); - -#endif /* __rs__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c deleted file mode 100644 index 5b58f5320e8d..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ /dev/null @@ -1,612 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ -#include -#include "iwl-trans.h" -#include "mvm.h" -#include "fw-api.h" - -/* - * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler - * - * Copies the phy information in mvm->last_phy_info, it will be used when the - * actual data will come from the fw in the next packet. - */ -void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info)); - mvm->ampdu_ref++; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { - spin_lock(&mvm->drv_stats_lock); - mvm->drv_rx_stats.ampdu_count++; - spin_unlock(&mvm->drv_stats_lock); - } -#endif -} - -/* - * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211 - * - * Adds the rxb to a new skb and give it to mac80211 - */ -static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, - struct napi_struct *napi, - struct sk_buff *skb, - struct ieee80211_hdr *hdr, u16 len, - u32 ampdu_status, u8 crypt_len, - struct iwl_rx_cmd_buffer *rxb) -{ - unsigned int hdrlen, fraglen; - - /* If frame is small enough to fit in skb->head, pull it completely. - * If not, only pull ieee80211_hdr (including crypto if present, and - * an additional 8 bytes for SNAP/ethertype, see below) so that - * splice() or TCP coalesce are more efficient. - * - * Since, in addition, ieee80211_data_to_8023() always pull in at - * least 8 bytes (possibly more for mesh) we can do the same here - * to save the cost of doing it later. That still doesn't pull in - * the actual IP header since the typical case has a SNAP header. - * If the latter changes (there are efforts in the standards group - * to do so) we should revisit this and ieee80211_data_to_8023(). - */ - hdrlen = (len <= skb_tailroom(skb)) ? len : - sizeof(*hdr) + crypt_len + 8; - - memcpy(skb_put(skb, hdrlen), hdr, hdrlen); - fraglen = len - hdrlen; - - if (fraglen) { - int offset = (void *)hdr + hdrlen - - rxb_addr(rxb) + rxb_offset(rxb); - - skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, - fraglen, rxb->truesize); - } - - ieee80211_rx_napi(mvm->hw, skb, napi); -} - -/* - * iwl_mvm_get_signal_strength - use new rx PHY INFO API - * values are reported by the fw as positive values - need to negate - * to obtain their dBM. Account for missing antennas by replacing 0 - * values by -256dBm: practically 0 power and a non-feasible 8 bit value. - */ -static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, - struct iwl_rx_phy_info *phy_info, - struct ieee80211_rx_status *rx_status) -{ - int energy_a, energy_b, energy_c, max_energy; - u32 val; - - val = - le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); - energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> - IWL_RX_INFO_ENERGY_ANT_A_POS; - energy_a = energy_a ? -energy_a : S8_MIN; - energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> - IWL_RX_INFO_ENERGY_ANT_B_POS; - energy_b = energy_b ? -energy_b : S8_MIN; - energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> - IWL_RX_INFO_ENERGY_ANT_C_POS; - energy_c = energy_c ? 
-energy_c : S8_MIN; - max_energy = max(energy_a, energy_b); - max_energy = max(max_energy, energy_c); - - IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", - energy_a, energy_b, energy_c, max_energy); - - rx_status->signal = max_energy; - rx_status->chains = (le16_to_cpu(phy_info->phy_flags) & - RX_RES_PHY_FLAGS_ANTENNA) - >> RX_RES_PHY_FLAGS_ANTENNA_POS; - rx_status->chain_signal[0] = energy_a; - rx_status->chain_signal[1] = energy_b; - rx_status->chain_signal[2] = energy_c; -} - -/* - * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format - * @mvm: the mvm object - * @hdr: 80211 header - * @stats: status in mac80211's format - * @rx_pkt_status: status coming from fw - * - * returns non 0 value if the packet should be dropped - */ -static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, - struct ieee80211_hdr *hdr, - struct ieee80211_rx_status *stats, - u32 rx_pkt_status, - u8 *crypt_len) -{ - if (!ieee80211_has_protected(hdr->frame_control) || - (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == - RX_MPDU_RES_STATUS_SEC_NO_ENC) - return 0; - - /* packet was encrypted with unknown alg */ - if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == - RX_MPDU_RES_STATUS_SEC_ENC_ERR) - return 0; - - switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) { - case RX_MPDU_RES_STATUS_SEC_CCM_ENC: - /* alg is CCM: check MIC only */ - if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) - return -1; - - stats->flag |= RX_FLAG_DECRYPTED; - *crypt_len = IEEE80211_CCMP_HDR_LEN; - return 0; - - case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: - /* Don't drop the frame and decrypt it in SW */ - if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) - return 0; - *crypt_len = IEEE80211_TKIP_IV_LEN; - /* fall through if TTAK OK */ - - case RX_MPDU_RES_STATUS_SEC_WEP_ENC: - if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) - return -1; - - stats->flag |= RX_FLAG_DECRYPTED; - if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == - RX_MPDU_RES_STATUS_SEC_WEP_ENC) - *crypt_len = IEEE80211_WEP_IV_LEN; - return 0; - - case RX_MPDU_RES_STATUS_SEC_EXT_ENC: - if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) - return -1; - stats->flag |= RX_FLAG_DECRYPTED; - return 0; - - default: - IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); - } - - return 0; -} - -static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, - struct sk_buff *skb, - u32 status) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - - if (mvmvif->features & NETIF_F_RXCSUM && - status & RX_MPDU_RES_STATUS_CSUM_DONE && - status & RX_MPDU_RES_STATUS_CSUM_OK) - skb->ip_summed = CHECKSUM_UNNECESSARY; -} - -/* - * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler - * - * Handles the actual data of the Rx packet from the fw - */ -void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb) -{ - struct ieee80211_hdr *hdr; - struct ieee80211_rx_status *rx_status; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_rx_phy_info *phy_info; - struct iwl_rx_mpdu_res_start *rx_res; - struct ieee80211_sta *sta; - struct sk_buff *skb; - u32 len; - u32 ampdu_status; - u32 rate_n_flags; - u32 rx_pkt_status; - u8 crypt_len = 0; - - phy_info = &mvm->last_phy_info; - rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; - hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); - len = le16_to_cpu(rx_res->byte_count); - rx_pkt_status = le32_to_cpup((__le32 *) - (pkt->data + sizeof(*rx_res) + len)); - - /* Dont use 
dev_alloc_skb(), we'll have enough headroom once - * ieee80211_hdr pulled. - */ - skb = alloc_skb(128, GFP_ATOMIC); - if (!skb) { - IWL_ERR(mvm, "alloc_skb failed\n"); - return; - } - - rx_status = IEEE80211_SKB_RXCB(skb); - - /* - * drop the packet if it has failed being decrypted by HW - */ - if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, - &crypt_len)) { - IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", - rx_pkt_status); - kfree_skb(skb); - return; - } - - /* - * Keep packets with CRC errors (and with overrun) for monitor mode - * (otherwise the firmware discards them) but mark them as bad. - */ - if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) || - !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) { - IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); - rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - } - - /* This will be used in several places later */ - rate_n_flags = le32_to_cpu(phy_info->rate_n_flags); - - /* rx_status carries information about the packet to mac80211 */ - rx_status->mactime = le64_to_cpu(phy_info->timestamp); - rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp); - rx_status->band = - (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - rx_status->freq = - ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), - rx_status->band); - /* - * TSF as indicated by the fw is at INA time, but mac80211 expects the - * TSF at the beginning of the MPDU. - */ - /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/ - - iwl_mvm_get_signal_strength(mvm, phy_info, rx_status); - - IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal, - (unsigned long long)rx_status->mactime); - - rcu_read_lock(); - /* - * We have tx blocked stations (with CS bit). If we heard frames from - * a blocked station on a new channel we can TX to it again. 
- */ - if (unlikely(mvm->csa_tx_block_bcn_timeout)) { - sta = ieee80211_find_sta( - rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2); - if (sta) - iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); - } - - /* This is fine since we don't support multiple AP interfaces */ - sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); - if (sta) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - - rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); - - if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && - ieee80211_is_beacon(hdr->frame_control)) { - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; - bool trig_check; - s32 rssi; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, - FW_DBG_TRIGGER_RSSI); - rssi_trig = (void *)trig->data; - rssi = le32_to_cpu(rssi_trig->rssi); - - trig_check = - iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, - trig); - if (trig_check && rx_status->signal < rssi) - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); - } - } - - if (sta && ieee80211_is_data(hdr->frame_control)) - iwl_mvm_rx_csum(sta, skb, rx_pkt_status); - - rcu_read_unlock(); - - /* set the preamble flag if appropriate */ - if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) - rx_status->flag |= RX_FLAG_SHORTPRE; - - if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { - /* - * We know which subframes of an A-MPDU belong - * together since we get a single PHY response - * from the firmware for all of them - */ - rx_status->flag |= RX_FLAG_AMPDU_DETAILS; - rx_status->ampdu_reference = mvm->ampdu_ref; - } - - /* Set up the HT phy flags */ - switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - break; - case RATE_MCS_CHAN_WIDTH_40: - rx_status->flag |= RX_FLAG_40MHZ; - break; - case RATE_MCS_CHAN_WIDTH_80: - rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; - break; - case RATE_MCS_CHAN_WIDTH_160: - rx_status->vht_flag |= RX_VHT_FLAG_160MHZ; - break; - } - if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status->flag |= RX_FLAG_SHORT_GI; - if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status->flag |= RX_FLAG_HT_GF; - if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->flag |= RX_FLAG_LDPC; - if (rate_n_flags & RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->flag |= RX_FLAG_HT; - rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->vht_nss = - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; - rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; - rx_status->flag |= RX_FLAG_VHT; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; - if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->vht_flag |= RX_VHT_FLAG_BF; - } else { - rx_status->rate_idx = - iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - rx_status->band); - } - -#ifdef CONFIG_IWLWIFI_DEBUGFS - iwl_mvm_update_frame_stats(mvm, rate_n_flags, - rx_status->flag & RX_FLAG_AMPDU_DETAILS); -#endif - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, - crypt_len, rxb); -} - -static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, - struct mvm_statistics_rx *rx_stats) -{ - lockdep_assert_held(&mvm->mutex); - - mvm->rx_stats = *rx_stats; -} - -struct iwl_mvm_stat_data { - struct iwl_mvm *mvm; - __le32 mac_id; - u8 beacon_filter_average_energy; - struct 
mvm_statistics_general_v8 *general; -}; - -static void iwl_mvm_stat_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_stat_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - int sig = -data->beacon_filter_average_energy; - int last_event; - int thold = vif->bss_conf.cqm_rssi_thold; - int hyst = vif->bss_conf.cqm_rssi_hyst; - u16 id = le32_to_cpu(data->mac_id); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - /* This doesn't need the MAC ID check since it's not taking the - * data copied into the "data" struct, but rather the data from - * the notification directly. - */ - if (data->general) { - mvmvif->beacon_stats.num_beacons = - le32_to_cpu(data->general->beacon_counter[mvmvif->id]); - mvmvif->beacon_stats.avg_signal = - -data->general->beacon_average_energy[mvmvif->id]; - } - - if (mvmvif->id != id) - return; - - if (vif->type != NL80211_IFTYPE_STATION) - return; - - if (sig == 0) { - IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); - return; - } - - mvmvif->bf_data.ave_beacon_signal = sig; - - /* BT Coex */ - if (mvmvif->bf_data.bt_coex_min_thold != - mvmvif->bf_data.bt_coex_max_thold) { - last_event = mvmvif->bf_data.last_bt_coex_event; - if (sig > mvmvif->bf_data.bt_coex_max_thold && - (last_event <= mvmvif->bf_data.bt_coex_min_thold || - last_event == 0)) { - mvmvif->bf_data.last_bt_coex_event = sig; - IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n", - sig); - iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH); - } else if (sig < mvmvif->bf_data.bt_coex_min_thold && - (last_event >= mvmvif->bf_data.bt_coex_max_thold || - last_event == 0)) { - mvmvif->bf_data.last_bt_coex_event = sig; - IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n", - sig); - iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW); - } - } - - if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) - return; - - /* CQM Notification */ - last_event = mvmvif->bf_data.last_cqm_event; - if (thold && sig < thold && (last_event == 0 || - sig < last_event - hyst)) { - mvmvif->bf_data.last_cqm_event = sig; - IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", - sig); - ieee80211_cqm_rssi_notify( - vif, - NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, - GFP_KERNEL); - } else if (sig > thold && - (last_event == 0 || sig > last_event + hyst)) { - mvmvif->bf_data.last_cqm_event = sig; - IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", - sig); - ieee80211_cqm_rssi_notify( - vif, - NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, - GFP_KERNEL); - } -} - -static inline void -iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) -{ - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_stats *trig_stats; - u32 trig_offset, trig_thold; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS)) - return; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS); - trig_stats = (void *)trig->data; - - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) - return; - - trig_offset = le32_to_cpu(trig_stats->stop_offset); - trig_thold = le32_to_cpu(trig_stats->stop_threshold); - - if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt))) - return; - - if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold) - return; - - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); -} - -void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data; - struct iwl_mvm_stat_data data = { - .mvm = mvm, - }; - u32 temperature; - - if 
(iwl_rx_packet_payload_len(pkt) != sizeof(*stats)) - goto invalid; - - temperature = le32_to_cpu(stats->general.radio_temperature); - data.mac_id = stats->rx.general.mac_id; - data.beacon_filter_average_energy = - stats->general.beacon_filter_average_energy; - - iwl_mvm_update_rx_statistics(mvm, &stats->rx); - - mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time); - mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time); - mvm->radio_stats.on_time_rf = - le64_to_cpu(stats->general.on_time_rf); - mvm->radio_stats.on_time_scan = - le64_to_cpu(stats->general.on_time_scan); - - data.general = &stats->general; - - iwl_mvm_rx_stats_check_trigger(mvm, pkt); - - ieee80211_iterate_active_interfaces(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_stat_iterator, - &data); - return; - invalid: - IWL_ERR(mvm, "received invalid statistics size (%d)!\n", - iwl_rx_packet_payload_len(pkt)); -} - -void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) -{ - iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c deleted file mode 100644 index d6e0c1b5c20c..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ /dev/null @@ -1,1552 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include - -#include "mvm.h" -#include "fw-api-scan.h" - -#define IWL_DENSE_EBS_SCAN_RATIO 5 -#define IWL_SPARSE_EBS_SCAN_RATIO 1 - -enum iwl_mvm_scan_type { - IWL_SCAN_TYPE_UNASSOC, - IWL_SCAN_TYPE_WILD, - IWL_SCAN_TYPE_MILD, - IWL_SCAN_TYPE_FRAGMENTED, -}; - -enum iwl_mvm_traffic_load { - IWL_MVM_TRAFFIC_LOW, - IWL_MVM_TRAFFIC_MEDIUM, - IWL_MVM_TRAFFIC_HIGH, -}; - -struct iwl_mvm_scan_timing_params { - u32 dwell_active; - u32 dwell_passive; - u32 dwell_fragmented; - u32 suspend_time; - u32 max_out_time; -}; - -static struct iwl_mvm_scan_timing_params scan_timing[] = { - [IWL_SCAN_TYPE_UNASSOC] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .suspend_time = 0, - .max_out_time = 0, - }, - [IWL_SCAN_TYPE_WILD] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .suspend_time = 30, - .max_out_time = 120, - }, - [IWL_SCAN_TYPE_MILD] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .suspend_time = 120, - .max_out_time = 120, - }, - [IWL_SCAN_TYPE_FRAGMENTED] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .suspend_time = 95, - .max_out_time = 44, - }, -}; - -struct iwl_mvm_scan_params { - enum iwl_mvm_scan_type type; - u32 n_channels; - u16 delay; - int n_ssids; - struct cfg80211_ssid *ssids; - struct ieee80211_channel **channels; - u32 flags; - u8 *mac_addr; - u8 *mac_addr_mask; - bool no_cck; - bool pass_all; - int n_match_sets; - struct iwl_scan_probe_req preq; - struct cfg80211_match_set *match_sets; - int n_scan_plans; - struct cfg80211_sched_scan_plan *scan_plans; -}; - -static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) -{ - if (mvm->scan_rx_ant != ANT_NONE) - return mvm->scan_rx_ant; - return iwl_mvm_get_valid_rx_ant(mvm); -} - -static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) -{ - u16 rx_chain; - u8 rx_ant; - - rx_ant = iwl_mvm_scan_rx_ant(mvm); - rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; - rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; - rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; - rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS; - return cpu_to_le16(rx_chain); -} - -static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band) -{ - if (band == IEEE80211_BAND_2GHZ) - return cpu_to_le32(PHY_BAND_24); - else - return cpu_to_le32(PHY_BAND_5); -} - -static inline __le32 -iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, - bool no_cck) -{ - u32 tx_ant; - - mvm->scan_last_antenna_idx = - iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), - mvm->scan_last_antenna_idx); - tx_ant = BIT(mvm->scan_last_antenna_idx) << 
RATE_MCS_ANT_POS; - - if (band == IEEE80211_BAND_2GHZ && !no_cck) - return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | - tx_ant); - else - return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); -} - -static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int *global_cnt = data; - - if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && - mvmvif->phy_ctxt->id < MAX_PHYS) - *global_cnt += 1; -} - -static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) -{ - return IWL_MVM_TRAFFIC_LOW; -} - -static enum -iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params) -{ - int global_cnt = 0; - enum iwl_mvm_traffic_load load; - bool low_latency; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_scan_condition_iterator, - &global_cnt); - if (!global_cnt) - return IWL_SCAN_TYPE_UNASSOC; - - load = iwl_mvm_get_traffic_load(mvm); - low_latency = iwl_mvm_low_latency(mvm); - - if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && - vif->type != NL80211_IFTYPE_P2P_DEVICE && - fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) - return IWL_SCAN_TYPE_FRAGMENTED; - - if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) - return IWL_SCAN_TYPE_MILD; - - return IWL_SCAN_TYPE_WILD; -} - -static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) -{ - /* require rrm scan whenever the fw supports it */ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT); -} - -static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) -{ - int max_probe_len; - - max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; - - /* we create the 802.11 header and SSID element */ - max_probe_len -= 24 + 2; - - /* DS parameter set element is added on 2.4GHZ band if required */ - if (iwl_mvm_rrm_scan_needed(mvm)) - max_probe_len -= 3; - - return max_probe_len; -} - -int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) -{ - int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm); - - /* TODO: [BUG] This function should return the maximum allowed size of - * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs - * in the same command. So the correct implementation of this function - * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan - * command has only 512 bytes and it would leave us with about 240 - * bytes for scan IEs, which is clearly not enough. So meanwhile - * we will report an incorrect value. This may result in a failure to - * issue a scan in unified_scan_lmac and unified_sched_scan_lmac - * functions with -ENOBUFS, if a large enough probe will be provided. 
- */ - return max_ie_len; -} - -static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, - int num_res, u8 *buf, size_t buf_size) -{ - int i; - u8 *pos = buf, *end = buf + buf_size; - - for (i = 0; pos < end && i < num_res; i++) - pos += snprintf(pos, end - pos, " %u", res[i].channel); - - /* terminate the string in case the buffer was too short */ - *(buf + buf_size - 1) = '\0'; - - return buf; -} - -void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; - u8 buf[256]; - - IWL_DEBUG_SCAN(mvm, - "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n", - notif->status, notif->scanned_channels, - iwl_mvm_dump_channel_list(notif->results, - notif->scanned_channels, buf, - sizeof(buf))); -} - -void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); - ieee80211_sched_scan_results(mvm->hw); -} - -static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) -{ - switch (status) { - case IWL_SCAN_EBS_SUCCESS: - return "successful"; - case IWL_SCAN_EBS_INACTIVE: - return "inactive"; - case IWL_SCAN_EBS_FAILED: - case IWL_SCAN_EBS_CHAN_NOT_FOUND: - default: - return "failed"; - } -} - -void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; - bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); - - /* scan status must be locked for proper checking */ - lockdep_assert_held(&mvm->mutex); - - /* We first check if we were stopping a scan, in which case we - * just clear the stopping flag. Then we check if it was a - * firmware initiated stop, in which case we need to inform - * mac80211. - * Note that we can have a stopping and a running scan - * simultaneously, but we can't have two different types of - * scans stopping or running at the same time (since LMAC - * doesn't support it). - */ - - if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) { - WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR); - - IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", - aborted ? "aborted" : "completed", - iwl_mvm_ebs_status_str(scan_notif->ebs_status)); - IWL_DEBUG_SCAN(mvm, - "Last line %d, Last iteration %d, Time after last iteration %d\n", - scan_notif->last_schedule_line, - scan_notif->last_schedule_iteration, - __le32_to_cpu(scan_notif->time_after_last_iter)); - - mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; - } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { - IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n", - aborted ? "aborted" : "completed", - iwl_mvm_ebs_status_str(scan_notif->ebs_status)); - - mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR; - } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { - WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR); - - IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", - aborted ? 
"aborted" : "completed", - iwl_mvm_ebs_status_str(scan_notif->ebs_status)); - IWL_DEBUG_SCAN(mvm, - "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n", - scan_notif->last_schedule_line, - scan_notif->last_schedule_iteration, - __le32_to_cpu(scan_notif->time_after_last_iter)); - - mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; - ieee80211_sched_scan_stopped(mvm->hw); - } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { - IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", - aborted ? "aborted" : "completed", - iwl_mvm_ebs_status_str(scan_notif->ebs_status)); - - mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; - ieee80211_scan_completed(mvm->hw, - scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - } - - mvm->last_ebs_successful = - scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || - scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; -} - -static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) -{ - int i; - - for (i = 0; i < PROBE_OPTION_MAX; i++) { - if (!ssid_list[i].len) - break; - if (ssid_list[i].len == ssid_len && - !memcmp(ssid_list->ssid, ssid, ssid_len)) - return i; - } - return -1; -} - -/* We insert the SSIDs in an inverted order, because the FW will - * invert it back. - */ -static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params, - struct iwl_ssid_ie *ssids, - u32 *ssid_bitmap) -{ - int i, j; - int index; - - /* - * copy SSIDs from match list. - * iwl_config_sched_scan_profiles() uses the order of these ssids to - * config match list. - */ - for (i = 0, j = params->n_match_sets - 1; - j >= 0 && i < PROBE_OPTION_MAX; - i++, j--) { - /* skip empty SSID matchsets */ - if (!params->match_sets[j].ssid.ssid_len) - continue; - ssids[i].id = WLAN_EID_SSID; - ssids[i].len = params->match_sets[j].ssid.ssid_len; - memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid, - ssids[i].len); - } - - /* add SSIDs from scan SSID list */ - *ssid_bitmap = 0; - for (j = params->n_ssids - 1; - j >= 0 && i < PROBE_OPTION_MAX; - i++, j--) { - index = iwl_ssid_exist(params->ssids[j].ssid, - params->ssids[j].ssid_len, - ssids); - if (index < 0) { - ssids[i].id = WLAN_EID_SSID; - ssids[i].len = params->ssids[j].ssid_len; - memcpy(ssids[i].ssid, params->ssids[j].ssid, - ssids[i].len); - *ssid_bitmap |= BIT(i); - } else { - *ssid_bitmap |= BIT(index); - } - } -} - -static int -iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req) -{ - struct iwl_scan_offload_profile *profile; - struct iwl_scan_offload_profile_cfg *profile_cfg; - struct iwl_scan_offload_blacklist *blacklist; - struct iwl_host_cmd cmd = { - .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD, - .len[1] = sizeof(*profile_cfg), - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - .dataflags[1] = IWL_HCMD_DFL_NOCOPY, - }; - int blacklist_len; - int i; - int ret; - - if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES)) - return -EIO; - - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL) - blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN; - else - blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN; - - blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL); - if (!blacklist) - return -ENOMEM; - - profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL); - if (!profile_cfg) { - ret = -ENOMEM; - goto free_blacklist; - } - - cmd.data[0] = blacklist; - cmd.len[0] = sizeof(*blacklist) * blacklist_len; - cmd.data[1] = profile_cfg; - - /* No blacklist configuration */ - - profile_cfg->num_profiles = req->n_match_sets; - 
profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN; - profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN; - profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN; - if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len) - profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN; - - for (i = 0; i < req->n_match_sets; i++) { - profile = &profile_cfg->profiles[i]; - profile->ssid_index = i; - /* Support any cipher and auth algorithm */ - profile->unicast_cipher = 0xff; - profile->auth_alg = 0xff; - profile->network_type = IWL_NETWORK_TYPE_ANY; - profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY; - profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN; - } - - IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n"); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - kfree(profile_cfg); -free_blacklist: - kfree(blacklist); - - return ret; -} - -static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req) -{ - if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { - IWL_DEBUG_SCAN(mvm, - "Sending scheduled scan with filtering, n_match_sets %d\n", - req->n_match_sets); - return false; - } - - IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); - return true; -} - -static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) -{ - int ret; - struct iwl_host_cmd cmd = { - .id = SCAN_OFFLOAD_ABORT_CMD, - }; - u32 status; - - ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); - if (ret) - return ret; - - if (status != CAN_ABORT_STATUS) { - /* - * The scan abort will return 1 for success or - * 2 for "failure". A failure condition can be - * due to simply not being in an active scan which - * can occur if we send the scan abort before the - * microcode has notified us that a scan is completed. - */ - IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status); - ret = -ENOENT; - } - - return ret; -} - -static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, - struct iwl_scan_req_tx_cmd *tx_cmd, - bool no_cck) -{ - tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | - TX_CMD_FLG_BT_DIS); - tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, - IEEE80211_BAND_2GHZ, - no_cck); - tx_cmd[0].sta_id = mvm->aux_sta.sta_id; - - tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | - TX_CMD_FLG_BT_DIS); - tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, - IEEE80211_BAND_5GHZ, - no_cck); - tx_cmd[1].sta_id = mvm->aux_sta.sta_id; -} - -static void -iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm, - struct ieee80211_channel **channels, - int n_channels, u32 ssid_bitmap, - struct iwl_scan_req_lmac *cmd) -{ - struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data; - int i; - - for (i = 0; i < n_channels; i++) { - channel_cfg[i].channel_num = - cpu_to_le16(channels[i]->hw_value); - channel_cfg[i].iter_count = cpu_to_le16(1); - channel_cfg[i].iter_interval = 0; - channel_cfg[i].flags = - cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL | - ssid_bitmap); - } -} - -static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, - size_t len, u8 *const pos) -{ - static const u8 before_ds_params[] = { - WLAN_EID_SSID, - WLAN_EID_SUPP_RATES, - WLAN_EID_REQUEST, - WLAN_EID_EXT_SUPP_RATES, - }; - size_t offs; - u8 *newpos = pos; - - if (!iwl_mvm_rrm_scan_needed(mvm)) { - memcpy(newpos, ies, len); - return newpos + len; - } - - offs = ieee80211_ie_split(ies, len, - before_ds_params, - ARRAY_SIZE(before_ds_params), - 0); - - memcpy(newpos, ies, offs); - newpos += offs; - - /* Add a placeholder for DS Parameter Set element */ - *newpos++ = 
WLAN_EID_DS_PARAMS; - *newpos++ = 1; - *newpos++ = 0; - - memcpy(newpos, ies + offs, len - offs); - newpos += len - offs; - - return newpos; -} - -static void -iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_scan_ies *ies, - struct iwl_mvm_scan_params *params) -{ - struct ieee80211_mgmt *frame = (void *)params->preq.buf; - u8 *pos, *newpos; - const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? - params->mac_addr : NULL; - - /* - * Unfortunately, right now the offload scan doesn't support randomising - * within the firmware, so until the firmware API is ready we implement - * it in the driver. This means that the scan iterations won't really be - * random, only when it's restarted, but at least that helps a bit. - */ - if (mac_addr) - get_random_mask_addr(frame->sa, mac_addr, - params->mac_addr_mask); - else - memcpy(frame->sa, vif->addr, ETH_ALEN); - - frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); - eth_broadcast_addr(frame->da); - eth_broadcast_addr(frame->bssid); - frame->seq_ctrl = 0; - - pos = frame->u.probe_req.variable; - *pos++ = WLAN_EID_SSID; - *pos++ = 0; - - params->preq.mac_header.offset = 0; - params->preq.mac_header.len = cpu_to_le16(24 + 2); - - /* Insert ds parameter set element on 2.4 GHz band */ - newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, - ies->ies[IEEE80211_BAND_2GHZ], - ies->len[IEEE80211_BAND_2GHZ], - pos); - params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); - params->preq.band_data[0].len = cpu_to_le16(newpos - pos); - pos = newpos; - - memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ], - ies->len[IEEE80211_BAND_5GHZ]); - params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); - params->preq.band_data[1].len = - cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]); - pos += ies->len[IEEE80211_BAND_5GHZ]; - - memcpy(pos, ies->common_ies, ies->common_ie_len); - params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); - params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); -} - -static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm, - enum iwl_scan_priority_ext prio) -{ - if (fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY)) - return cpu_to_le32(prio); - - if (prio <= IWL_SCAN_PRIORITY_EXT_2) - return cpu_to_le32(IWL_SCAN_PRIORITY_LOW); - - if (prio <= IWL_SCAN_PRIORITY_EXT_4) - return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM); - - return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); -} - -static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, - struct iwl_scan_req_lmac *cmd, - struct iwl_mvm_scan_params *params) -{ - cmd->active_dwell = scan_timing[params->type].dwell_active; - cmd->passive_dwell = scan_timing[params->type].dwell_passive; - cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; - cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); - cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); - cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); -} - -static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, - struct ieee80211_scan_ies *ies, - int n_channels) -{ - return ((n_ssids <= PROBE_OPTION_MAX) && - (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & - (ies->common_ie_len + - ies->len[NL80211_BAND_2GHZ] + - ies->len[NL80211_BAND_5GHZ] <= - iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); -} - -static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - const struct iwl_ucode_capabilities *capa = 
&mvm->fw->ucode_capa; - - /* We can only use EBS if: - * 1. the feature is supported; - * 2. the last EBS was successful; - * 3. if only single scan, the single scan EBS API is supported; - * 4. it's not a p2p find operation. - */ - return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && - mvm->last_ebs_successful && - vif->type != NL80211_IFTYPE_P2P_DEVICE); -} - -static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, - struct iwl_mvm_scan_params *params) -{ - int flags = 0; - - if (params->n_ssids == 0) - flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; - - if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) - flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; - - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) - flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; - - if (iwl_mvm_rrm_scan_needed(mvm)) - flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED; - - if (params->pass_all) - flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; - else - flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvm->scan_iter_notif_enabled) - flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; -#endif - - return flags; -} - -static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params) -{ - struct iwl_scan_req_lmac *cmd = mvm->scan_cmd; - struct iwl_scan_probe_req *preq = - (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) * - mvm->fw->ucode_capa.n_scan_channels); - u32 ssid_bitmap = 0; - int i; - - lockdep_assert_held(&mvm->mutex); - - memset(cmd, 0, ksize(cmd)); - - if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) - return -EINVAL; - - iwl_mvm_scan_lmac_dwell(mvm, cmd, params); - - cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); - cmd->iter_num = cpu_to_le32(1); - cmd->n_channels = (u8)params->n_channels; - - cmd->delay = cpu_to_le32(params->delay); - - cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params)); - - cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band); - cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | - MAC_FILTER_IN_BEACON); - iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck); - iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap); - - /* this API uses bits 1-20 instead of 0-19 */ - ssid_bitmap <<= 1; - - for (i = 0; i < params->n_scan_plans; i++) { - struct cfg80211_sched_scan_plan *scan_plan = - ¶ms->scan_plans[i]; - - cmd->schedule[i].delay = - cpu_to_le16(scan_plan->interval); - cmd->schedule[i].iterations = scan_plan->iterations; - cmd->schedule[i].full_scan_mul = 1; - } - - /* - * If the number of iterations of the last scan plan is set to - * zero, it should run infinitely. However, this is not always the case. - * For example, when regular scan is requested the driver sets one scan - * plan with one iteration. 
- */ - if (!cmd->schedule[i - 1].iterations) - cmd->schedule[i - 1].iterations = 0xff; - - if (iwl_mvm_scan_use_ebs(mvm, vif)) { - cmd->channel_opt[0].flags = - cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); - cmd->channel_opt[0].non_ebs_ratio = - cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); - cmd->channel_opt[1].flags = - cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); - cmd->channel_opt[1].non_ebs_ratio = - cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); - } - - iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels, - params->n_channels, ssid_bitmap, cmd); - - *preq = params->preq; - - return 0; -} - -static int rate_to_scan_rate_flag(unsigned int rate) -{ - static const int rate_to_scan_rate[IWL_RATE_COUNT] = { - [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M, - [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M, - [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M, - [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M, - [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M, - [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M, - [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M, - [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M, - [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M, - [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M, - [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M, - [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M, - }; - - return rate_to_scan_rate[rate]; -} - -static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) -{ - struct ieee80211_supported_band *band; - unsigned int rates = 0; - int i; - - band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; - for (i = 0; i < band->n_bitrates; i++) - rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); - band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; - for (i = 0; i < band->n_bitrates; i++) - rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); - - /* Set both basic rates and supported rates */ - rates |= SCAN_CONFIG_SUPPORTED_RATE(rates); - - return cpu_to_le32(rates); -} - -int iwl_mvm_config_scan(struct iwl_mvm *mvm) -{ - struct iwl_scan_config *scan_config; - struct ieee80211_supported_band *band; - int num_channels = - mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + - mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; - int ret, i, j = 0, cmd_size; - struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), - }; - - if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) - return -ENOBUFS; - - cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; - - scan_config = kzalloc(cmd_size, GFP_KERNEL); - if (!scan_config) - return -ENOMEM; - - scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE | - SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | - SCAN_CONFIG_FLAG_SET_TX_CHAINS | - SCAN_CONFIG_FLAG_SET_RX_CHAINS | - SCAN_CONFIG_FLAG_SET_ALL_TIMES | - SCAN_CONFIG_FLAG_SET_LEGACY_RATES | - SCAN_CONFIG_FLAG_SET_MAC_ADDR | - SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS| - SCAN_CONFIG_N_CHANNELS(num_channels)); - scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); - scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); - scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm); - scan_config->out_of_channel_time = cpu_to_le32(170); - scan_config->suspend_time = cpu_to_le32(30); - scan_config->dwell_active = 20; - scan_config->dwell_passive = 110; - scan_config->dwell_fragmented = 20; - - memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); - - scan_config->bcast_sta_id = 
mvm->aux_sta.sta_id; - scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS | - IWL_CHANNEL_FLAG_ACCURATE_EBS | - IWL_CHANNEL_FLAG_EBS_ADD | - IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; - - band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; - for (i = 0; i < band->n_channels; i++, j++) - scan_config->channel_array[j] = band->channels[i].hw_value; - band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; - for (i = 0; i < band->n_channels; i++, j++) - scan_config->channel_array[j] = band->channels[i].hw_value; - - cmd.data[0] = scan_config; - cmd.len[0] = cmd_size; - cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - - IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(scan_config); - return ret; -} - -static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) -{ - int i; - - for (i = 0; i < mvm->max_scans; i++) - if (mvm->scan_uid_status[i] == status) - return i; - - return -ENOENT; -} - -static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) -{ - return params->n_scan_plans == 1 && - params->scan_plans[0].iterations == 1; -} - -static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, - struct iwl_scan_req_umac *cmd, - struct iwl_mvm_scan_params *params) -{ - cmd->active_dwell = scan_timing[params->type].dwell_active; - cmd->passive_dwell = scan_timing[params->type].dwell_passive; - cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; - cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); - cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); - cmd->scan_priority = - iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); - - if (iwl_mvm_is_regular_scan(params)) - cmd->ooc_priority = - iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); - else - cmd->ooc_priority = - iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2); -} - -static void -iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, - struct ieee80211_channel **channels, - int n_channels, u32 ssid_bitmap, - struct iwl_scan_req_umac *cmd) -{ - struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data; - int i; - - for (i = 0; i < n_channels; i++) { - channel_cfg[i].flags = cpu_to_le32(ssid_bitmap); - channel_cfg[i].channel_num = channels[i]->hw_value; - channel_cfg[i].iter_count = 1; - channel_cfg[i].iter_interval = 0; - } -} - -static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, - struct iwl_mvm_scan_params *params) -{ - int flags = 0; - - if (params->n_ssids == 0) - flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE; - - if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) - flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; - - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) - flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; - - if (iwl_mvm_rrm_scan_needed(mvm)) - flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; - - if (params->pass_all) - flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL; - else - flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH; - - if (!iwl_mvm_is_regular_scan(params)) - flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvm->scan_iter_notif_enabled) - flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; -#endif - return flags; -} - -static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params, - int type) -{ - struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data + - sizeof(struct iwl_scan_channel_cfg_umac) * - mvm->fw->ucode_capa.n_scan_channels; - int uid, i; - u32 ssid_bitmap = 0; 
- - lockdep_assert_held(&mvm->mutex); - - if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) - return -EINVAL; - - uid = iwl_mvm_scan_uid_by_status(mvm, 0); - if (uid < 0) - return uid; - - memset(cmd, 0, ksize(cmd)); - - iwl_mvm_scan_umac_dwell(mvm, cmd, params); - - mvm->scan_uid_status[uid] = type; - - cmd->uid = cpu_to_le32(uid); - cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); - - if (type == IWL_MVM_SCAN_SCHED) - cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); - - if (iwl_mvm_scan_use_ebs(mvm, vif)) - cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - - cmd->n_channels = params->n_channels; - - iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); - - iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, - params->n_channels, ssid_bitmap, cmd); - - for (i = 0; i < params->n_scan_plans; i++) { - struct cfg80211_sched_scan_plan *scan_plan = - ¶ms->scan_plans[i]; - - sec_part->schedule[i].iter_count = scan_plan->iterations; - sec_part->schedule[i].interval = - cpu_to_le16(scan_plan->interval); - } - - /* - * If the number of iterations of the last scan plan is set to - * zero, it should run infinitely. However, this is not always the case. - * For example, when regular scan is requested the driver sets one scan - * plan with one iteration. - */ - if (!sec_part->schedule[i - 1].iter_count) - sec_part->schedule[i - 1].iter_count = 0xff; - - sec_part->delay = cpu_to_le16(params->delay); - sec_part->preq = params->preq; - - return 0; -} - -static int iwl_mvm_num_scans(struct iwl_mvm *mvm) -{ - return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK); -} - -static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) -{ - /* This looks a bit arbitrary, but the idea is that if we run - * out of possible simultaneous scans and the userspace is - * trying to run a scan type that is already running, we - * return -EBUSY. But if the userspace wants to start a - * different type of scan, we stop the opposite type to make - * space for the new request. The reason is backwards - * compatibility with old wpa_supplicant that wouldn't stop a - * scheduled scan before starting a normal scan. - */ - - if (iwl_mvm_num_scans(mvm) < mvm->max_scans) - return 0; - - /* Use a switch, even though this is a bitmask, so that more - * than one bits set will fall in default and we will warn. - */ - switch (type) { - case IWL_MVM_SCAN_REGULAR: - if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) - return -EBUSY; - return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); - case IWL_MVM_SCAN_SCHED: - if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) - return -EBUSY; - iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); - case IWL_MVM_SCAN_NETDETECT: - /* No need to stop anything for net-detect since the - * firmware is restarted anyway. This way, any sched - * scans that were running will be restarted when we - * resume. 
- */ - return 0; - default: - WARN_ON(1); - break; - } - - return -EIO; -} - -int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct cfg80211_scan_request *req, - struct ieee80211_scan_ies *ies) -{ - struct iwl_host_cmd hcmd = { - .len = { iwl_mvm_scan_size(mvm), }, - .data = { mvm->scan_cmd, }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - struct iwl_mvm_scan_params params = {}; - int ret; - struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 }; - - lockdep_assert_held(&mvm->mutex); - - if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { - IWL_ERR(mvm, "scan while LAR regdomain is not set\n"); - return -EBUSY; - } - - ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR); - if (ret) - return ret; - - /* we should have failed registration if scan_cmd was NULL */ - if (WARN_ON(!mvm->scan_cmd)) - return -ENOMEM; - - if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) - return -ENOBUFS; - - params.n_ssids = req->n_ssids; - params.flags = req->flags; - params.n_channels = req->n_channels; - params.delay = 0; - params.ssids = req->ssids; - params.channels = req->channels; - params.mac_addr = req->mac_addr; - params.mac_addr_mask = req->mac_addr_mask; - params.no_cck = req->no_cck; - params.pass_all = true; - params.n_match_sets = 0; - params.match_sets = NULL; - - params.scan_plans = &scan_plan; - params.n_scan_plans = 1; - - params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); - - iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); - - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); - ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, - IWL_MVM_SCAN_REGULAR); - } else { - hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; - ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); - } - - if (ret) - return ret; - - ret = iwl_mvm_send_cmd(mvm, &hcmd); - if (ret) { - /* If the scan failed, it usually means that the FW was unable - * to allocate the time events. Warn on it, but maybe we - * should try to send the command again with different params. - */ - IWL_ERR(mvm, "Scan failed! 
ret %d\n", ret); - return ret; - } - - IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); - mvm->scan_status |= IWL_MVM_SCAN_REGULAR; - iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); - - return 0; -} - -int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *req, - struct ieee80211_scan_ies *ies, - int type) -{ - struct iwl_host_cmd hcmd = { - .len = { iwl_mvm_scan_size(mvm), }, - .data = { mvm->scan_cmd, }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - struct iwl_mvm_scan_params params = {}; - int ret; - - lockdep_assert_held(&mvm->mutex); - - if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { - IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n"); - return -EBUSY; - } - - ret = iwl_mvm_check_running_scans(mvm, type); - if (ret) - return ret; - - /* we should have failed registration if scan_cmd was NULL */ - if (WARN_ON(!mvm->scan_cmd)) - return -ENOMEM; - - if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) - return -ENOBUFS; - - params.n_ssids = req->n_ssids; - params.flags = req->flags; - params.n_channels = req->n_channels; - params.ssids = req->ssids; - params.channels = req->channels; - params.mac_addr = req->mac_addr; - params.mac_addr_mask = req->mac_addr_mask; - params.no_cck = false; - params.pass_all = iwl_mvm_scan_pass_all(mvm, req); - params.n_match_sets = req->n_match_sets; - params.match_sets = req->match_sets; - if (!req->n_scan_plans) - return -EINVAL; - - params.n_scan_plans = req->n_scan_plans; - params.scan_plans = req->scan_plans; - - params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); - - /* In theory, LMAC scans can handle a 32-bit delay, but since - * waiting for over 18 hours to start the scan is a bit silly - * and to keep it aligned with UMAC scans (which only support - * 16-bit delays), trim it down to 16-bits. - */ - if (req->delay > U16_MAX) { - IWL_DEBUG_SCAN(mvm, - "delay value is > 16-bits, set to max possible\n"); - params.delay = U16_MAX; - } else { - params.delay = req->delay; - } - - ret = iwl_mvm_config_sched_scan_profiles(mvm, req); - if (ret) - return ret; - - iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); - - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); - ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_SCHED); - } else { - hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; - ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); - } - - if (ret) - return ret; - - ret = iwl_mvm_send_cmd(mvm, &hcmd); - if (!ret) { - IWL_DEBUG_SCAN(mvm, - "Sched scan request was sent successfully\n"); - mvm->scan_status |= type; - } else { - /* If the scan failed, it usually means that the FW was unable - * to allocate the time events. Warn on it, but maybe we - * should try to send the command again with different params. - */ - IWL_ERR(mvm, "Sched scan failed! 
ret %d\n", ret); - } - - return ret; -} - -void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_umac_scan_complete *notif = (void *)pkt->data; - u32 uid = __le32_to_cpu(notif->uid); - bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); - - if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) - return; - - /* if the scan is already stopping, we don't need to notify mac80211 */ - if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { - ieee80211_scan_completed(mvm->hw, aborted); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { - ieee80211_sched_scan_stopped(mvm->hw); - } - - mvm->scan_status &= ~mvm->scan_uid_status[uid]; - IWL_DEBUG_SCAN(mvm, - "Scan completed, uid %u type %u, status %s, EBS status %s\n", - uid, mvm->scan_uid_status[uid], - notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? - "completed" : "aborted", - iwl_mvm_ebs_status_str(notif->ebs_status)); - IWL_DEBUG_SCAN(mvm, - "Last line %d, Last iteration %d, Time from last iteration %d\n", - notif->last_schedule, notif->last_iter, - __le32_to_cpu(notif->time_from_last_iter)); - - if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && - notif->ebs_status != IWL_SCAN_EBS_INACTIVE) - mvm->last_ebs_successful = false; - - mvm->scan_uid_status[uid] = 0; -} - -void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; - u8 buf[256]; - - IWL_DEBUG_SCAN(mvm, - "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n", - notif->status, notif->scanned_channels, - iwl_mvm_dump_channel_list(notif->results, - notif->scanned_channels, buf, - sizeof(buf))); -} - -static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) -{ - struct iwl_umac_scan_abort cmd = {}; - int uid, ret; - - lockdep_assert_held(&mvm->mutex); - - /* We should always get a valid index here, because we already - * checked that this type of scan was running in the generic - * code. 
- */ - uid = iwl_mvm_scan_uid_by_status(mvm, type); - if (WARN_ON_ONCE(uid < 0)) - return uid; - - cmd.uid = cpu_to_le32(uid); - - IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); - - ret = iwl_mvm_send_cmd_pdu(mvm, - iwl_cmd_id(SCAN_ABORT_UMAC, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(cmd), &cmd); - if (!ret) - mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; - - return ret; -} - -static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) -{ - struct iwl_notification_wait wait_scan_done; - static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, - SCAN_OFFLOAD_COMPLETE, }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, - scan_done_notif, - ARRAY_SIZE(scan_done_notif), - NULL, NULL); - - IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); - - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - ret = iwl_mvm_umac_scan_abort(mvm, type); - else - ret = iwl_mvm_lmac_scan_abort(mvm); - - if (ret) { - IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); - iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); - return ret; - } - - ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); - - return ret; -} - -int iwl_mvm_scan_size(struct iwl_mvm *mvm) -{ - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - return sizeof(struct iwl_scan_req_umac) + - sizeof(struct iwl_scan_channel_cfg_umac) * - mvm->fw->ucode_capa.n_scan_channels + - sizeof(struct iwl_scan_req_umac_tail); - - return sizeof(struct iwl_scan_req_lmac) + - sizeof(struct iwl_scan_channel_cfg_lmac) * - mvm->fw->ucode_capa.n_scan_channels + - sizeof(struct iwl_scan_probe_req); -} - -/* - * This function is used in nic restart flow, to inform mac80211 about scans - * that was aborted by restart flow or by an assert. - */ -void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) -{ - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - int uid, i; - - uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); - if (uid >= 0) { - ieee80211_scan_completed(mvm->hw, true); - mvm->scan_uid_status[uid] = 0; - } - uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); - if (uid >= 0 && !mvm->restart_fw) { - ieee80211_sched_scan_stopped(mvm->hw); - mvm->scan_uid_status[uid] = 0; - } - - /* We shouldn't have any UIDs still set. Loop over all the - * UIDs to make sure there's nothing left there and warn if - * any is found. - */ - for (i = 0; i < mvm->max_scans; i++) { - if (WARN_ONCE(mvm->scan_uid_status[i], - "UMAC scan UID %d status was not cleaned\n", - i)) - mvm->scan_uid_status[i] = 0; - } - } else { - if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) - ieee80211_scan_completed(mvm->hw, true); - - /* Sched scan will be restarted by mac80211 in - * restart_hw, so do not report if FW is about to be - * restarted. - */ - if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw) - ieee80211_sched_scan_stopped(mvm->hw); - } -} - -int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) -{ - int ret; - - if (!(mvm->scan_status & type)) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ret = 0; - goto out; - } - - ret = iwl_mvm_scan_stop_wait(mvm, type); - if (!ret) - mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; -out: - /* Clear the scan status so the next scan requests will - * succeed and mark the scan as stopping, so that the Rx - * handler doesn't do anything, as the scan was stopped from - * above. 
- */ - mvm->scan_status &= ~type; - - if (type == IWL_MVM_SCAN_REGULAR) { - /* Since the rx handler won't do anything now, we have - * to release the scan reference here. - */ - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - if (notify) - ieee80211_scan_completed(mvm->hw, true); - } else if (notify) { - ieee80211_sched_scan_stopped(mvm->hw); - } - - return ret; -} diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c deleted file mode 100644 index b0f59fdd287c..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ /dev/null @@ -1,340 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include "mvm.h" - -/* For counting bound interfaces */ -struct iwl_mvm_active_iface_iterator_data { - struct ieee80211_vif *ignore_vif; - u8 sta_vif_ap_sta_id; - enum iwl_sf_state sta_vif_state; - int num_active_macs; -}; - -/* - * Count bound interfaces which are not p2p, besides data->ignore_vif. - * data->station_vif will point to one bound vif of type station, if exists. - */ -static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_active_iface_iterator_data *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (vif == data->ignore_vif || !mvmvif->phy_ctxt || - vif->type == NL80211_IFTYPE_P2P_DEVICE) - return; - - data->num_active_macs++; - - if (vif->type == NL80211_IFTYPE_STATION) { - data->sta_vif_ap_sta_id = mvmvif->ap_sta_id; - if (vif->bss_conf.assoc) - data->sta_vif_state = SF_FULL_ON; - else - data->sta_vif_state = SF_INIT_OFF; - } -} - -/* - * Aging and idle timeouts for the different possible scenarios - * in default configuration - */ -static const -__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { - { - cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF), - cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF) - }, - { - cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF), - cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF) - }, - { - cpu_to_le32(SF_MCAST_AGING_TIMER_DEF), - cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF) - }, - { - cpu_to_le32(SF_BA_AGING_TIMER_DEF), - cpu_to_le32(SF_BA_IDLE_TIMER_DEF) - }, - { - cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF), - cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF) - }, -}; - -/* - * Aging and idle timeouts for the different possible scenarios - * in single BSS MAC configuration. - */ -static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { - { - cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER), - cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER) - }, - { - cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER), - cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER) - }, - { - cpu_to_le32(SF_MCAST_AGING_TIMER), - cpu_to_le32(SF_MCAST_IDLE_TIMER) - }, - { - cpu_to_le32(SF_BA_AGING_TIMER), - cpu_to_le32(SF_BA_IDLE_TIMER) - }, - { - cpu_to_le32(SF_TX_RE_AGING_TIMER), - cpu_to_le32(SF_TX_RE_IDLE_TIMER) - }, -}; - -static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, - struct iwl_sf_cfg_cmd *sf_cmd, - struct ieee80211_sta *sta) -{ - int i, j, watermark; - - sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN); - - /* - * If we are in association flow - check antenna configuration - * capabilities of the AP station, and choose the watermark accordingly. 
- */ - if (sta) { - if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) { - switch (sta->rx_nss) { - case 1: - watermark = SF_W_MARK_SISO; - break; - case 2: - watermark = SF_W_MARK_MIMO2; - break; - default: - watermark = SF_W_MARK_MIMO3; - break; - } - } else { - watermark = SF_W_MARK_LEGACY; - } - /* default watermark value for unassociated mode. */ - } else { - watermark = SF_W_MARK_MIMO2; - } - sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark); - - for (i = 0; i < SF_NUM_SCENARIO; i++) { - for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) { - sf_cmd->long_delay_timeouts[i][j] = - cpu_to_le32(SF_LONG_DELAY_AGING_TIMER); - } - } - - if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) { - BUILD_BUG_ON(sizeof(sf_full_timeout) != - sizeof(__le32) * SF_NUM_SCENARIO * - SF_NUM_TIMEOUT_TYPES); - - memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, - sizeof(sf_full_timeout)); - } else { - BUILD_BUG_ON(sizeof(sf_full_timeout_def) != - sizeof(__le32) * SF_NUM_SCENARIO * - SF_NUM_TIMEOUT_TYPES); - - memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, - sizeof(sf_full_timeout_def)); - } - -} - -static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, - enum iwl_sf_state new_state) -{ - struct iwl_sf_cfg_cmd sf_cmd = { - .state = cpu_to_le32(SF_FULL_ON), - }; - struct ieee80211_sta *sta; - int ret = 0; - - if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13) - sf_cmd.state = cpu_to_le32(new_state); - - if (mvm->cfg->disable_dummy_notification) - sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); - - /* - * If an associated AP sta changed its antenna configuration, the state - * will remain FULL_ON but SF parameters need to be reconsidered. - */ - if (new_state != SF_FULL_ON && mvm->sf_state == new_state) - return 0; - - switch (new_state) { - case SF_UNINIT: - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13) - iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); - break; - case SF_FULL_ON: - if (sta_id == IWL_MVM_STATION_COUNT) { - IWL_ERR(mvm, - "No station: Cannot switch SF to FULL_ON\n"); - return -EINVAL; - } - rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (IS_ERR_OR_NULL(sta)) { - IWL_ERR(mvm, "Invalid station id\n"); - rcu_read_unlock(); - return -EINVAL; - } - iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta); - rcu_read_unlock(); - break; - case SF_INIT_OFF: - iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); - break; - default: - WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n", - new_state); - return -EINVAL; - } - - ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC, - sizeof(sf_cmd), &sf_cmd); - if (!ret) - mvm->sf_state = new_state; - - return ret; -} - -/* - * Update Smart fifo: - * Count bound interfaces that are not to be removed, ignoring p2p devices, - * and set new state accordingly. - */ -int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, - bool remove_vif) -{ - enum iwl_sf_state new_state; - u8 sta_id = IWL_MVM_STATION_COUNT; - struct iwl_mvm_vif *mvmvif = NULL; - struct iwl_mvm_active_iface_iterator_data data = { - .ignore_vif = changed_vif, - .sta_vif_state = SF_UNINIT, - .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT, - }; - - /* - * Ignore the call if we are in HW Restart flow, or if the handled - * vif is a p2p device. 
- */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || - (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE)) - return 0; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bound_iface_iterator, - &data); - - /* If changed_vif exists and is not to be removed, add to the count */ - if (changed_vif && !remove_vif) - data.num_active_macs++; - - switch (data.num_active_macs) { - case 0: - /* If there are no active macs - change state to SF_INIT_OFF */ - new_state = SF_INIT_OFF; - break; - case 1: - if (remove_vif) { - /* The one active mac left is of type station - * and we filled the relevant data during iteration - */ - new_state = data.sta_vif_state; - sta_id = data.sta_vif_ap_sta_id; - } else { - if (WARN_ON(!changed_vif)) - return -EINVAL; - if (changed_vif->type != NL80211_IFTYPE_STATION) { - new_state = SF_UNINIT; - } else if (changed_vif->bss_conf.assoc && - changed_vif->bss_conf.dtim_period) { - mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); - sta_id = mvmvif->ap_sta_id; - new_state = SF_FULL_ON; - } else { - new_state = SF_INIT_OFF; - } - } - break; - default: - /* If there are multiple active macs - change to SF_UNINIT */ - new_state = SF_UNINIT; - } - return iwl_mvm_sf_config(mvm, sta_id, new_state); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c deleted file mode 100644 index 300a249486e4..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ /dev/null @@ -1,1810 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include - -#include "mvm.h" -#include "sta.h" -#include "rs.h" - -static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, - enum nl80211_iftype iftype) -{ - int sta_id; - u32 reserved_ids = 0; - - BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32); - WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); - - lockdep_assert_held(&mvm->mutex); - - /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */ - if (iftype != NL80211_IFTYPE_STATION) - reserved_ids = BIT(0); - - /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ - for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) { - if (BIT(sta_id) & reserved_ids) - continue; - - if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex))) - return sta_id; - } - return IWL_MVM_STATION_COUNT; -} - -/* send station add/update command to firmware */ -int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - bool update) -{ - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_add_sta_cmd add_sta_cmd = { - .sta_id = mvm_sta->sta_id, - .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), - .add_modify = update ? 1 : 0, - .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | - STA_FLG_MIMO_EN_MSK), - }; - int ret; - u32 status; - u32 agg_size = 0, mpdu_dens = 0; - - if (!update) { - add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); - memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); - } - - switch (sta->bandwidth) { - case IEEE80211_STA_RX_BW_160: - add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); - /* fall through */ - case IEEE80211_STA_RX_BW_80: - add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ); - /* fall through */ - case IEEE80211_STA_RX_BW_40: - add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); - /* fall through */ - case IEEE80211_STA_RX_BW_20: - if (sta->ht_cap.ht_supported) - add_sta_cmd.station_flags |= - cpu_to_le32(STA_FLG_FAT_EN_20MHZ); - break; - } - - switch (sta->rx_nss) { - case 1: - add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); - break; - case 2: - add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2); - break; - case 3 ... 
8: - add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3); - break; - } - - switch (sta->smps_mode) { - case IEEE80211_SMPS_AUTOMATIC: - case IEEE80211_SMPS_NUM_MODES: - WARN_ON(1); - break; - case IEEE80211_SMPS_STATIC: - /* override NSS */ - add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK); - add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); - break; - case IEEE80211_SMPS_DYNAMIC: - add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT); - break; - case IEEE80211_SMPS_OFF: - /* nothing */ - break; - } - - if (sta->ht_cap.ht_supported) { - add_sta_cmd.station_flags_msk |= - cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | - STA_FLG_AGG_MPDU_DENS_MSK); - - mpdu_dens = sta->ht_cap.ampdu_density; - } - - if (sta->vht_cap.vht_supported) { - agg_size = sta->vht_cap.cap & - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; - agg_size >>= - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; - } else if (sta->ht_cap.ht_supported) { - agg_size = sta->ht_cap.ampdu_factor; - } - - add_sta_cmd.station_flags |= - cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); - add_sta_cmd.station_flags |= - cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); - - status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd), - &add_sta_cmd, &status); - if (ret) - return ret; - - switch (status) { - case ADD_STA_SUCCESS: - IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); - break; - default: - ret = -EIO; - IWL_ERR(mvm, "ADD_STA failed\n"); - break; - } - - return ret; -} - -static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - unsigned long used_hw_queues; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, NULL, true, false); - u32 ac; - - lockdep_assert_held(&mvm->mutex); - - used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL); - - /* Find available queues, and allocate them to the ACs */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - u8 queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate STA queue\n"); - return -EBUSY; - } - - __set_bit(queue, &used_hw_queues); - mvmsta->hw_queue[ac] = queue; - } - - /* Found a place for all queues - enable them */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], - mvmsta->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac], 0, - wdg_timeout); - mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); - } - - return 0; -} - -static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned long sta_msk; - int i; - - lockdep_assert_held(&mvm->mutex); - - /* disable the TDLS STA-specific queues */ - sta_msk = mvmsta->tfd_queue_msk; - for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE) - iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); -} - -int iwl_mvm_add_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - int i, ret, sta_id; - - lockdep_assert_held(&mvm->mutex); - - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - sta_id = iwl_mvm_find_free_sta_id(mvm, - ieee80211_vif_type_p2p(vif)); - else - sta_id = mvm_sta->sta_id; - - if (sta_id == IWL_MVM_STATION_COUNT) - return -ENOSPC; - - if (vif->type 
== NL80211_IFTYPE_AP) { - mvmvif->ap_assoc_sta_count++; - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } - - spin_lock_init(&mvm_sta->lock); - - mvm_sta->sta_id = sta_id; - mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color); - mvm_sta->vif = vif; - mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - mvm_sta->tx_protection = 0; - mvm_sta->tt_tx_protection = false; - - /* HW restart, don't assume the memory has been zeroed */ - atomic_set(&mvm->pending_frames[sta_id], 0); - mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ - mvm_sta->tfd_queue_msk = 0; - - /* allocate new queues for a TDLS station */ - if (sta->tdls) { - ret = iwl_mvm_tdls_sta_init(mvm, sta); - if (ret) - return ret; - } else { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) - mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); - } - - /* for HW restart - reset everything but the sequence number */ - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - u16 seq = mvm_sta->tid_data[i].seq_number; - memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); - mvm_sta->tid_data[i].seq_number = seq; - } - mvm_sta->agg_tids = 0; - - ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); - if (ret) - goto err; - - if (vif->type == NL80211_IFTYPE_STATION) { - if (!sta->tdls) { - WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT); - mvmvif->ap_sta_id = sta_id; - } else { - WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT); - } - } - - rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); - - return 0; - -err: - iwl_mvm_tdls_sta_deinit(mvm, sta); - return ret; -} - -int iwl_mvm_update_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - return iwl_mvm_sta_send_to_fw(mvm, sta, true); -} - -int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, - bool drain) -{ - struct iwl_mvm_add_sta_cmd cmd = {}; - int ret; - u32 status; - - lockdep_assert_held(&mvm->mutex); - - cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; - cmd.add_modify = STA_MODE_MODIFY; - cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; - cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); - - status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), - &cmd, &status); - if (ret) - return ret; - - switch (status) { - case ADD_STA_SUCCESS: - IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", - mvmsta->sta_id); - break; - default: - ret = -EIO; - IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", - mvmsta->sta_id); - break; - } - - return ret; -} - -/* - * Remove a station from the FW table. Before sending the command to remove - * the station validate that the station is indeed known to the driver (sanity - * only). - */ -static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { - .sta_id = sta_id, - }; - int ret; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - /* Note: internal stations are marked as error values */ - if (!sta) { - IWL_ERR(mvm, "Invalid station id\n"); - return -EINVAL; - } - - ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, - sizeof(rm_sta_cmd), &rm_sta_cmd); - if (ret) { - IWL_ERR(mvm, "Failed to remove station. 
Id=%d\n", sta_id); - return ret; - } - - return 0; -} - -void iwl_mvm_sta_drained_wk(struct work_struct *wk) -{ - struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk); - u8 sta_id; - - /* - * The mutex is needed because of the SYNC cmd, but not only: if the - * work would run concurrently with iwl_mvm_rm_sta, it would run before - * iwl_mvm_rm_sta sets the station as busy, and exit. Then - * iwl_mvm_rm_sta would set the station as busy, and nobody will clean - * that later. - */ - mutex_lock(&mvm->mutex); - - for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) { - int ret; - struct ieee80211_sta *sta = - rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - /* - * This station is in use or RCU-removed; the latter happens in - * managed mode, where mac80211 removes the station before we - * can remove it from firmware (we can only do that after the - * MAC is marked unassociated), and possibly while the deauth - * frame to disconnect from the AP is still queued. Then, the - * station pointer is -ENOENT when the last skb is reclaimed. - */ - if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT) - continue; - - if (PTR_ERR(sta) == -EINVAL) { - IWL_ERR(mvm, "Drained sta %d, but it is internal?\n", - sta_id); - continue; - } - - if (!sta) { - IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n", - sta_id); - continue; - } - - WARN_ON(PTR_ERR(sta) != -EBUSY); - /* This station was removed and we waited until it got drained, - * we can now proceed and remove it. - */ - ret = iwl_mvm_rm_sta_common(mvm, sta_id); - if (ret) { - IWL_ERR(mvm, - "Couldn't remove sta %d after it was drained\n", - sta_id); - continue; - } - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); - clear_bit(sta_id, mvm->sta_drained); - - if (mvm->tfd_drained[sta_id]) { - unsigned long i, msk = mvm->tfd_drained[sta_id]; - - for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE) - iwl_mvm_disable_txq(mvm, i, i, - IWL_MAX_TID_COUNT, 0); - - mvm->tfd_drained[sta_id] = 0; - IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", - sta_id, msk); - } - } - - mutex_unlock(&mvm->mutex); -} - -int iwl_mvm_rm_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - int ret; - - lockdep_assert_held(&mvm->mutex); - - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == mvm_sta->sta_id) { - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - if (ret) - return ret; - /* flush its queues here since we are freeing mvm_sta */ - ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0); - if (ret) - return ret; - ret = iwl_trans_wait_tx_queue_empty(mvm->trans, - mvm_sta->tfd_queue_msk); - if (ret) - return ret; - ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - - /* if we are associated - we can't remove the AP STA now */ - if (vif->bss_conf.assoc) - return ret; - - /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; - - /* clear d0i3_ap_sta_id if no longer relevant */ - if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; - } - - /* - * This shouldn't happen - the TDLS channel switch should be canceled - * before the STA is removed. 
- */ - if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) { - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; - cancel_delayed_work(&mvm->tdls_cs.dwork); - } - - /* - * Make sure that the tx response code sees the station as -EBUSY and - * calls the drain worker. - */ - spin_lock_bh(&mvm_sta->lock); - /* - * There are frames pending on the AC queues for this station. - * We need to wait until all the frames are drained... - */ - if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], - ERR_PTR(-EBUSY)); - spin_unlock_bh(&mvm_sta->lock); - - /* disable TDLS sta queues on drain complete */ - if (sta->tdls) { - mvm->tfd_drained[mvm_sta->sta_id] = - mvm_sta->tfd_queue_msk; - IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", - mvm_sta->sta_id); - } - - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - } else { - spin_unlock_bh(&mvm_sta->lock); - - if (sta->tdls) - iwl_mvm_tdls_sta_deinit(mvm, sta); - - ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); - } - - return ret; -} - -int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u8 sta_id) -{ - int ret = iwl_mvm_rm_sta_common(mvm, sta_id); - - lockdep_assert_held(&mvm->mutex); - - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); - return ret; -} - -static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype) -{ - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); - if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT)) - return -ENOSPC; - } - - sta->tfd_queue_msk = qmask; - - /* put a non-NULL value so iterating over the stations won't stop */ - rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); - return 0; -} - -static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta) -{ - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); - memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); - sta->sta_id = IWL_MVM_STATION_COUNT; -} - -static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta, - const u8 *addr, - u16 mac_id, u16 color) -{ - struct iwl_mvm_add_sta_cmd cmd; - int ret; - u32 status; - - lockdep_assert_held(&mvm->mutex); - - memset(&cmd, 0, sizeof(cmd)); - cmd.sta_id = sta->sta_id; - cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, - color)); - - cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); - - if (addr) - memcpy(cmd.addr, addr, ETH_ALEN); - - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), - &cmd, &status); - if (ret) - return ret; - - switch (status) { - case ADD_STA_SUCCESS: - IWL_DEBUG_INFO(mvm, "Internal station added.\n"); - return 0; - default: - ret = -EIO; - IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", - status); - break; - } - return ret; -} - -int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) -{ - unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
- mvm->cfg->base_params->wd_timeout : - IWL_WATCHDOG_DISABLED; - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* Map Aux queue to fifo - needs to happen before adding Aux station */ - iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); - - /* Allocate aux station and assign to it the aux queue */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), - NL80211_IFTYPE_UNSPECIFIED); - if (ret) - return ret; - - ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, - MAC_INDEX_AUX, 0); - - if (ret) - iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); - return ret; -} - -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) -{ - lockdep_assert_held(&mvm->mutex); - - iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); -} - -/* - * Send the add station command for the vif's broadcast station. - * Assumes that the station was already allocated. - * - * @mvm: the mvm component - * @vif: the interface to which the broadcast station is added - * @bsta: the broadcast station to add. - */ -int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; - static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; - const u8 *baddr = _baddr; - - lockdep_assert_held(&mvm->mutex); - - if (vif->type == NL80211_IFTYPE_ADHOC) - baddr = vif->bss_conf.bssid; - - if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) - return -ENOSPC; - - return iwl_mvm_add_int_sta_common(mvm, bsta, baddr, - mvmvif->id, mvmvif->color); -} - -/* Send the FW a request to remove the station from it's internal data - * structures, but DO NOT remove the entry from the local data structures. */ -int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); - if (ret) - IWL_WARN(mvm, "Failed sending remove station\n"); - return ret; -} - -int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 qmask; - - lockdep_assert_held(&mvm->mutex); - - qmask = iwl_mvm_mac_get_queues_mask(vif); - - /* - * The firmware defines the TFD queue mask to only be relevant - * for *unicast* queues, so the multicast (CAB) queue shouldn't - * be included. - */ - if (vif->type == NL80211_IFTYPE_AP) - qmask &= ~BIT(vif->cab_queue); - - return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, - ieee80211_vif_type_p2p(vif)); -} - -/* Allocate a new station entry for the broadcast station to the given vif, - * and send it to the FW. - * Note that each P2P mac should have its own broadcast station. - * - * @mvm: the mvm component - * @vif: the interface to which the broadcast station is added - * @bsta: the broadcast station to add. 
*/ -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; - int ret; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_alloc_bcast_sta(mvm, vif); - if (ret) - return ret; - - ret = iwl_mvm_send_add_bcast_sta(mvm, vif); - - if (ret) - iwl_mvm_dealloc_int_sta(mvm, bsta); - - return ret; -} - -void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); -} - -/* - * Send the FW a request to remove the station from it's internal data - * structures, and in addition remove it from the local data structure. - */ -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - int ret; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); - - iwl_mvm_dealloc_bcast_sta(mvm, vif); - - return ret; -} - -#define IWL_MAX_RX_BA_SESSIONS 16 - -int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start) -{ - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_add_sta_cmd cmd = {}; - int ret; - u32 status; - - lockdep_assert_held(&mvm->mutex); - - if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { - IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); - return -ENOSPC; - } - - cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); - cmd.sta_id = mvm_sta->sta_id; - cmd.add_modify = STA_MODE_MODIFY; - if (start) { - cmd.add_immediate_ba_tid = (u8) tid; - cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); - } else { - cmd.remove_immediate_ba_tid = (u8) tid; - } - cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : - STA_MODIFY_REMOVE_BA_TID; - - status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), - &cmd, &status); - if (ret) - return ret; - - switch (status) { - case ADD_STA_SUCCESS: - IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n", - start ? "start" : "stopp"); - break; - case ADD_STA_IMMEDIATE_BA_FAILURE: - IWL_WARN(mvm, "RX BA Session refused by fw\n"); - ret = -ENOSPC; - break; - default: - ret = -EIO; - IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", - start ? 
"start" : "stopp", status); - break; - } - - if (!ret) { - if (start) - mvm->rx_ba_sessions++; - else if (mvm->rx_ba_sessions > 0) - /* check that restart flow didn't zero the counter */ - mvm->rx_ba_sessions--; - } - - return ret; -} - -static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u8 queue, bool start) -{ - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_add_sta_cmd cmd = {}; - int ret; - u32 status; - - lockdep_assert_held(&mvm->mutex); - - if (start) { - mvm_sta->tfd_queue_msk |= BIT(queue); - mvm_sta->tid_disable_agg &= ~BIT(tid); - } else { - mvm_sta->tfd_queue_msk &= ~BIT(queue); - mvm_sta->tid_disable_agg |= BIT(tid); - } - - cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); - cmd.sta_id = mvm_sta->sta_id; - cmd.add_modify = STA_MODE_MODIFY; - cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX; - cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); - cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); - - status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), - &cmd, &status); - if (ret) - return ret; - - switch (status) { - case ADD_STA_SUCCESS: - break; - default: - ret = -EIO; - IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", - start ? "start" : "stopp", status); - break; - } - - return ret; -} - -const u8 tid_to_mac80211_ac[] = { - IEEE80211_AC_BE, - IEEE80211_AC_BK, - IEEE80211_AC_BK, - IEEE80211_AC_BE, - IEEE80211_AC_VI, - IEEE80211_AC_VI, - IEEE80211_AC_VO, - IEEE80211_AC_VO, -}; - -static const u8 tid_to_ucode_ac[] = { - AC_BE, - AC_BK, - AC_BK, - AC_BE, - AC_VI, - AC_VI, - AC_VO, - AC_VO, -}; - -int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data; - int txq_id; - int ret; - - if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) - return -EINVAL; - - if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { - IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", - mvmsta->tid_data[tid].state); - return -ENXIO; - } - - lockdep_assert_held(&mvm->mutex); - - spin_lock_bh(&mvmsta->lock); - - /* possible race condition - we entered D0i3 while starting agg */ - if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) { - spin_unlock_bh(&mvmsta->lock); - IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n"); - return -EIO; - } - - spin_lock_bh(&mvm->queue_info_lock); - - txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue, - mvm->last_agg_queue); - if (txq_id < 0) { - ret = txq_id; - spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "Failed to allocate agg queue\n"); - goto release_locks; - } - mvm->queue_info[txq_id].setup_reserved = true; - spin_unlock_bh(&mvm->queue_info_lock); - - tid_data = &mvmsta->tid_data[tid]; - tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); - tid_data->txq_id = txq_id; - *ssn = tid_data->ssn; - - IWL_DEBUG_TX_QUEUES(mvm, - "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->ssn, - tid_data->next_reclaimed); - - if (tid_data->ssn == tid_data->next_reclaimed) { - tid_data->state = IWL_AGG_STARTING; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - } else { - tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; - } - - ret = 0; - -release_locks: - spin_unlock_bh(&mvmsta->lock); - - return ret; -} - -int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif 
*vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); - int queue, fifo, ret; - u16 ssn; - - BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) - != IWL_MAX_TID_COUNT); - - buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); - - spin_lock_bh(&mvmsta->lock); - ssn = tid_data->ssn; - queue = tid_data->txq_id; - tid_data->state = IWL_AGG_ON; - mvmsta->agg_tids |= BIT(tid); - tid_data->ssn = 0xffff; - spin_unlock_bh(&mvmsta->lock); - - fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; - - iwl_mvm_enable_agg_txq(mvm, queue, - vif->hw_queue[tid_to_mac80211_ac[tid]], fifo, - mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout); - - ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); - if (ret) - return -EIO; - - /* No need to mark as reserved */ - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].setup_reserved = false; - spin_unlock_bh(&mvm->queue_info_lock); - - /* - * Even though in theory the peer could have different - * aggregation reorder buffer sizes for different sessions, - * our ucode doesn't allow for that and has a global limit - * for each station. Therefore, use the minimum of all the - * aggregation sessions and our default value. - */ - mvmsta->max_agg_bufsize = - min(mvmsta->max_agg_bufsize, buf_size); - mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; - - IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", - sta->addr, tid); - - return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false); -} - -int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - u16 txq_id; - int err; - - - /* - * If mac80211 is cleaning its state, then say that we finished since - * our state has been cleared anyway. - */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - return 0; - } - - spin_lock_bh(&mvmsta->lock); - - txq_id = tid_data->txq_id; - - IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->state); - - mvmsta->agg_tids &= ~BIT(tid); - - /* No need to mark as reserved anymore */ - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[txq_id].setup_reserved = false; - spin_unlock_bh(&mvm->queue_info_lock); - - switch (tid_data->state) { - case IWL_AGG_ON: - tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); - - IWL_DEBUG_TX_QUEUES(mvm, - "ssn = %d, next_recl = %d\n", - tid_data->ssn, tid_data->next_reclaimed); - - /* There are still packets for this RA / TID in the HW */ - if (tid_data->ssn != tid_data->next_reclaimed) { - tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA; - err = 0; - break; - } - - tid_data->ssn = 0xffff; - tid_data->state = IWL_AGG_OFF; - spin_unlock_bh(&mvmsta->lock); - - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - - iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - - iwl_mvm_disable_txq(mvm, txq_id, - vif->hw_queue[tid_to_mac80211_ac[tid]], tid, - 0); - return 0; - case IWL_AGG_STARTING: - case IWL_EMPTYING_HW_QUEUE_ADDBA: - /* - * The agg session has been stopped before it was set up. This - * can happen when the AddBA timer times out for example. 
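The IWL_AGG_ON branch above derives ssn from the driver's 16-bit sequence counter and treats the queue as drained once ssn equals next_reclaimed, modulo the 12-bit sequence space. A self-contained sketch of that arithmetic, using the same shift and mask as IEEE80211_SEQ_TO_SN() and the iwl_mvm_tid_queued() helper found in sta.h further down; the example numbers are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define SEQ_TO_SN(seq)	(((seq) & 0xfff0) >> 4)	/* drop the fragment bits */
#define SN_MASK		0xfff

/* how many frames are still pending between next_reclaimed and ssn */
static uint16_t sn_queued(uint16_t seq_number, uint16_t next_reclaimed)
{
	return (SEQ_TO_SN(seq_number) - next_reclaimed) & SN_MASK;
}

int main(void)
{
	/* next sequence number to use is 0x0050 -> SN 5; 3 already reclaimed */
	uint16_t seq_number = 0x0050, next_reclaimed = 3;

	printf("queued = %u\n", (unsigned)sn_queued(seq_number, next_reclaimed));
	printf("empty  = %d\n", sn_queued(seq_number, next_reclaimed) == 0);
	return 0;
}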
- */ - - /* No barriers since we are under mutex */ - lockdep_assert_held(&mvm->mutex); - - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - tid_data->state = IWL_AGG_OFF; - err = 0; - break; - default: - IWL_ERR(mvm, - "Stopping AGG while state not ON or starting for %d on %d (%d)\n", - mvmsta->sta_id, tid, tid_data->state); - IWL_ERR(mvm, - "\ttid_data->txq_id = %d\n", tid_data->txq_id); - err = -EINVAL; - } - - spin_unlock_bh(&mvmsta->lock); - - return err; -} - -int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - u16 txq_id; - enum iwl_mvm_agg_state old_state; - - /* - * First set the agg state to OFF to avoid calling - * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. - */ - spin_lock_bh(&mvmsta->lock); - txq_id = tid_data->txq_id; - IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->state); - old_state = tid_data->state; - tid_data->state = IWL_AGG_OFF; - mvmsta->agg_tids &= ~BIT(tid); - spin_unlock_bh(&mvmsta->lock); - - /* No need to mark as reserved */ - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[txq_id].setup_reserved = false; - spin_unlock_bh(&mvm->queue_info_lock); - - if (old_state >= IWL_AGG_ON) { - iwl_mvm_drain_sta(mvm, mvmsta, true); - if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) - IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); - iwl_trans_wait_tx_queue_empty(mvm->trans, - mvmsta->tfd_queue_msk); - iwl_mvm_drain_sta(mvm, mvmsta, false); - - iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - - iwl_mvm_disable_txq(mvm, tid_data->txq_id, - vif->hw_queue[tid_to_mac80211_ac[tid]], tid, - 0); - } - - return 0; -} - -static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) -{ - int i, max = -1, max_offs = -1; - - lockdep_assert_held(&mvm->mutex); - - /* Pick the unused key offset with the highest 'deleted' - * counter. Every time a key is deleted, all the counters - * are incremented and the one that was just deleted is - * reset to zero. Thus, the highest counter is the one - * that was deleted longest ago. Pick that one. - */ - for (i = 0; i < STA_KEY_MAX_NUM; i++) { - if (test_bit(i, mvm->fw_key_table)) - continue; - if (mvm->fw_key_deleted[i] > max) { - max = mvm->fw_key_deleted[i]; - max_offs = i; - } - } - - if (max_offs < 0) - return STA_KEY_IDX_INVALID; - - __set_bit(max_offs, mvm->fw_key_table); - - return max_offs; -} - -static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (sta) { - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - - return mvm_sta->sta_id; - } - - /* - * The device expects GTKs for station interfaces to be - * installed as GTKs for the AP station. If we have no - * station ID, then use AP's station ID. 
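iwl_mvm_set_fw_key_idx() above implements a small aging scheme for hardware key offsets: every offset has a "deleted" counter, all counters are bumped whenever a key is removed and the freed offset's counter is reset to zero, so the free offset with the largest counter is the one that has been unused the longest. A self-contained model of that scheme, shrunk to two slots; the helper names are invented.

#include <stdio.h>

#define NUM_KEY_SLOTS 2		/* the real table has STA_KEY_MAX_NUM entries */

static unsigned char in_use[NUM_KEY_SLOTS];
static unsigned char deleted_age[NUM_KEY_SLOTS];

/* pick the free slot that was freed longest ago (largest age counter) */
static int pick_key_slot(void)
{
	int i, best = -1, best_age = -1;

	for (i = 0; i < NUM_KEY_SLOTS; i++) {
		if (in_use[i])
			continue;
		if (deleted_age[i] > best_age) {
			best_age = deleted_age[i];
			best = i;
		}
	}
	if (best >= 0)
		in_use[best] = 1;
	return best;	/* -1 means no free slot */
}

/* on removal: age every slot, then mark the freed one as "just deleted" */
static void free_key_slot(int slot)
{
	int i;

	for (i = 0; i < NUM_KEY_SLOTS; i++)
		if (deleted_age[i] < 255)
			deleted_age[i]++;
	deleted_age[slot] = 0;
	in_use[slot] = 0;
}

int main(void)
{
	int a = pick_key_slot();
	int b = pick_key_slot();

	free_key_slot(a);
	free_key_slot(b);
	/* slot 'a' was freed earlier, so it is older and gets reused first */
	printf("reused slot %d (freed first: %d)\n", pick_key_slot(), a);
	(void)b;
	return 0;
}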
- */ - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) - return mvmvif->ap_sta_id; - - return IWL_MVM_STATION_COUNT; -} - -static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, - struct ieee80211_key_conf *keyconf, bool mcast, - u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags) -{ - struct iwl_mvm_add_sta_key_cmd cmd = {}; - __le16 key_flags; - int ret; - u32 status; - u16 keyidx; - int i; - u8 sta_id = mvm_sta->sta_id; - - keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & - STA_KEY_FLG_KEYID_MSK; - key_flags = cpu_to_le16(keyidx); - key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); - cmd.tkip_rx_tsc_byte2 = tkip_iv32; - for (i = 0; i < 5; i++) - cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); - memcpy(cmd.key, keyconf->key, keyconf->keylen); - break; - case WLAN_CIPHER_SUITE_CCMP: - key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); - memcpy(cmd.key, keyconf->key, keyconf->keylen); - break; - case WLAN_CIPHER_SUITE_WEP104: - key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); - /* fall through */ - case WLAN_CIPHER_SUITE_WEP40: - key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); - memcpy(cmd.key + 3, keyconf->key, keyconf->keylen); - break; - default: - key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); - memcpy(cmd.key, keyconf->key, keyconf->keylen); - } - - if (mcast) - key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - - cmd.key_offset = keyconf->hw_key_idx; - cmd.key_flags = key_flags; - cmd.sta_id = sta_id; - - status = ADD_STA_SUCCESS; - if (cmd_flags & CMD_ASYNC) - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, - sizeof(cmd), &cmd); - else - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), - &cmd, &status); - - switch (status) { - case ADD_STA_SUCCESS: - IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); - break; - default: - ret = -EIO; - IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); - break; - } - - return ret; -} - -static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, - struct ieee80211_key_conf *keyconf, - u8 sta_id, bool remove_key) -{ - struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; - - /* verify the key details match the required command's expectations */ - if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) || - (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || - (keyconf->keyidx != 4 && keyconf->keyidx != 5))) - return -EINVAL; - - igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); - igtk_cmd.sta_id = cpu_to_le32(sta_id); - - if (remove_key) { - igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); - } else { - struct ieee80211_key_seq seq; - const u8 *pn; - - memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen); - ieee80211_get_key_rx_seq(keyconf, 0, &seq); - pn = seq.aes_cmac.pn; - igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | - ((u64) pn[4] << 8) | - ((u64) pn[3] << 16) | - ((u64) pn[2] << 24) | - ((u64) pn[1] << 32) | - ((u64) pn[0] << 40)); - } - - IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", - remove_key ? 
"removing" : "installing", - igtk_cmd.sta_id); - - return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, - sizeof(igtk_cmd), &igtk_cmd); -} - - -static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (sta) - return sta->addr; - - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { - u8 sta_id = mvmvif->ap_sta_id; - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - return sta->addr; - } - - - return NULL; -} - -static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *keyconf, - bool mcast) -{ - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - int ret; - const u8 *addr; - struct ieee80211_key_seq seq; - u16 p1k[5]; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - addr = iwl_mvm_get_mac_addr(mvm, vif, sta); - /* get phase 1 key from mac80211 */ - ieee80211_get_key_rx_seq(keyconf, 0, &seq); - ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); - ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - seq.tkip.iv32, p1k, 0); - break; - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - 0, NULL, 0); - break; - default: - ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - 0, NULL, 0); - } - - return ret; -} - -static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, - struct ieee80211_key_conf *keyconf, - bool mcast) -{ - struct iwl_mvm_add_sta_key_cmd cmd = {}; - __le16 key_flags; - int ret; - u32 status; - - key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & - STA_KEY_FLG_KEYID_MSK); - key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); - key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); - - if (mcast) - key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - - cmd.key_flags = key_flags; - cmd.key_offset = keyconf->hw_key_idx; - cmd.sta_id = sta_id; - - status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), - &cmd, &status); - - switch (status) { - case ADD_STA_SUCCESS: - IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); - break; - default: - ret = -EIO; - IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); - break; - } - - return ret; -} - -int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *keyconf, - bool have_key_offset) -{ - bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); - u8 sta_id; - int ret; - static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; - - lockdep_assert_held(&mvm->mutex); - - /* Get the station id from the mvm local station table */ - sta_id = iwl_mvm_get_key_sta_id(vif, sta); - if (sta_id == IWL_MVM_STATION_COUNT) { - IWL_ERR(mvm, "Failed to find station id\n"); - return -EINVAL; - } - - if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { - ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); - goto end; - } - - /* - * It is possible that the 'sta' parameter is NULL, and thus - * there is a need to retrieve the sta from the local station table. 
- */ - if (!sta) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta)) { - IWL_ERR(mvm, "Invalid station id\n"); - return -EINVAL; - } - } - - if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) - return -EINVAL; - - if (!have_key_offset) { - /* - * The D3 firmware hardcodes the PTK offset to 0, so we have to - * configure it there. As a result, this workaround exists to - * let the caller set the key offset (hw_key_idx), see d3.c. - */ - keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm); - if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) - return -ENOSPC; - } - - ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast); - if (ret) { - __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); - goto end; - } - - /* - * For WEP, the same key is used for multicast and unicast. Upload it - * again, using the same key offset, and now pointing the other one - * to the same key slot (offset). - * If this fails, remove the original as well. - */ - if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || - keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { - ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast); - if (ret) { - __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); - __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); - } - } - -end: - IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", - keyconf->cipher, keyconf->keylen, keyconf->keyidx, - sta ? sta->addr : zero_addr, ret); - return ret; -} - -int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *keyconf) -{ - bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); - u8 sta_id; - int ret, i; - - lockdep_assert_held(&mvm->mutex); - - /* Get the station id from the mvm local station table */ - sta_id = iwl_mvm_get_key_sta_id(vif, sta); - - IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", - keyconf->keyidx, sta_id); - - if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) - return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); - - if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { - IWL_ERR(mvm, "offset %d not used in fw key table.\n", - keyconf->hw_key_idx); - return -ENOENT; - } - - /* track which key was deleted last */ - for (i = 0; i < STA_KEY_MAX_NUM; i++) { - if (mvm->fw_key_deleted[i] < U8_MAX) - mvm->fw_key_deleted[i]++; - } - mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; - - if (sta_id == IWL_MVM_STATION_COUNT) { - IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); - return 0; - } - - /* - * It is possible that the 'sta' parameter is NULL, and thus - * there is a need to retrieve the sta from the local station table, - * for example when a GTK is removed (where the sta_id will then be - * the AP ID, and no station was passed by mac80211.) 
- */ - if (!sta) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - if (!sta) { - IWL_ERR(mvm, "Invalid station id\n"); - return -EINVAL; - } - } - - if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) - return -EINVAL; - - ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); - if (ret) - return ret; - - /* delete WEP key twice to get rid of (now useless) offset */ - if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || - keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) - ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); - - return ret; -} - -void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, - u16 *phase1key) -{ - struct iwl_mvm_sta *mvm_sta; - u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); - bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); - - if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) - return; - - rcu_read_lock(); - - if (!sta) { - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (WARN_ON(IS_ERR_OR_NULL(sta))) { - rcu_read_unlock(); - return; - } - } - - mvm_sta = iwl_mvm_sta_from_mac80211(sta); - iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - iv32, phase1key, CMD_ASYNC); - rcu_read_unlock(); -} - -void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_add_sta_cmd cmd = { - .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, - .station_flags_msk = cpu_to_le32(STA_FLG_PS), - .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), - }; - int ret; - - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); - if (ret) - IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); -} - -void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - enum ieee80211_frame_release_type reason, - u16 cnt, u16 tids, bool more_data, - bool agg) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_add_sta_cmd cmd = { - .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, - .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, - .sleep_tx_count = cpu_to_le16(cnt), - .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), - }; - int tid, ret; - unsigned long _tids = tids; - - /* convert TIDs to ACs - we don't support TSPEC so that's OK - * Note that this field is reserved and unused by firmware not - * supporting GO uAPSD, so it's safe to always do this. 
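The comment above, and the loop that follows it, convert a bitmap of TIDs into the uCode AC bitmap: each set TID bit selects an entry of tid_to_ucode_ac[] and the matching AC bit is set in awake_acs. A self-contained model of that conversion, reusing the TID-to-AC ordering of the tables earlier in this file; the AC numbering here is a stand-in for the firmware's AC_* constants.

#include <stdint.h>
#include <stdio.h>

enum { AC_BE, AC_BK, AC_VI, AC_VO };	/* stand-ins for the uCode AC_* values */

/* TID -> AC, same ordering as the tid_to_ucode_ac[] table above */
static const uint8_t tid_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

static uint8_t tids_to_awake_acs(uint16_t tids)
{
	uint8_t acs = 0;
	int tid;

	for (tid = 0; tid < 8; tid++)
		if (tids & (1 << tid))
			acs |= 1 << tid_to_ac[tid];
	return acs;
}

int main(void)
{
	/* TIDs 0 and 6 pending -> AC_BE and AC_VO */
	printf("awake_acs = 0x%x\n", (unsigned)tids_to_awake_acs(0x41));
	return 0;
}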
- */ - for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) - cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); - - /* If we're releasing frames from aggregation queues then check if the - * all queues combined that we're releasing frames from have - * - more frames than the service period, in which case more_data - * needs to be set - * - fewer than 'cnt' frames, in which case we need to adjust the - * firmware command (but do that unconditionally) - */ - if (agg) { - int remaining = cnt; - - spin_lock_bh(&mvmsta->lock); - for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { - struct iwl_mvm_tid_data *tid_data; - u16 n_queued; - - tid_data = &mvmsta->tid_data[tid]; - if (WARN(tid_data->state != IWL_AGG_ON && - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, - "TID %d state is %d\n", - tid, tid_data->state)) { - spin_unlock_bh(&mvmsta->lock); - ieee80211_sta_eosp(sta); - return; - } - - n_queued = iwl_mvm_tid_queued(tid_data); - if (n_queued > remaining) { - more_data = true; - remaining = 0; - break; - } - remaining -= n_queued; - } - spin_unlock_bh(&mvmsta->lock); - - cmd.sleep_tx_count = cpu_to_le16(cnt - remaining); - if (WARN_ON(cnt - remaining == 0)) { - ieee80211_sta_eosp(sta); - return; - } - } - - /* Note: this is ignored by firmware not supporting GO uAPSD */ - if (more_data) - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA); - - if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { - mvmsta->next_status_eosp = true; - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL); - } else { - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD); - } - - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); - if (ret) - IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); -} - -void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; - struct ieee80211_sta *sta; - u32 sta_id = le32_to_cpu(notif->sta_id); - - if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) - return; - - rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (!IS_ERR_OR_NULL(sta)) - ieee80211_sta_eosp(sta); - rcu_read_unlock(); -} - -void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvmsta, bool disable) -{ - struct iwl_mvm_add_sta_cmd cmd = { - .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, - .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, - .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), - .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), - }; - int ret; - - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); - if (ret) - IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); -} - -void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - bool disable) -{ - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - - spin_lock_bh(&mvm_sta->lock); - - if (mvm_sta->disable_tx == disable) { - spin_unlock_bh(&mvm_sta->lock); - return; - } - - mvm_sta->disable_tx = disable; - - /* - * Tell mac80211 to start/stop queuing tx for this station, - * but don't stop queuing if there are still pending frames - * for this station. 
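For releases from aggregation queues, iwl_mvm_sta_modify_sleep_tx_count() above sums how many frames the released TIDs actually hold, sets more_data when more than the requested count remains, and shrinks sleep_tx_count when fewer frames are available. A small standalone model of that accounting; the per-TID queue depths are invented inputs, in the driver they come from iwl_mvm_tid_queued().

#include <stdio.h>

struct release_result {
	unsigned int sleep_tx_count;
	int more_data;
};

/*
 * Model of the sleep_tx_count adjustment: 'cnt' frames were requested by the
 * peer, 'queued[]' is how many frames each released TID currently holds.
 */
static struct release_result adjust_release(unsigned int cnt,
					    const unsigned int *queued,
					    int n_tids)
{
	struct release_result res = { .sleep_tx_count = cnt, .more_data = 0 };
	unsigned int remaining = cnt;
	int i;

	for (i = 0; i < n_tids; i++) {
		if (queued[i] > remaining) {
			/* more frames pending than were requested */
			res.more_data = 1;
			remaining = 0;
			break;
		}
		remaining -= queued[i];
	}
	/* release only what is really there */
	res.sleep_tx_count = cnt - remaining;
	return res;
}

int main(void)
{
	const unsigned int queued[2] = { 1, 1 };	/* two TIDs, one frame each */
	struct release_result r = adjust_release(4, queued, 2);

	printf("sleep_tx_count=%u more_data=%d\n", r.sleep_tx_count, r.more_data);
	return 0;
}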
- */ - if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) - ieee80211_sta_block_awake(mvm->hw, sta, disable); - - iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); - - spin_unlock_bh(&mvm_sta->lock); -} - -void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif, - bool disable) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvm_sta; - int i; - - lockdep_assert_held(&mvm->mutex); - - /* Block/unblock all the stations of the given mvmvif */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta)) - continue; - - mvm_sta = iwl_mvm_sta_from_mac80211(sta); - if (mvm_sta->mac_id_n_color != - FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) - continue; - - iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); - } -} - -void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_sta *mvmsta; - - rcu_read_lock(); - - mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); - - if (!WARN_ON(!mvmsta)) - iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); - - rcu_read_unlock(); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h deleted file mode 100644 index eedb215eba3f..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ /dev/null @@ -1,426 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __sta_h__ -#define __sta_h__ - -#include -#include -#include - -#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */ -#include "fw-api.h" /* IWL_MVM_STATION_COUNT */ -#include "rs.h" - -struct iwl_mvm; -struct iwl_mvm_vif; - -/** - * DOC: station table - introduction - * - * The station table is a list of data structure that reprensent the stations. - * In STA/P2P client mode, the driver will hold one station for the AP/ GO. - * In GO/AP mode, the driver will have as many stations as associated clients. - * All these stations are reflected in the fw's station table. The driver - * keeps the fw's station table up to date with the ADD_STA command. Stations - * can be removed by the REMOVE_STA command. - * - * All the data related to a station is held in the structure %iwl_mvm_sta - * which is embed in the mac80211's %ieee80211_sta (in the drv_priv) area. - * This data includes the index of the station in the fw, per tid information - * (sequence numbers, Block-ack state machine, etc...). The stations are - * created and deleted by the %sta_state callback from %ieee80211_ops. - * - * The driver holds a map: %fw_id_to_mac_id that allows to fetch a - * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw - * station index. That way, the driver is able to get the tid related data in - * O(1) in time sensitive paths (Tx / Tx response / BA notification). These - * paths are triggered by the fw, and the driver needs to get a pointer to the - * %ieee80211 structure. This map helps to get that pointer quickly. - */ - -/** - * DOC: station table - locking - * - * As stated before, the station is created / deleted by mac80211's %sta_state - * callback from %ieee80211_ops which can sleep. The next paragraph explains - * the locking of a single stations, the next ones relates to the station - * table. - * - * The station holds the sequence number per tid. So this data needs to be - * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack - * information (the state machine / and the logic that checks if the queues - * were drained), so it also needs to be accessible from the Tx response flow. - * In short, the station needs to be access from sleepable context as well as - * from tasklets, so the station itself needs a spinlock. - * - * The writers of %fw_id_to_mac_id map are serialized by the global mutex of - * the mvm op_mode. This is possible since %sta_state can sleep. 
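The fw_id_to_mac_id map described above is an array of ieee80211_sta pointers indexed by firmware station id, and a slot may also hold an error-encoded pointer (ERR_PTR(-EINVAL) for internal stations, ERR_PTR(-EBUSY) while a station is being drained) instead of a real station. A userspace-style model of that convention, ignoring the RCU protection the real map relies on; the sta type and ids are invented.

#include <stdio.h>

#define MAX_STA 16

struct sta { int id; };

/* encode a small negative errno in a pointer, like the kernel's ERR_PTR() */
static void *err_ptr(long err) { return (void *)err; }
static int is_err_or_null(const void *p)
{
	return !p || (unsigned long)p >= (unsigned long)-4095;
}

static struct sta *fw_id_to_mac_id[MAX_STA];

int main(void)
{
	struct sta real = { .id = 3 };
	int i;

	fw_id_to_mac_id[3] = &real;		/* normal station */
	fw_id_to_mac_id[5] = err_ptr(-22);	/* internal sta: -EINVAL marker */

	for (i = 0; i < MAX_STA; i++) {
		if (is_err_or_null(fw_id_to_mac_id[i]))
			continue;		/* skip empty / sentinel slots */
		printf("sta_id %d is a real station\n", i);
	}
	return 0;
}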
- * The pointers in this map are RCU protected, hence we won't replace the - * station while we have Tx / Tx response / BA notification running. - * - * If a station is deleted while it still has packets in its A-MPDU queues, - * then the reclaim flow will notice that there is no station in the map for - * sta_id and it will dump the responses. - */ - -/** - * DOC: station table - internal stations - * - * The FW needs a few internal stations that are not reflected in - * mac80211, such as the broadcast station in AP / GO mode, or the AUX sta for - * scanning and P2P device (during the GO negotiation). - * For this kind of station we have the %iwl_mvm_int_sta struct which holds the - * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta. - * Usually the data for these stations is static, so no locking is required, - * and no TID data as this is also not needed. - * One thing to note is that these stations have an ID in the fw, but not - * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id - * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of - * pointers from this mapping need to check that the value is not error - * or NULL. - * - * Currently there is only one auxiliary station for scanning, initialized - * on init. - */ - -/** - * DOC: station table - AP Station in STA mode - * - * %iwl_mvm_vif includes the index of the AP station in the fw's STA table: - * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta, - * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove - * the AP station from the fw before setting the MAC context as unassociated. - * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is - * removed by mac80211, but the station won't be removed in the fw until the - * VIF is set as unassociated. Then, %ap_sta_id will be invalidated. - */ - -/** - * DOC: station table - Drain vs. Flush - * - * Flush means that all the frames in the SCD queue are dumped regardless of the - * station to which they were sent. We do that when we disassociate and before - * we remove the STA of the AP. The flush can be done synchronously against the - * fw. - * Drain means that the fw will drop all the frames sent to a specific station. - * This is useful when a client (if we are IBSS / GO or AP) disassociates. In - * that case, we need to drain all the frames for that client from the AC queues - * that are shared with the other clients. Only then can we remove the STA in - * the fw. In order to do so, we track the non-AMPDU packets for each station. - * If mac80211 removes a STA and it still has non-AMPDU packets pending in - * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all - * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped - * (we know about it from its Tx response), we remove the station in the fw and set - * it as %NULL in %fw_id_to_mac_id: this is the purpose of - * %iwl_mvm_sta_drained_wk. - */ - -/** - * DOC: station table - fw restart - * - * When the fw asserts, or we have any other issue that requires resetting the - * driver, we require mac80211 to reconfigure the driver. Since the private - * data of the stations is embedded in mac80211's %ieee80211_sta, that data will - * not be zeroed and needs to be reinitialized manually. - * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart, which indicates that - * we must not allocate a new sta_id but reuse the previous one.
This - * means that the stations being re-added after the reset will have the same - * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id - * map, since the stations aren't in the fw any more. Internal stations that - * are not added by mac80211 will be re-added in the init flow that is called - * after the restart: mac80211 call's %iwl_mvm_mac_start which calls to - * %iwl_mvm_up. - */ - -/** - * DOC: AP mode - PS - * - * When a station is asleep, the fw will set it as "asleep". All frames on - * shared queues (i.e. non-aggregation queues) to that station will be dropped - * by the fw (%TX_STATUS_FAIL_DEST_PS failure code). - * - * AMPDUs are in a separate queue that is stopped by the fw. We just need to - * let mac80211 know when there are frames in these queues so that it can - * properly handle trigger frames. - * - * When a trigger frame is received, mac80211 tells the driver to send frames - * from the AMPDU queues or sends frames to non-aggregation queues itself, - * depending on which ACs are delivery-enabled and what TID has frames to - * transmit. Note that mac80211 has all the knowledge since all the non-agg - * frames are buffered / filtered, and the driver tells mac80211 about agg - * frames). The driver needs to tell the fw to let frames out even if the - * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count. - * - * When we receive a frame from that station with PM bit unset, the driver - * needs to let the fw know that this station isn't asleep any more. This is - * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the - * station's wakeup. - * - * For a GO, the Service Period might be cut short due to an absence period - * of the GO. In this (and all other cases) the firmware notifies us with the - * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we - * already sent to the device will be rejected again. - * - * See also "AP support for powersaving clients" in mac80211.h. - */ - -/** - * enum iwl_mvm_agg_state - * - * The state machine of the BA agreement establishment / tear down. - * These states relate to a specific RA / TID. - * - * @IWL_AGG_OFF: aggregation is not used - * @IWL_AGG_STARTING: aggregation are starting (between start and oper) - * @IWL_AGG_ON: aggregation session is up - * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the - * HW queue to be empty from packets for this RA /TID. - * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the - * HW queue to be empty from packets for this RA /TID. - */ -enum iwl_mvm_agg_state { - IWL_AGG_OFF = 0, - IWL_AGG_STARTING, - IWL_AGG_ON, - IWL_EMPTYING_HW_QUEUE_ADDBA, - IWL_EMPTYING_HW_QUEUE_DELBA, -}; - -/** - * struct iwl_mvm_tid_data - holds the states for each RA / TID - * @seq_number: the next WiFi sequence number to use - * @next_reclaimed: the WiFi sequence number of the next packet to be acked. - * This is basically (last acked packet++). - * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the - * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). - * @reduced_tpc: Reduced tx power. Holds the data between the - * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). - * @state: state of the BA agreement establishment / tear down. - * @txq_id: Tx queue used by the BA session - * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or - * the first packet to be sent in legacy HW queue in Tx AGG stop flow. 
- * Basically when next_reclaimed reaches ssn, we can tell mac80211 that - * we are ready to finish the Tx AGG stop / start flow. - * @tx_time: medium time consumed by this A-MPDU - */ -struct iwl_mvm_tid_data { - u16 seq_number; - u16 next_reclaimed; - /* The rest is Tx AGG related */ - u32 rate_n_flags; - u8 reduced_tpc; - enum iwl_mvm_agg_state state; - u16 txq_id; - u16 ssn; - u16 tx_time; -}; - -static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) -{ - return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number), - tid_data->next_reclaimed); -} - -/** - * struct iwl_mvm_sta - representation of a station in the driver - * @sta_id: the index of the station in the fw (will be replaced by id_n_color) - * @tfd_queue_msk: the tfd queues used by the station - * @hw_queue: per-AC mapping of the TFD queues used by station - * @mac_id_n_color: the MAC context this station is linked to - * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for - * tid. - * @max_agg_bufsize: the maximal size of the AGG buffer for this station - * @bt_reduced_txpower: is reduced tx power enabled for this station - * @next_status_eosp: the next reclaimed packet is a PS-Poll response and - * we need to signal the EOSP - * @lock: lock to protect the whole struct. Since %tid_data is access from Tx - * and from Tx response flow, it needs a spinlock. - * @tid_data: per tid data. Look at %iwl_mvm_tid_data. - * @tx_protection: reference counter for controlling the Tx protection. - * @tt_tx_protection: is thermal throttling enable Tx protection? - * @disable_tx: is tx to this STA disabled? - * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) - * - * When mac80211 creates a station it reserves some space (hw->sta_data_size) - * in the structure for use by driver. This structure is placed in that - * space. 
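As the comment above notes, struct iwl_mvm_sta lives in the drv_priv area that mac80211 reserves inside each ieee80211_sta (hw->sta_data_size), and the accessor defined just below is simply a cast to that area. A stripped-down, self-contained model of the pattern; both struct layouts are invented, only the embedding trick matters.

#include <stdio.h>
#include <stdlib.h>

/* "public" object, last member is space reserved for the driver */
struct public_sta {
	unsigned char addr[6];
	/* in mac80211 the size of this area is hw->sta_data_size */
	char drv_priv[] __attribute__((aligned(sizeof(void *))));
};

/* driver-private state stored in drv_priv */
struct priv_sta {
	unsigned int sta_id;
	unsigned int tfd_queue_msk;
};

static struct priv_sta *priv_of(struct public_sta *sta)
{
	/* same cast as iwl_mvm_sta_from_mac80211() */
	return (struct priv_sta *)sta->drv_priv;
}

int main(void)
{
	struct public_sta *sta = calloc(1, sizeof(*sta) + sizeof(struct priv_sta));

	if (!sta)
		return 1;
	priv_of(sta)->sta_id = 7;
	printf("sta_id via drv_priv cast: %u\n", priv_of(sta)->sta_id);
	free(sta);
	return 0;
}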
- * - */ -struct iwl_mvm_sta { - u32 sta_id; - u32 tfd_queue_msk; - u8 hw_queue[IEEE80211_NUM_ACS]; - u32 mac_id_n_color; - u16 tid_disable_agg; - u8 max_agg_bufsize; - bool bt_reduced_txpower; - bool next_status_eosp; - spinlock_t lock; - struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; - struct iwl_lq_sta lq_sta; - struct ieee80211_vif *vif; - - /* Temporary, until the new TLC will control the Tx protection */ - s8 tx_protection; - bool tt_tx_protection; - - bool disable_tx; - u8 agg_tids; -}; - -static inline struct iwl_mvm_sta * -iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) -{ - return (void *)sta->drv_priv; -} - -/** - * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or - * broadcast) - * @sta_id: the index of the station in the fw (will be replaced by id_n_color) - * @tfd_queue_msk: the tfd queues used by the station - */ -struct iwl_mvm_int_sta { - u32 sta_id; - u32 tfd_queue_msk; -}; - -int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - bool update); -int iwl_mvm_add_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int iwl_mvm_update_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int iwl_mvm_rm_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u8 sta_id); -int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - bool have_key_offset); -int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *keyconf); - -void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, u32 iv32, - u16 *phase1key); - -void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - -/* AMPDU */ -int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start); -int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u16 *ssn); -int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size); -int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid); -int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid); - -int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); - -int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); - -void iwl_mvm_sta_drained_wk(struct work_struct *wk); -void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, - struct ieee80211_sta *sta); -void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - enum ieee80211_frame_release_type reason, - u16 cnt, u16 tids, bool more_data, - bool agg); -int iwl_mvm_drain_sta(struct iwl_mvm *mvm, 
struct iwl_mvm_sta *mvmsta, - bool drain); -void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvmsta, bool disable); -void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - bool disable); -void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif, - bool disable); -void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); - -#endif /* __sta_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c deleted file mode 100644 index fe2fa5650443..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/tdls.c +++ /dev/null @@ -1,732 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#include -#include "mvm.h" -#include "time-event.h" -#include "iwl-io.h" -#include "iwl-prph.h" - -#define TU_TO_US(x) (x * 1024) -#define TU_TO_MS(x) (TU_TO_US(x) / 1000) - -void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - int i; - - lockdep_assert_held(&mvm->mutex); - - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (!sta || IS_ERR(sta) || !sta->tdls) - continue; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - ieee80211_tdls_oper_request(mvmsta->vif, sta->addr, - NL80211_TDLS_TEARDOWN, - WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED, - GFP_KERNEL); - } -} - -int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - int count = 0; - int i; - - lockdep_assert_held(&mvm->mutex); - - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (!sta || IS_ERR(sta) || !sta->tdls) - continue; - - if (vif) { - mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->vif != vif) - continue; - } - - count++; - } - - return count; -} - -static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - struct iwl_rx_packet *pkt; - struct iwl_tdls_config_res *resp; - struct iwl_tdls_config_cmd tdls_cfg_cmd = {}; - struct iwl_host_cmd cmd = { - .id = TDLS_CONFIG_CMD, - .flags = CMD_WANT_SKB, - .data = { &tdls_cfg_cmd, }, - .len = { sizeof(struct iwl_tdls_config_cmd), }, - }; - struct ieee80211_sta *sta; - int ret, i, cnt; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - lockdep_assert_held(&mvm->mutex); - - tdls_cfg_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); - tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID; - tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */ - - /* for now the Tx cmd is empty and unused */ - - /* populate TDLS peer data */ - cnt = 0; - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta) || !sta->tdls) - continue; - - tdls_cfg_cmd.sta_info[cnt].sta_id = i; - tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = - IWL_MVM_TDLS_FW_TID; - tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0); - tdls_cfg_cmd.sta_info[cnt].is_initiator = - cpu_to_le32(sta->tdls_initiator ? 
1 : 0); - - cnt++; - } - - tdls_cfg_cmd.tdls_peer_count = cnt; - IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (WARN_ON_ONCE(ret)) - return; - - pkt = cmd.resp_pkt; - - WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)); - - /* we don't really care about the response at this point */ - - iwl_free_resp(&cmd); -} - -void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool sta_added) -{ - int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif); - - /* when the first peer joins, send a power update first */ - if (tdls_sta_cnt == 1 && sta_added) - iwl_mvm_power_update_mac(mvm); - - /* configure the FW with TDLS peer info */ - iwl_mvm_tdls_config(mvm, vif); - - /* when the last peer leaves, send a power update last */ - if (tdls_sta_cnt == 0 && !sta_added) - iwl_mvm_power_update_mac(mvm); -} - -void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; - - /* - * iwl_mvm_protect_session() reads directly from the device - * (the system time), so make sure it is available. - */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS)) - return; - - mutex_lock(&mvm->mutex); - /* Protect the session to hear the TDLS setup response on the channel */ - iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); - mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS); -} - -static const char * -iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state) -{ - switch (state) { - case IWL_MVM_TDLS_SW_IDLE: - return "IDLE"; - case IWL_MVM_TDLS_SW_REQ_SENT: - return "REQ SENT"; - case IWL_MVM_TDLS_SW_RESP_RCVD: - return "RESP RECEIVED"; - case IWL_MVM_TDLS_SW_REQ_RCVD: - return "REQ RECEIVED"; - case IWL_MVM_TDLS_SW_ACTIVE: - return "ACTIVE"; - } - - return NULL; -} - -static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, - enum iwl_mvm_tdls_cs_state state) -{ - if (mvm->tdls_cs.state == state) - return; - - IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n", - iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state), - iwl_mvm_tdls_cs_state_str(state)); - mvm->tdls_cs.state = state; - - /* we only send requests to our switching peer - update sent time */ - if (state == IWL_MVM_TDLS_SW_REQ_SENT) - mvm->tdls_cs.peer.sent_timestamp = - iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); - - if (state == IWL_MVM_TDLS_SW_IDLE) - mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; -} - -void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data; - struct ieee80211_sta *sta; - unsigned int delay; - struct iwl_mvm_sta *mvmsta; - struct ieee80211_vif *vif; - u32 sta_id = le32_to_cpu(notif->sta_id); - - lockdep_assert_held(&mvm->mutex); - - /* can fail sometimes */ - if (!le32_to_cpu(notif->status)) { - iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); - return; - } - - if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT)) - return; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - /* the station may not be here, but if it is, it must be a TDLS peer */ - if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) - return; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - vif = mvmsta->vif; - - /* - * Update state and possibly switch again after this is over 
(DTIM). - * Also convert TU to msec. - */ - delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); - mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, - msecs_to_jiffies(delay)); - - iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE); -} - -static int -iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, - enum iwl_tdls_channel_switch_type type, - const u8 *peer, bool peer_initiator, u32 timestamp) -{ - bool same_peer = false; - int ret = 0; - - /* get the existing peer if it's there */ - if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && - mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { - struct ieee80211_sta *sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], - lockdep_is_held(&mvm->mutex)); - if (!IS_ERR_OR_NULL(sta)) - same_peer = ether_addr_equal(peer, sta->addr); - } - - switch (mvm->tdls_cs.state) { - case IWL_MVM_TDLS_SW_IDLE: - /* - * might be spurious packet from the peer after the switch is - * already done - */ - if (type == TDLS_MOVE_CH) - ret = -EINVAL; - break; - case IWL_MVM_TDLS_SW_REQ_SENT: - /* only allow requests from the same peer */ - if (!same_peer) - ret = -EBUSY; - else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH && - !peer_initiator) - /* - * We received a ch-switch request while an outgoing - * one is pending. Allow it if the peer is the link - * initiator. - */ - ret = -EBUSY; - else if (type == TDLS_SEND_CHAN_SW_REQ) - /* wait for idle before sending another request */ - ret = -EBUSY; - else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp) - /* we got a stale response - ignore it */ - ret = -EINVAL; - break; - case IWL_MVM_TDLS_SW_RESP_RCVD: - /* - * we are waiting for the FW to give an "active" notification, - * so ignore requests in the meantime - */ - ret = -EBUSY; - break; - case IWL_MVM_TDLS_SW_REQ_RCVD: - /* as above, allow the link initiator to proceed */ - if (type == TDLS_SEND_CHAN_SW_REQ) { - if (!same_peer) - ret = -EBUSY; - else if (peer_initiator) /* they are the initiator */ - ret = -EBUSY; - } else if (type == TDLS_MOVE_CH) { - ret = -EINVAL; - } - break; - case IWL_MVM_TDLS_SW_ACTIVE: - /* - * the only valid request when active is a request to return - * to the base channel by the current off-channel peer - */ - if (type != TDLS_MOVE_CH || !same_peer) - ret = -EBUSY; - break; - } - - if (ret) - IWL_DEBUG_TDLS(mvm, - "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n", - type, mvm->tdls_cs.state, peer, same_peer, - peer_initiator); - - return ret; -} - -static int -iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - enum iwl_tdls_channel_switch_type type, - const u8 *peer, bool peer_initiator, - u8 oper_class, - struct cfg80211_chan_def *chandef, - u32 timestamp, u16 switch_time, - u16 switch_timeout, struct sk_buff *skb, - u32 ch_sw_tm_ie) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - struct ieee80211_tx_info *info; - struct ieee80211_hdr *hdr; - struct iwl_tdls_channel_switch_cmd cmd = {0}; - int ret; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator, - timestamp); - if (ret) - return ret; - - if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) { - ret = -EINVAL; - goto out; - } - - cmd.switch_type = type; - cmd.timing.frame_timestamp = cpu_to_le32(timestamp); - cmd.timing.switch_time = cpu_to_le32(switch_time); - cmd.timing.switch_timeout = cpu_to_le32(switch_timeout); - - rcu_read_lock(); - sta = ieee80211_find_sta(vif, peer); - if (!sta) { - 
rcu_read_unlock(); - ret = -ENOENT; - goto out; - } - mvmsta = iwl_mvm_sta_from_mac80211(sta); - cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id); - - if (!chandef) { - if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && - mvm->tdls_cs.peer.chandef.chan) { - /* actually moving to the channel */ - chandef = &mvm->tdls_cs.peer.chandef; - } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE && - type == TDLS_MOVE_CH) { - /* we need to return to base channel */ - struct ieee80211_chanctx_conf *chanctx = - rcu_dereference(vif->chanctx_conf); - - if (WARN_ON_ONCE(!chanctx)) { - rcu_read_unlock(); - goto out; - } - - chandef = &chanctx->def; - } - } - - if (chandef) { - cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? - PHY_BAND_24 : PHY_BAND_5); - cmd.ci.channel = chandef->chan->hw_value; - cmd.ci.width = iwl_mvm_get_channel_width(chandef); - cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); - } - - /* keep quota calculation simple for now - 50% of DTIM for TDLS */ - cmd.timing.max_offchan_duration = - cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * - vif->bss_conf.beacon_int) / 2); - - /* Switch time is the first element in the switch-timing IE. */ - cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); - - info = IEEE80211_SKB_CB(skb); - hdr = (void *)skb->data; - if (info->control.hw_key) { - if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) { - rcu_read_unlock(); - ret = -EINVAL; - goto out; - } - iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd); - } - - iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, - mvmsta->sta_id); - - iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, - hdr->frame_control); - rcu_read_unlock(); - - memcpy(cmd.frame.data, skb->data, skb->len); - - ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, - sizeof(cmd), &cmd); - if (ret) { - IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", - ret); - goto out; - } - - /* channel switch has started, update state */ - if (type != TDLS_MOVE_CH) { - mvm->tdls_cs.cur_sta_id = mvmsta->sta_id; - iwl_mvm_tdls_update_cs_state(mvm, - type == TDLS_SEND_CHAN_SW_REQ ? 
- IWL_MVM_TDLS_SW_REQ_SENT : - IWL_MVM_TDLS_SW_REQ_RCVD); - } else { - iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD); - } - -out: - - /* channel switch failed - we are idle */ - if (ret) - iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); - - return ret; -} - -void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) -{ - struct iwl_mvm *mvm; - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - struct ieee80211_vif *vif; - unsigned int delay; - int ret; - - mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work); - mutex_lock(&mvm->mutex); - - /* called after an active channel switch has finished or timed-out */ - iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); - - /* station might be gone, in that case do nothing */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) - goto out; - - sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], - lockdep_is_held(&mvm->mutex)); - /* the station may not be here, but if it is, it must be a TDLS peer */ - if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) - goto out; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - vif = mvmsta->vif; - ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, - TDLS_SEND_CHAN_SW_REQ, - sta->addr, - mvm->tdls_cs.peer.initiator, - mvm->tdls_cs.peer.op_class, - &mvm->tdls_cs.peer.chandef, - 0, 0, 0, - mvm->tdls_cs.peer.skb, - mvm->tdls_cs.peer.ch_sw_tm_ie); - if (ret) - IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret); - - /* retry after a DTIM if we failed sending now */ - delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); - queue_delayed_work(system_wq, &mvm->tdls_cs.dwork, - msecs_to_jiffies(delay)); -out: - mutex_unlock(&mvm->mutex); -} - -int -iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u8 oper_class, - struct cfg80211_chan_def *chandef, - struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_sta *mvmsta; - unsigned int delay; - int ret; - - mutex_lock(&mvm->mutex); - - IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", - sta->addr, chandef->chan->center_freq, chandef->width); - - /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) { - IWL_DEBUG_TDLS(mvm, - "Existing peer. Can't start switch with %pM\n", - sta->addr); - ret = -EBUSY; - goto out; - } - - ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, - TDLS_SEND_CHAN_SW_REQ, - sta->addr, sta->tdls_initiator, - oper_class, chandef, 0, 0, 0, - tmpl_skb, ch_sw_tm_ie); - if (ret) - goto out; - - /* - * Mark the peer as "in tdls switch" for this vif. We only allow a - * single such peer per vif. - */ - mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL); - if (!mvm->tdls_cs.peer.skb) { - ret = -ENOMEM; - goto out; - } - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvm->tdls_cs.peer.sta_id = mvmsta->sta_id; - mvm->tdls_cs.peer.chandef = *chandef; - mvm->tdls_cs.peer.initiator = sta->tdls_initiator; - mvm->tdls_cs.peer.op_class = oper_class; - mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie; - - /* - * Wait for 2 DTIM periods before attempting the next switch. The next - * switch will be made sooner if the current one completes before that. 
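For reference, the 2-DTIM back-off computed just below works out as follows. This is an illustrative, standalone calculation only: the dtim_period and beacon_int values are assumed, and TU_TO_MS() is taken to be a plain TU * 1024 / 1000 conversion (1 TU = 1024 usec, matching the open-coded conversion used later in this file for the retry timeout); the real macro's rounding may differ slightly.

#include <stdio.h>

/* assumed conversion: 1 TU = 1024 usec */
#define EXAMPLE_TU_TO_MS(tu) ((tu) * 1024 / 1000)

int main(void)
{
	unsigned int dtim_period = 2;	/* assumed value */
	unsigned int beacon_int = 100;	/* TU, assumed value */
	unsigned int delay;

	/* same shape as the driver's computation: 2 DTIM periods in msec */
	delay = 2 * EXAMPLE_TU_TO_MS(dtim_period * beacon_int);

	/* 200 TU -> 204 ms, doubled -> ~408 ms between switch attempts */
	printf("next TDLS switch attempt in ~%u ms\n", delay);
	return 0;
}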
- */ - delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period * - vif->bss_conf.beacon_int); - mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, - msecs_to_jiffies(delay)); - -out: - mutex_unlock(&mvm->mutex); - return ret; -} - -void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct ieee80211_sta *cur_sta; - bool wait_for_phy = false; - - mutex_lock(&mvm->mutex); - - IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); - - /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) { - IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); - goto out; - } - - cur_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], - lockdep_is_held(&mvm->mutex)); - /* make sure it's the same peer */ - if (cur_sta != sta) - goto out; - - /* - * If we're currently in a switch because of the now canceled peer, - * wait a DTIM here to make sure the phy is back on the base channel. - * We can't otherwise force it. - */ - if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id && - mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) - wait_for_phy = true; - - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; - dev_kfree_skb(mvm->tdls_cs.peer.skb); - mvm->tdls_cs.peer.skb = NULL; - -out: - mutex_unlock(&mvm->mutex); - - /* make sure the phy is on the base channel */ - if (wait_for_phy) - msleep(TU_TO_MS(vif->bss_conf.dtim_period * - vif->bss_conf.beacon_int)); - - /* flush the channel switch state */ - flush_delayed_work(&mvm->tdls_cs.dwork); - - IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr); -} - -void -iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_tdls_ch_sw_params *params) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - enum iwl_tdls_channel_switch_type type; - unsigned int delay; - const char *action_str = - params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ? - "REQ" : "RESP"; - - mutex_lock(&mvm->mutex); - - IWL_DEBUG_TDLS(mvm, - "Received TDLS ch switch action %s from %pM status %d\n", - action_str, params->sta->addr, params->status); - - /* - * we got a non-zero status from a peer we were switching to - move to - * the idle state and retry again later - */ - if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && - params->status != 0 && - mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && - mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { - struct ieee80211_sta *cur_sta; - - /* make sure it's the same peer */ - cur_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], - lockdep_is_held(&mvm->mutex)); - if (cur_sta == params->sta) { - iwl_mvm_tdls_update_cs_state(mvm, - IWL_MVM_TDLS_SW_IDLE); - goto retry; - } - } - - type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ? 
- TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH; - - iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr, - params->sta->tdls_initiator, 0, - params->chandef, params->timestamp, - params->switch_time, - params->switch_timeout, - params->tmpl_skb, - params->ch_sw_tm_ie); - -retry: - /* register a timeout in case we don't succeed in switching */ - delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int * - 1024 / 1000; - mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, - msecs_to_jiffies(delay)); - mutex_unlock(&mvm->mutex); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h deleted file mode 100644 index 79ab6beb6b26..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/testmode.h +++ /dev/null @@ -1,97 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __IWL_MVM_TESTMODE_H__ -#define __IWL_MVM_TESTMODE_H__ - -/** - * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA - * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute) - * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32) - * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32) - * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32) - */ -enum iwl_mvm_testmode_attrs { - IWL_MVM_TM_ATTR_UNSPEC, - IWL_MVM_TM_ATTR_CMD, - IWL_MVM_TM_ATTR_NOA_DURATION, - IWL_MVM_TM_ATTR_BEACON_FILTER_STATE, - - /* keep last */ - NUM_IWL_MVM_TM_ATTRS, - IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1, -}; - -/** - * enum iwl_mvm_testmode_commands - MVM testmode commands - * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing - * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on - */ -enum iwl_mvm_testmode_commands { - IWL_MVM_TM_CMD_SET_NOA, - IWL_MVM_TM_CMD_SET_BEACON_FILTER, -}; - -#endif /* __IWL_MVM_TESTMODE_H__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c deleted file mode 100644 index 7530eb23035d..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ /dev/null @@ -1,872 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include - -#include "iwl-notif-wait.h" -#include "iwl-trans.h" -#include "fw-api.h" -#include "time-event.h" -#include "mvm.h" -#include "iwl-io.h" -#include "iwl-prph.h" - -/* - * For the high priority TE use a time event type that has similar priority to - * the FW's action scan priority. - */ -#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE -#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC - -void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, - struct iwl_mvm_time_event_data *te_data) -{ - lockdep_assert_held(&mvm->time_event_lock); - - if (!te_data->vif) - return; - - list_del(&te_data->list); - te_data->running = false; - te_data->uid = 0; - te_data->id = TE_MAX; - te_data->vif = NULL; -} - -void iwl_mvm_roc_done_wk(struct work_struct *wk) -{ - struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); - u32 queues = 0; - - /* - * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. - * This will cause the TX path to drop offchannel transmissions. - * That would also be done by mac80211, but it is racy, in particular - * in the case that the time event actually completed in the firmware - * (which is handled in iwl_mvm_te_handle_notif). - */ - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) { - queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE); - iwl_mvm_unref(mvm, IWL_MVM_REF_ROC); - } - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { - queues |= BIT(mvm->aux_queue); - iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX); - } - - synchronize_net(); - - /* - * Flush the offchannel queue -- this is called when the time - * event finishes or is canceled, so that frames queued for it - * won't get stuck on the queue and be transmitted in the next - * time event. 
- * We have to send the command asynchronously since this cannot - * be under the mutex for locking reasons, but that's not an - * issue as it will have to complete before the next command is - * executed, and a new time event means a new command. - */ - iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC); -} - -static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) -{ - /* - * Of course, our status bit is just as racy as mac80211, so in - * addition, fire off the work struct which will drop all frames - * from the hardware queues that made it through the race. First - * it will of course synchronize the TX path to make sure that - * any *new* TX will be rejected. - */ - schedule_work(&mvm->roc_done_wk); -} - -static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm) -{ - struct ieee80211_vif *csa_vif; - - rcu_read_lock(); - - csa_vif = rcu_dereference(mvm->csa_vif); - if (!csa_vif || !csa_vif->csa_active) - goto out_unlock; - - IWL_DEBUG_TE(mvm, "CSA NOA started\n"); - - /* - * CSA NoA is started but we still have beacons to - * transmit on the current channel. - * So we just do nothing here and the switch - * will be performed on the last TBTT. - */ - if (!ieee80211_csa_is_complete(csa_vif)) { - IWL_WARN(mvm, "CSA NOA started too early\n"); - goto out_unlock; - } - - ieee80211_csa_finish(csa_vif); - - rcu_read_unlock(); - - RCU_INIT_POINTER(mvm->csa_vif, NULL); - - return; - -out_unlock: - rcu_read_unlock(); -} - -static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - const char *errmsg) -{ - if (vif->type != NL80211_IFTYPE_STATION) - return false; - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) - return false; - if (errmsg) - IWL_ERR(mvm, "%s\n", errmsg); - - iwl_mvm_connection_loss(mvm, vif, errmsg); - return true; -} - -static void -iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, - struct iwl_mvm_time_event_data *te_data, - struct iwl_time_event_notif *notif) -{ - struct ieee80211_vif *vif = te_data->vif; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (!notif->status) - IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); - - switch (te_data->vif->type) { - case NL80211_IFTYPE_AP: - if (!notif->status) - mvmvif->csa_failed = true; - iwl_mvm_csa_noa_start(mvm); - break; - case NL80211_IFTYPE_STATION: - if (!notif->status) { - iwl_mvm_connection_loss(mvm, vif, - "CSA TE failed to start"); - break; - } - iwl_mvm_csa_client_absent(mvm, te_data->vif); - ieee80211_chswitch_done(te_data->vif, true); - break; - default: - /* should never happen */ - WARN_ON_ONCE(1); - break; - } - - /* we don't need it anymore */ - iwl_mvm_te_clear_data(mvm, te_data); -} - -static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, - struct iwl_time_event_notif *notif, - struct iwl_mvm_time_event_data *te_data) -{ - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_time_event *te_trig; - int i; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT)) - return; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT); - te_trig = (void *)trig->data; - - if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig)) - return; - - for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { - u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id); - u32 trig_action_bitmap = - le32_to_cpu(te_trig->time_events[i].action_bitmap); - u32 trig_status_bitmap = - le32_to_cpu(te_trig->time_events[i].status_bitmap); - - if (trig_te_id != te_data->id || - !(trig_action_bitmap & le32_to_cpu(notif->action)) || - 
!(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) - continue; - - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Time event %d Action 0x%x received status: %d", - te_data->id, - le32_to_cpu(notif->action), - le32_to_cpu(notif->status)); - break; - } -} - -/* - * Handles a FW notification for an event that is known to the driver. - * - * @mvm: the mvm component - * @te_data: the time event data - * @notif: the notification data corresponding the time event data. - */ -static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, - struct iwl_mvm_time_event_data *te_data, - struct iwl_time_event_notif *notif) -{ - lockdep_assert_held(&mvm->time_event_lock); - - IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n", - le32_to_cpu(notif->unique_id), - le32_to_cpu(notif->action)); - - iwl_mvm_te_check_trigger(mvm, notif, te_data); - - /* - * The FW sends the start/end time event notifications even for events - * that it fails to schedule. This is indicated in the status field of - * the notification. This happens in cases that the scheduler cannot - * find a schedule that can handle the event (for example requesting a - * P2P Device discoveribility, while there are other higher priority - * events in the system). - */ - if (!le32_to_cpu(notif->status)) { - const char *msg; - - if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START)) - msg = "Time Event start notification failure"; - else - msg = "Time Event end notification failure"; - - IWL_DEBUG_TE(mvm, "%s\n", msg); - - if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) { - iwl_mvm_te_clear_data(mvm, te_data); - return; - } - } - - if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) { - IWL_DEBUG_TE(mvm, - "TE ended - current time %lu, estimated end %lu\n", - jiffies, te_data->end_jiffies); - - switch (te_data->vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - ieee80211_remain_on_channel_expired(mvm->hw); - iwl_mvm_roc_finished(mvm); - break; - case NL80211_IFTYPE_STATION: - /* - * By now, we should have finished association - * and know the dtim period. - */ - iwl_mvm_te_check_disconnect(mvm, te_data->vif, - "No association and the time event is over already..."); - break; - default: - break; - } - - iwl_mvm_te_clear_data(mvm, te_data); - } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) { - te_data->running = true; - te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); - - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { - set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); - iwl_mvm_ref(mvm, IWL_MVM_REF_ROC); - ieee80211_ready_on_channel(mvm->hw); - } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { - iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); - } - } else { - IWL_WARN(mvm, "Got TE with unknown action\n"); - } -} - -/* - * Handle A Aux ROC time event - */ -static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, - struct iwl_time_event_notif *notif) -{ - struct iwl_mvm_time_event_data *te_data, *tmp; - bool aux_roc_te = false; - - list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) { - if (le32_to_cpu(notif->unique_id) == te_data->uid) { - aux_roc_te = true; - break; - } - } - if (!aux_roc_te) /* Not a Aux ROC time event */ - return -EINVAL; - - iwl_mvm_te_check_trigger(mvm, notif, te_data); - - if (!le32_to_cpu(notif->status)) { - IWL_DEBUG_TE(mvm, - "ERROR: Aux ROC Time Event %s notification failure\n", - (le32_to_cpu(notif->action) & - TE_V2_NOTIF_HOST_EVENT_START) ? 
"start" : "end"); - return -EINVAL; - } - - IWL_DEBUG_TE(mvm, - "Aux ROC time event notification - UID = 0x%x action %d\n", - le32_to_cpu(notif->unique_id), - le32_to_cpu(notif->action)); - - if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { - /* End TE, notify mac80211 */ - ieee80211_remain_on_channel_expired(mvm->hw); - iwl_mvm_roc_finished(mvm); /* flush aux queue */ - list_del(&te_data->list); /* remove from list */ - te_data->running = false; - te_data->vif = NULL; - te_data->uid = 0; - te_data->id = TE_MAX; - } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { - set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); - te_data->running = true; - iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX); - ieee80211_ready_on_channel(mvm->hw); /* Start TE */ - } else { - IWL_DEBUG_TE(mvm, - "ERROR: Unknown Aux ROC Time Event (action = %d)\n", - le32_to_cpu(notif->action)); - return -EINVAL; - } - - return 0; -} - -/* - * The Rx handler for time event notifications - */ -void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_time_event_notif *notif = (void *)pkt->data; - struct iwl_mvm_time_event_data *te_data, *tmp; - - IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n", - le32_to_cpu(notif->unique_id), - le32_to_cpu(notif->action)); - - spin_lock_bh(&mvm->time_event_lock); - /* This time event is triggered for Aux ROC request */ - if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif)) - goto unlock; - - list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) { - if (le32_to_cpu(notif->unique_id) == te_data->uid) - iwl_mvm_te_handle_notif(mvm, te_data, notif); - } -unlock: - spin_unlock_bh(&mvm->time_event_lock); -} - -static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_mvm *mvm = - container_of(notif_wait, struct iwl_mvm, notif_wait); - struct iwl_mvm_time_event_data *te_data = data; - struct iwl_time_event_notif *resp; - int resp_len = iwl_rx_packet_payload_len(pkt); - - if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) - return true; - - if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { - IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n"); - return true; - } - - resp = (void *)pkt->data; - - /* te_data->uid is already set in the TIME_EVENT_CMD response */ - if (le32_to_cpu(resp->unique_id) != te_data->uid) - return false; - - IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", - te_data->uid); - if (!resp->status) - IWL_ERR(mvm, - "TIME_EVENT_NOTIFICATION received but not executed\n"); - - return true; -} - -static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_mvm *mvm = - container_of(notif_wait, struct iwl_mvm, notif_wait); - struct iwl_mvm_time_event_data *te_data = data; - struct iwl_time_event_resp *resp; - int resp_len = iwl_rx_packet_payload_len(pkt); - - if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD)) - return true; - - if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { - IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n"); - return true; - } - - resp = (void *)pkt->data; - - /* we should never get a response to another TIME_EVENT_CMD here */ - if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id)) - return false; - - te_data->uid = le32_to_cpu(resp->unique_id); - IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", - te_data->uid); - return true; -} - -static int 
iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_time_event_data *te_data, - struct iwl_time_event_cmd *te_cmd) -{ - static const u16 time_event_response[] = { TIME_EVENT_CMD }; - struct iwl_notification_wait wait_time_event; - int ret; - - lockdep_assert_held(&mvm->mutex); - - IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n", - le32_to_cpu(te_cmd->duration)); - - spin_lock_bh(&mvm->time_event_lock); - if (WARN_ON(te_data->id != TE_MAX)) { - spin_unlock_bh(&mvm->time_event_lock); - return -EIO; - } - te_data->vif = vif; - te_data->duration = le32_to_cpu(te_cmd->duration); - te_data->id = le32_to_cpu(te_cmd->id); - list_add_tail(&te_data->list, &mvm->time_event_list); - spin_unlock_bh(&mvm->time_event_lock); - - /* - * Use a notification wait, which really just processes the - * command response and doesn't wait for anything, in order - * to be able to process the response and get the UID inside - * the RX path. Using CMD_WANT_SKB doesn't work because it - * stores the buffer and then wakes up this thread, by which - * time another notification (that the time event started) - * might already be processed unsuccessfully. - */ - iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, - time_event_response, - ARRAY_SIZE(time_event_response), - iwl_mvm_time_event_response, te_data); - - ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, - sizeof(*te_cmd), te_cmd); - if (ret) { - IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); - iwl_remove_notification(&mvm->notif_wait, &wait_time_event); - goto out_clear_te; - } - - /* No need to wait for anything, so just pass 1 (0 isn't valid) */ - ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); - /* should never fail */ - WARN_ON_ONCE(ret); - - if (ret) { - out_clear_te: - spin_lock_bh(&mvm->time_event_lock); - iwl_mvm_te_clear_data(mvm, te_data); - spin_unlock_bh(&mvm->time_event_lock); - } - return ret; -} - -void iwl_mvm_protect_session(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 duration, u32 min_duration, - u32 max_delay, bool wait_for_notif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; - struct iwl_notification_wait wait_te_notif; - struct iwl_time_event_cmd time_cmd = {}; - - lockdep_assert_held(&mvm->mutex); - - if (te_data->running && - time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { - IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", - jiffies_to_msecs(te_data->end_jiffies - jiffies)); - return; - } - - if (te_data->running) { - IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", - te_data->uid, - jiffies_to_msecs(te_data->end_jiffies - jiffies)); - /* - * we don't have enough time - * cancel the current TE and issue a new one - * Of course it would be better to remove the old one only - * when the new one is added, but we don't care if we are off - * channel for a bit. All we need to do, is not to return - * before we actually begin to be on the channel. 
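The add flow above reduces to a register/send/wait skeleton. The sketch below condenses it for readability; it is illustrative only (locking, the time_event_list bookkeeping and the te_data cleanup on failure are omitted), not a replacement for iwl_mvm_time_event_send_add(). All helpers referenced are the ones used by the code above.

/* Sketch only: the wait-for-UID pattern, error/cleanup paths trimmed. */
static int iwl_mvm_te_add_sketch(struct iwl_mvm *mvm,
				 struct iwl_mvm_time_event_data *te_data,
				 struct iwl_time_event_cmd *te_cmd)
{
	static const u16 te_resp[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_te;
	int ret;

	/* register the waiter before sending, so the response is never missed */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te,
				   te_resp, ARRAY_SIZE(te_resp),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		iwl_remove_notification(&mvm->notif_wait, &wait_te);
		return ret;
	}

	/* iwl_mvm_time_event_response() already stored the UID in te_data->uid */
	return iwl_wait_notification(&mvm->notif_wait, &wait_te, 1);
}

The design point is that CMD_WANT_SKB would deliver the response too late: by the time the caller woke up, the "time event started" notification might already have been processed without a known UID, so the UID has to be captured inside the RX path itself.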
- */ - iwl_mvm_stop_session_protection(mvm, vif); - } - - time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); - time_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); - time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); - - time_cmd.apply_time = cpu_to_le32(0); - - time_cmd.max_frags = TE_V2_FRAG_NONE; - time_cmd.max_delay = cpu_to_le32(max_delay); - /* TODO: why do we need to interval = bi if it is not periodic? */ - time_cmd.interval = cpu_to_le32(1); - time_cmd.duration = cpu_to_le32(duration); - time_cmd.repeat = 1; - time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | - TE_V2_NOTIF_HOST_EVENT_END | - T2_V2_START_IMMEDIATELY); - - if (!wait_for_notif) { - iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); - return; - } - - /* - * Create notification_wait for the TIME_EVENT_NOTIFICATION to use - * right after we send the time event - */ - iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, - te_notif_response, - ARRAY_SIZE(te_notif_response), - iwl_mvm_te_notif, te_data); - - /* If TE was sent OK - wait for the notification that started */ - if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) { - IWL_ERR(mvm, "Failed to add TE to protect session\n"); - iwl_remove_notification(&mvm->notif_wait, &wait_te_notif); - } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, - TU_TO_JIFFIES(max_delay))) { - IWL_ERR(mvm, "Failed to protect session until TE\n"); - } -} - -static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, - struct iwl_mvm_time_event_data *te_data, - u32 *uid) -{ - u32 id; - - /* - * It is possible that by the time we got to this point the time - * event was already removed. - */ - spin_lock_bh(&mvm->time_event_lock); - - /* Save time event uid before clearing its data */ - *uid = te_data->uid; - id = te_data->id; - - /* - * The clear_data function handles time events that were already removed - */ - iwl_mvm_te_clear_data(mvm, te_data); - spin_unlock_bh(&mvm->time_event_lock); - - /* - * It is possible that by the time we try to remove it, the time event - * has already ended and removed. In such a case there is no need to - * send a removal command. - */ - if (id == TE_MAX) { - IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid); - return false; - } - - return true; -} - -/* - * Explicit request to remove a aux roc time event. The removal of a time - * event needs to be synchronized with the flow of a time event's end - * notification, which also removes the time event from the op mode - * data structures. - */ -static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif, - struct iwl_mvm_time_event_data *te_data) -{ - struct iwl_hs20_roc_req aux_cmd = {}; - u32 uid; - int ret; - - if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) - return; - - aux_cmd.event_unique_id = cpu_to_le32(uid); - aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); - aux_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); - IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", - le32_to_cpu(aux_cmd.event_unique_id)); - ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, - sizeof(aux_cmd), &aux_cmd); - - if (WARN_ON(ret)) - return; -} - -/* - * Explicit request to remove a time event. The removal of a time event needs to - * be synchronized with the flow of a time event's end notification, which also - * removes the time event from the op mode data structures. 
- */ -void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif, - struct iwl_mvm_time_event_data *te_data) -{ - struct iwl_time_event_cmd time_cmd = {}; - u32 uid; - int ret; - - if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) - return; - - /* When we remove a TE, the UID is to be set in the id field */ - time_cmd.id = cpu_to_le32(uid); - time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); - time_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); - - IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); - ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, - sizeof(time_cmd), &time_cmd); - if (WARN_ON(ret)) - return; -} - -void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - - lockdep_assert_held(&mvm->mutex); - iwl_mvm_remove_time_event(mvm, mvmvif, te_data); -} - -int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - int duration, enum ieee80211_roc_type type) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - struct iwl_time_event_cmd time_cmd = {}; - - lockdep_assert_held(&mvm->mutex); - if (te_data->running) { - IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n"); - return -EBUSY; - } - - /* - * Flush the done work, just in case it's still pending, so that - * the work it does can complete and we can accept new frames. - */ - flush_work(&mvm->roc_done_wk); - - time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); - time_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); - - switch (type) { - case IEEE80211_ROC_TYPE_NORMAL: - time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL); - break; - case IEEE80211_ROC_TYPE_MGMT_TX: - time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX); - break; - default: - WARN_ONCE(1, "Got an invalid ROC type\n"); - return -EINVAL; - } - - time_cmd.apply_time = cpu_to_le32(0); - time_cmd.interval = cpu_to_le32(1); - - /* - * The P2P Device TEs can have lower priority than other events - * that are being scheduled by the driver/fw, and thus it might not be - * scheduled. To improve the chances of it being scheduled, allow them - * to be fragmented, and in addition allow them to be delayed. - */ - time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS); - time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); - time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); - time_cmd.repeat = 1; - time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | - TE_V2_NOTIF_HOST_EVENT_END | - T2_V2_START_IMMEDIATELY); - - return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); -} - -void iwl_mvm_stop_roc(struct iwl_mvm *mvm) -{ - struct iwl_mvm_vif *mvmvif = NULL; - struct iwl_mvm_time_event_data *te_data; - bool is_p2p = false; - - lockdep_assert_held(&mvm->mutex); - - spin_lock_bh(&mvm->time_event_lock); - - /* - * Iterate over the list of time events and find the time event that is - * associated with a P2P_DEVICE interface. 
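To make the P2P-device ROC sizing above concrete: assuming a 500 ms remain-on-channel request, and taking MSEC_TO_TU() as the usual msec * 1000 / 1024 conversion (its exact definition lives in mvm.h, outside this hunk), the time event is programmed roughly as follows. Standalone arithmetic for illustration only.

#include <stdio.h>

/* assumed conversion, 1 TU = 1024 usec; see mvm.h for the real macro */
#define EXAMPLE_MSEC_TO_TU(ms) ((ms) * 1000 / 1024)

int main(void)
{
	unsigned int duration_ms = 500;	/* assumed ROC duration */
	unsigned int duration_tu = EXAMPLE_MSEC_TO_TU(duration_ms);
	unsigned int max_delay_tu = EXAMPLE_MSEC_TO_TU(duration_ms / 2);
	unsigned int max_frags = duration_tu / 50;	/* capped at TE_V2_FRAG_ENDLESS */

	/* 500 ms -> 488 TU duration, 244 TU max delay, up to 9 fragments */
	printf("duration=%u TU max_delay=%u TU max_frags=%u\n",
	       duration_tu, max_delay_tu, max_frags);
	return 0;
}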
- * This assumes that a P2P_DEVICE interface can have only a single time - * event at any given time and this time event coresponds to a ROC - * request - */ - list_for_each_entry(te_data, &mvm->time_event_list, list) { - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { - mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - is_p2p = true; - goto remove_te; - } - } - - /* There can only be at most one AUX ROC time event, we just use the - * list to simplify/unify code. Remove it if it exists. - */ - te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, - struct iwl_mvm_time_event_data, - list); - if (te_data) - mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - -remove_te: - spin_unlock_bh(&mvm->time_event_lock); - - if (!mvmvif) { - IWL_WARN(mvm, "No remain on channel event\n"); - return; - } - - if (is_p2p) - iwl_mvm_remove_time_event(mvm, mvmvif, te_data); - else - iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); - - iwl_mvm_roc_finished(mvm); -} - -int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 duration, u32 apply_time) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - struct iwl_time_event_cmd time_cmd = {}; - - lockdep_assert_held(&mvm->mutex); - - if (te_data->running) { - IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); - return -EBUSY; - } - - time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); - time_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); - time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD); - time_cmd.apply_time = cpu_to_le32(apply_time); - time_cmd.max_frags = TE_V2_FRAG_NONE; - time_cmd.duration = cpu_to_le32(duration); - time_cmd.repeat = 1; - time_cmd.interval = cpu_to_le32(1); - time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | - TE_V2_ABSENCE); - - return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h deleted file mode 100644 index cbdf8e52a5f1..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.h +++ /dev/null @@ -1,249 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __time_event_h__ -#define __time_event_h__ - -#include "fw-api.h" - -#include "mvm.h" - -/** - * DOC: Time Events - what is it? - * - * Time Events are a fw feature that allows the driver to control the presence - * of the device on the channel. Since the fw supports multiple channels - * concurrently, the fw may choose to jump to another channel at any time. - * In order to make sure that the fw is on a specific channel at a certain time - * and for a certain duration, the driver needs to issue a time event. - * - * The simplest example is for BSS association. The driver issues a time event, - * waits for it to start, and only then tells mac80211 that we can start the - * association. This way, we make sure that the association will be done - * smoothly and won't be interrupted by channel switch decided within the fw. - */ - - /** - * DOC: The flow against the fw - * - * When the driver needs to make sure we are in a certain channel, at a certain - * time and for a certain duration, it sends a Time Event. The flow against the - * fw goes like this: - * 1) Driver sends a TIME_EVENT_CMD to the fw - * 2) Driver gets the response for that command. This response contains the - * Unique ID (UID) of the event. - * 3) The fw sends notification when the event starts. - * - * Of course the API provides various options that allow to cover parameters - * of the flow. - * What is the duration of the event? - * What is the start time of the event? - * Is there an end-time for the event? - * How much can the event be delayed? - * Can the event be split? - * If yes what is the maximal number of chunks? - * etc... 
- */ - -/** - * DOC: Abstraction to the driver - * - * In order to simplify the use of time events to the rest of the driver, - * we abstract the use of time events. This component provides the functions - * needed by the driver. - */ - -#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500 -#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400 - -/** - * iwl_mvm_protect_session - start / extend the session protection. - * @mvm: the mvm component - * @vif: the virtual interface for which the session is issued - * @duration: the duration of the session in TU. - * @min_duration: will start a new session if the current session will end - * in less than min_duration. - * @max_delay: maximum delay before starting the time event (in TU) - * @wait_for_notif: true if it is required that a time event notification be - * waited for (that the time event has been scheduled before returning) - * - * This function can be used to start a session protection which means that the - * fw will stay on the channel for %duration_ms milliseconds. This function - * can block (sleep) until the session starts. This function can also be used - * to extend a currently running session. - * This function is meant to be used for BSS association for example, where we - * want to make sure that the fw stays on the channel during the association. - */ -void iwl_mvm_protect_session(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 duration, u32 min_duration, - u32 max_delay, bool wait_for_notif); - -/** - * iwl_mvm_stop_session_protection - cancel the session protection. - * @mvm: the mvm component - * @vif: the virtual interface for which the session is issued - * - * This functions cancels the session protection which is an act of good - * citizenship. If it is not needed any more it should be canceled because - * the other bindings wait for the medium during that time. - * This funtions doesn't sleep. - */ -void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); - -/* - * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION. - */ -void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - -/** - * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality - * @mvm: the mvm component - * @vif: the virtual interface for which the roc is requested. It is assumed - * that the vif type is NL80211_IFTYPE_P2P_DEVICE - * @duration: the requested duration in millisecond for the fw to be on the - * channel that is bound to the vif. - * @type: the remain on channel request type - * - * This function can be used to issue a remain on channel session, - * which means that the fw will stay in the channel for the request %duration - * milliseconds. The function is async, meaning that it only issues the ROC - * request but does not wait for it to start. Once the FW is ready to serve the - * ROC request, it will issue a notification to the driver that it is on the - * requested channel. Once the FW completes the ROC request it will issue - * another notification to the driver. - */ -int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - int duration, enum ieee80211_roc_type type); - -/** - * iwl_mvm_stop_roc - stop remain on channel functionality - * @mvm: the mvm component - * - * This function can be used to cancel an ongoing ROC session. - * The function is async, it will instruct the FW to stop serving the ROC - * session, but will not wait for the actual stopping of the session. 
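As a usage illustration of the session-protection API documented above: a caller that needs the device to stay on the operating channel while association completes could protect the session along these lines. This is a sketch, not the actual mac80211.c call site (which is outside this hunk); the duration values are arbitrary examples in TU.

/*
 * Illustrative caller only. The caller holds mvm->mutex, as the
 * implementation asserts via lockdep.
 */
static void example_protect_for_assoc(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif)
{
	u32 duration = 600;	/* TU, example value */
	u32 min_duration = 400;	/* TU, example value */

	/*
	 * Block until the firmware has actually scheduled the time event
	 * (wait_for_notif = true), allowing up to 500 TU of start delay.
	 */
	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, true);
}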
- */ -void iwl_mvm_stop_roc(struct iwl_mvm *mvm); - -/** - * iwl_mvm_remove_time_event - general function to clean up of time event - * @mvm: the mvm component - * @vif: the vif to which the time event belongs - * @te_data: the time event data that corresponds to that time event - * - * This function can be used to cancel a time event regardless its type. - * It is useful for cleaning up time events running before removing an - * interface. - */ -void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif, - struct iwl_mvm_time_event_data *te_data); - -/** - * iwl_mvm_te_clear_data - remove time event from list - * @mvm: the mvm component - * @te_data: the time event data to remove - * - * This function is mostly internal, it is made available here only - * for firmware restart purposes. - */ -void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, - struct iwl_mvm_time_event_data *te_data); - -void iwl_mvm_roc_done_wk(struct work_struct *wk); - -/** - * iwl_mvm_schedule_csa_period - request channel switch absence period - * @mvm: the mvm component - * @vif: the virtual interface for which the channel switch is issued - * @duration: the duration of the NoA in TU. - * @apply_time: NoA start time in GP2. - * - * This function is used to schedule NoA time event and is used to perform - * the channel switch flow. - */ -int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 duration, u32 apply_time); - -/** - * iwl_mvm_te_scheduled - check if the fw received the TE cmd - * @te_data: the time event data that corresponds to that time event - * - * This function returns true iff this TE is added to the fw. - */ -static inline bool -iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data) -{ - if (!te_data) - return false; - - return !!te_data->uid; -} - -#endif /* __time_event_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c deleted file mode 100644 index 4007f1d421dd..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/tof.c +++ /dev/null @@ -1,306 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include "mvm.h" -#include "fw-api-tof.h" - -#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 - -void iwl_mvm_tof_init(struct iwl_mvm *mvm) -{ - struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return; - - memset(tof_data, 0, sizeof(*tof_data)); - - tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (IWL_MVM_TOF_IS_RESPONDER) { - tof_data->responder_cfg.sub_grp_cmd_id = - cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); - tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT; - } -#endif - - tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD); - tof_data->range_req.req_timeout = 1; - tof_data->range_req.initiator = 1; - tof_data->range_req.report_policy = 3; - - tof_data->range_req_ext.sub_grp_cmd_id = - cpu_to_le32(TOF_RANGE_REQ_EXT_CMD); - - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; -} - -void iwl_mvm_tof_clean(struct iwl_mvm *mvm) -{ - struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return; - - memset(tof_data, 0, sizeof(*tof_data)); - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; -} - -static void iwl_tof_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - bool *enabled = _data; - - /* non bss vif exists */ - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) - *enabled = false; -} - -int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm) -{ - struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg; - bool enabled; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_tof_iterator, &enabled); - if (!enabled) { - IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n"); - 
return -EINVAL; - } - - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(*cmd), cmd); -} - -int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id) -{ - struct iwl_tof_range_abort_cmd cmd = { - .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD), - .request_id = id, - }; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (id != mvm->tof_data.active_range_request) { - IWL_ERR(mvm, "Invalid range request id %d (active %d)\n", - id, mvm->tof_data.active_range_request); - return -EINVAL; - } - - /* after abort is sent there's no active request anymore */ - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(cmd), &cmd); -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (vif->p2p || vif->type != NL80211_IFTYPE_AP || - !mvmvif->ap_ibss_active) { - IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); - return -EIO; - } - - cmd->sta_id = mvmvif->bcast_sta.sta_id; - memcpy(cmd->bssid, vif->addr, ETH_ALEN); - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(*cmd), cmd); -} -#endif - -int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), - .len = { sizeof(mvm->tof_data.range_req), }, - /* no copy because of the command size */ - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { - IWL_ERR(mvm, "Cannot send range request, not STA mode\n"); - return -EIO; - } - - /* nesting of range requests is not supported in FW */ - if (mvm->tof_data.active_range_request != - IWL_MVM_TOF_RANGE_REQ_MAX_ID) { - IWL_ERR(mvm, "Cannot send range req, already active req %d\n", - mvm->tof_data.active_range_request); - return -EIO; - } - - mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id; - - cmd.data[0] = &mvm->tof_data.range_req; - return iwl_mvm_send_cmd(mvm, &cmd); -} - -int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { - IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n"); - return -EIO; - } - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(mvm->tof_data.range_req_ext), - &mvm->tof_data.range_req_ext); -} - -static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_range_rsp_ntfy *resp = (void *)data; - - if (resp->request_id != mvm->tof_data.active_range_request) { - IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n", - resp->request_id, 
mvm->tof_data.active_range_request); - return -EIO; - } - - memcpy(&mvm->tof_data.range_resp, resp, - sizeof(struct iwl_tof_range_rsp_ntfy)); - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - - return 0; -} - -static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data; - - IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token); - return 0; -} - -static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_neighbor_report *report = - (struct iwl_tof_neighbor_report *)data; - - IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n", - report->bssid, report->request_token, report->status); - return 0; -} - -void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data; - - lockdep_assert_held(&mvm->mutex); - - switch (le32_to_cpu(resp->sub_grp_cmd_id)) { - case TOF_RANGE_RESPONSE_NOTIF: - iwl_mvm_tof_range_resp(mvm, resp->data); - break; - case TOF_MCSI_DEBUG_NOTIF: - iwl_mvm_tof_mcsi_notif(mvm, resp->data); - break; - case TOF_NEIGHBOR_REPORT_RSP_NOTIF: - iwl_mvm_tof_nb_report_notif(mvm, resp->data); - break; - default: - IWL_ERR(mvm, "Unknown sub-group command 0x%x\n", - resp->sub_grp_cmd_id); - break; - } -} diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h deleted file mode 100644 index 9beebc33cb8d..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/tof.h +++ /dev/null @@ -1,94 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __tof_h__ -#define __tof_h__ - -#include "fw-api-tof.h" - -struct iwl_mvm_tof_data { - struct iwl_tof_config_cmd tof_cfg; - struct iwl_tof_range_req_cmd range_req; - struct iwl_tof_range_req_ext_cmd range_req_ext; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct iwl_tof_responder_config_cmd responder_cfg; -#endif - struct iwl_tof_range_rsp_ntfy range_resp; - u8 last_abort_id; - u16 active_range_request; -}; - -void iwl_mvm_tof_init(struct iwl_mvm *mvm); -void iwl_mvm_tof_clean(struct iwl_mvm *mvm); -int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm); -int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id); -int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -#endif -#endif /* __tof_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c deleted file mode 100644 index cadfc0460597..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ /dev/null @@ -1,460 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include "mvm.h" - -#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ - -static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) -{ - struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; - u32 duration = tt->params.ct_kill_duration; - - if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) - return; - - IWL_ERR(mvm, "Enter CT Kill\n"); - iwl_mvm_set_hw_ctkill_state(mvm, true); - - tt->throttle = false; - tt->dynamic_smps = false; - - /* Don't schedule an exit work if we're in test mode, since - * the temperature will not change unless we manually set it - * again (or disable testing). 
- */ - if (!mvm->temperature_test) - schedule_delayed_work(&tt->ct_kill_exit, - round_jiffies_relative(duration * HZ)); -} - -static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) -{ - if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) - return; - - IWL_ERR(mvm, "Exit CT Kill\n"); - iwl_mvm_set_hw_ctkill_state(mvm, false); -} - -void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp) -{ - /* ignore the notification if we are in test mode */ - if (mvm->temperature_test) - return; - - if (mvm->temperature == temp) - return; - - mvm->temperature = temp; - iwl_mvm_tt_handler(mvm); -} - -static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_dts_measurement_notif *notif; - int len = iwl_rx_packet_payload_len(pkt); - int temp; - - if (WARN_ON_ONCE(len != sizeof(*notif))) { - IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); - return -EINVAL; - } - - notif = (void *)pkt->data; - - temp = le32_to_cpu(notif->temp); - - /* shouldn't be negative, but since it's s32, make sure it isn't */ - if (WARN_ON_ONCE(temp < 0)) - temp = 0; - - IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp); - - return temp; -} - -static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_mvm *mvm = - container_of(notif_wait, struct iwl_mvm, notif_wait); - int *temp = data; - int ret; - - ret = iwl_mvm_temp_notif_parse(mvm, pkt); - if (ret < 0) - return true; - - *temp = ret; - - return true; -} - -void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - int temp; - - /* the notification is handled synchronously in ctkill, so skip here */ - if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) - return; - - temp = iwl_mvm_temp_notif_parse(mvm, pkt); - if (temp < 0) - return; - - iwl_mvm_tt_temp_changed(mvm, temp); -} - -static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) -{ - struct iwl_dts_measurement_cmd cmd = { - .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), - }; - struct iwl_ext_dts_measurement_cmd extcmd = { - .control_mode = cpu_to_le32(DTS_AUTOMATIC), - }; - u32 cmdid; - - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) - cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE, - PHY_OPS_GROUP, 0); - else - cmdid = CMD_DTS_MEASUREMENT_TRIGGER; - - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) - return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd); - - return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd); -} - -int iwl_mvm_get_temp(struct iwl_mvm *mvm) -{ - struct iwl_notification_wait wait_temp_notif; - static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, - DTS_MEASUREMENT_NOTIF_WIDE) }; - int ret, temp; - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) - temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION; - - lockdep_assert_held(&mvm->mutex); - - iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, - temp_notif, ARRAY_SIZE(temp_notif), - iwl_mvm_temp_notif_wait, &temp); - - ret = iwl_mvm_get_temp_cmd(mvm); - if (ret) { - IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret); - iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif); - return ret; - } - - ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, - IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); - if (ret) { - IWL_ERR(mvm, "Getting the temperature timed out\n"); - return ret; - } - - return temp; -} - -static void 
check_exit_ctkill(struct work_struct *work) -{ - struct iwl_mvm_tt_mgmt *tt; - struct iwl_mvm *mvm; - u32 duration; - s32 temp; - - tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); - mvm = container_of(tt, struct iwl_mvm, thermal_throttle); - - duration = tt->params.ct_kill_duration; - - mutex_lock(&mvm->mutex); - - if (__iwl_mvm_mac_start(mvm)) - goto reschedule; - - /* make sure the device is available for direct read/writes */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) { - __iwl_mvm_mac_stop(mvm); - goto reschedule; - } - - temp = iwl_mvm_get_temp(mvm); - - iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); - - __iwl_mvm_mac_stop(mvm); - - if (temp < 0) - goto reschedule; - - IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); - - if (temp <= tt->params.ct_kill_exit) { - mutex_unlock(&mvm->mutex); - iwl_mvm_exit_ctkill(mvm); - return; - } - -reschedule: - mutex_unlock(&mvm->mutex); - schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, - round_jiffies(duration * HZ)); -} - -static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = _data; - enum ieee80211_smps_mode smps_mode; - - lockdep_assert_held(&mvm->mutex); - - if (mvm->thermal_throttle.dynamic_smps) - smps_mode = IEEE80211_SMPS_DYNAMIC; - else - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - if (vif->type != NL80211_IFTYPE_STATION) - return; - - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode); -} - -static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - int i, err; - - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta)) - continue; - mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (enable == mvmsta->tt_tx_protection) - continue; - err = iwl_mvm_tx_protection(mvm, mvmsta, enable); - if (err) { - IWL_ERR(mvm, "Failed to %s Tx protection\n", - enable ? "enable" : "disable"); - } else { - IWL_DEBUG_TEMP(mvm, "%s Tx protection\n", - enable ? 
"Enable" : "Disable"); - mvmsta->tt_tx_protection = enable; - } - } -} - -void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff) -{ - struct iwl_host_cmd cmd = { - .id = REPLY_THERMAL_MNG_BACKOFF, - .len = { sizeof(u32), }, - .data = { &backoff, }, - }; - - backoff = max(backoff, mvm->thermal_throttle.min_backoff); - - if (iwl_mvm_send_cmd(mvm, &cmd) == 0) { - IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n", - backoff); - mvm->thermal_throttle.tx_backoff = backoff; - } else { - IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n"); - } -} - -void iwl_mvm_tt_handler(struct iwl_mvm *mvm) -{ - struct iwl_tt_params *params = &mvm->thermal_throttle.params; - struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; - s32 temperature = mvm->temperature; - bool throttle_enable = false; - int i; - u32 tx_backoff; - - IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature); - - if (params->support_ct_kill && temperature >= params->ct_kill_entry) { - iwl_mvm_enter_ctkill(mvm); - return; - } - - if (params->support_ct_kill && - temperature <= params->ct_kill_exit) { - iwl_mvm_exit_ctkill(mvm); - return; - } - - if (params->support_dynamic_smps) { - if (!tt->dynamic_smps && - temperature >= params->dynamic_smps_entry) { - IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n"); - tt->dynamic_smps = true; - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_tt_smps_iterator, mvm); - throttle_enable = true; - } else if (tt->dynamic_smps && - temperature <= params->dynamic_smps_exit) { - IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n"); - tt->dynamic_smps = false; - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_tt_smps_iterator, mvm); - } - } - - if (params->support_tx_protection) { - if (temperature >= params->tx_protection_entry) { - iwl_mvm_tt_tx_protection(mvm, true); - throttle_enable = true; - } else if (temperature <= params->tx_protection_exit) { - iwl_mvm_tt_tx_protection(mvm, false); - } - } - - if (params->support_tx_backoff) { - tx_backoff = tt->min_backoff; - for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { - if (temperature < params->tx_backoff[i].temperature) - break; - tx_backoff = max(tt->min_backoff, - params->tx_backoff[i].backoff); - } - if (tx_backoff != tt->min_backoff) - throttle_enable = true; - if (tt->tx_backoff != tx_backoff) - iwl_mvm_tt_tx_backoff(mvm, tx_backoff); - } - - if (!tt->throttle && throttle_enable) { - IWL_WARN(mvm, - "Due to high temperature thermal throttling initiated\n"); - tt->throttle = true; - } else if (tt->throttle && !tt->dynamic_smps && - tt->tx_backoff == tt->min_backoff && - temperature <= params->tx_protection_exit) { - IWL_WARN(mvm, - "Temperature is back to normal thermal throttling stopped\n"); - tt->throttle = false; - } -} - -static const struct iwl_tt_params iwl_mvm_default_tt_params = { - .ct_kill_entry = 118, - .ct_kill_exit = 96, - .ct_kill_duration = 5, - .dynamic_smps_entry = 114, - .dynamic_smps_exit = 110, - .tx_protection_entry = 114, - .tx_protection_exit = 108, - .tx_backoff = { - {.temperature = 112, .backoff = 200}, - {.temperature = 113, .backoff = 600}, - {.temperature = 114, .backoff = 1200}, - {.temperature = 115, .backoff = 2000}, - {.temperature = 116, .backoff = 4000}, - {.temperature = 117, .backoff = 10000}, - }, - .support_ct_kill = true, - .support_dynamic_smps = true, - .support_tx_protection = true, - .support_tx_backoff = true, -}; - -void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff) -{ - struct iwl_mvm_tt_mgmt *tt = 
&mvm->thermal_throttle; - - IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n"); - - if (mvm->cfg->thermal_params) - tt->params = *mvm->cfg->thermal_params; - else - tt->params = iwl_mvm_default_tt_params; - - tt->throttle = false; - tt->dynamic_smps = false; - tt->min_backoff = min_backoff; - INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); -} - -void iwl_mvm_tt_exit(struct iwl_mvm *mvm) -{ - cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); - IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); -} diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c deleted file mode 100644 index c652a66be803..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ /dev/null @@ -1,1115 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include -#include - -#include "iwl-trans.h" -#include "iwl-eeprom-parse.h" -#include "mvm.h" -#include "sta.h" - -static void -iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, - u16 tid, u16 ssn) -{ - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_ba *ba_trig; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) - return; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); - ba_trig = (void *)trig->data; - - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) - return; - - if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) - return; - - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "BAR sent to %pM, tid %d, ssn %d", - addr, tid, ssn); -} - -/* - * Sets most of the Tx cmd's fields - */ -void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, - struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, u8 sta_id) -{ - struct ieee80211_hdr *hdr = (void *)skb->data; - __le16 fc = hdr->frame_control; - u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); - u32 len = skb->len + FCS_LEN; - u8 ac; - - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) - tx_flags |= TX_CMD_FLG_ACK; - else - tx_flags &= ~TX_CMD_FLG_ACK; - - if (ieee80211_is_probe_resp(fc)) - tx_flags |= TX_CMD_FLG_TSF; - - if (ieee80211_has_morefrags(fc)) - tx_flags |= TX_CMD_FLG_MORE_FRAG; - - if (ieee80211_is_data_qos(fc)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tx_cmd->tid_tspec = qc[0] & 0xf; - tx_flags &= ~TX_CMD_FLG_SEQ_CTL; - } else if (ieee80211_is_back_req(fc)) { - struct ieee80211_bar *bar = (void *)skb->data; - u16 control = le16_to_cpu(bar->control); - u16 ssn = le16_to_cpu(bar->start_seq_num); - - tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; - tx_cmd->tid_tspec = (control & - IEEE80211_BAR_CTRL_TID_INFO_MASK) >> - IEEE80211_BAR_CTRL_TID_INFO_SHIFT; - WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); - iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, - ssn); - } else { - tx_cmd->tid_tspec = IWL_TID_NON_QOS; - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) - tx_flags |= TX_CMD_FLG_SEQ_CTL; - else - tx_flags &= ~TX_CMD_FLG_SEQ_CTL; - } - - /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */ - if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) - ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; - else - ac = tid_to_mac80211_ac[0]; - - tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << - TX_CMD_FLG_BT_PRIO_POS; - - if (ieee80211_is_mgmt(fc)) { - if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) - tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); - else if (ieee80211_is_action(fc)) - tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); - else - tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); - - /* The spec allows Action frames in A-MPDU, we don't support - * it - */ - WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); - } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { - 
tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); - } else { - tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); - } - - if (ieee80211_is_data(fc) && len > mvm->rts_threshold && - !is_multicast_ether_addr(ieee80211_get_DA(hdr))) - tx_flags |= TX_CMD_FLG_PROT_REQUIRE; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && - ieee80211_action_contains_tpc(skb)) - tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; - - tx_cmd->tx_flags = cpu_to_le32(tx_flags); - /* Total # bytes to be transmitted */ - tx_cmd->len = cpu_to_le16((u16)skb->len); - tx_cmd->next_frame_len = 0; - tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); - tx_cmd->sta_id = sta_id; -} - -/* - * Sets the fields in the Tx cmd that are rate related - */ -void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, __le16 fc) -{ - u32 rate_flags; - int rate_idx; - u8 rate_plcp; - - /* Set retry limit on RTS packets */ - tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; - - /* Set retry limit on DATA packets and Probe Responses*/ - if (ieee80211_is_probe_resp(fc)) { - tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; - tx_cmd->rts_retry_limit = - min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); - } else if (ieee80211_is_back_req(fc)) { - tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; - } else { - tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; - } - - /* - * for data packets, rate info comes from the table inside the fw. This - * table is controlled by LINK_QUALITY commands - */ - - if (ieee80211_is_data(fc) && sta) { - tx_cmd->initial_rate_index = 0; - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); - return; - } else if (ieee80211_is_back_req(fc)) { - tx_cmd->tx_flags |= - cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); - } - - /* HT rate doesn't make sense for a non data frame */ - WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, - "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n", - info->control.rates[0].flags, - info->control.rates[0].idx, - le16_to_cpu(fc)); - - rate_idx = info->control.rates[0].idx; - /* if the rate isn't a well known legacy rate, take the lowest one */ - if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY) - rate_idx = rate_lowest_index( - &mvm->nvm_data->bands[info->band], sta); - - /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) - rate_idx += IWL_FIRST_OFDM_RATE; - - /* For 2.4 GHZ band, check that there is no need to remap */ - BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); - - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); - - mvm->mgmt_last_antenna_idx = - iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), - mvm->mgmt_last_antenna_idx); - - if (info->band == IEEE80211_BAND_2GHZ && - !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; - else - rate_flags = - BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; - - /* Set CCK flag as needed */ - if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; - - /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags); -} - -/* - * Sets the fields in the Tx cmd that are crypto related - */ -static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, - struct 
sk_buff *skb_frag, - int hdrlen) -{ - struct ieee80211_key_conf *keyconf = info->control.hw_key; - u8 *crypto_hdr = skb_frag->data + hdrlen; - u64 pn; - - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_CCMP_256: - iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); - pn = atomic64_inc_return(&keyconf->tx_pn); - crypto_hdr[0] = pn; - crypto_hdr[2] = 0; - crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); - crypto_hdr[1] = pn >> 8; - crypto_hdr[4] = pn >> 16; - crypto_hdr[5] = pn >> 24; - crypto_hdr[6] = pn >> 32; - crypto_hdr[7] = pn >> 40; - break; - - case WLAN_CIPHER_SUITE_TKIP: - tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; - ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); - break; - - case WLAN_CIPHER_SUITE_WEP104: - tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; - /* fall through */ - case WLAN_CIPHER_SUITE_WEP40: - tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | - ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & - TX_CMD_SEC_WEP_KEY_IDX_MSK); - - memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); - break; - default: - tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; - } -} - -/* - * Allocates and sets the Tx cmd the driver data pointers in the skb - */ -static struct iwl_device_cmd * -iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, - int hdrlen, struct ieee80211_sta *sta, u8 sta_id) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_device_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; - - dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); - - if (unlikely(!dev_cmd)) - return NULL; - - memset(dev_cmd, 0, sizeof(*dev_cmd)); - dev_cmd->hdr.cmd = TX_CMD; - tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - - if (info->control.hw_key) - iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); - - iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); - - iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); - - memset(&info->status, 0, sizeof(info->status)); - - info->driver_data[0] = NULL; - info->driver_data[1] = dev_cmd; - - return dev_cmd; -} - -int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_device_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; - u8 sta_id; - int hdrlen = ieee80211_hdrlen(hdr->frame_control); - - if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) - return -1; - - if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && - (!info->control.vif || - info->hw_queue != info->control.vif->cab_queue))) - return -1; - - /* - * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used - * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel - * queue. STATION (HS2.0) uses the auxiliary context of the FW, - * and hence needs to be sent on the aux queue - */ - if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && - info->control.vif->type == NL80211_IFTYPE_STATION) - IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; - - /* - * If the interface on which the frame is sent is the P2P_DEVICE - * or an AP/GO interface use the broadcast station associated - * with it; otherwise if the interface is a managed interface - * use the AP station associated with it for multicast traffic - * (this is not possible for unicast packets as a TLDS discovery - * response are sent without a station entry); otherwise use the - * AUX station. 
- */ - sta_id = mvm->aux_sta.sta_id; - if (info->control.vif) { - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(info->control.vif); - - if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info->control.vif->type == NL80211_IFTYPE_AP) - sta_id = mvmvif->bcast_sta.sta_id; - else if (info->control.vif->type == NL80211_IFTYPE_STATION && - is_multicast_ether_addr(hdr->addr1)) { - u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); - - if (ap_sta_id != IWL_MVM_STATION_COUNT) - sta_id = ap_sta_id; - } - } - - IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); - - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id); - if (!dev_cmd) - return -1; - - /* From now on, we cannot access info->control */ - tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); - - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { - iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); - return -1; - } - - return 0; -} - -/* - * Sets the fields in the Tx cmd that are crypto related - */ -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_mvm_sta *mvmsta; - struct iwl_device_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; - __le16 fc; - u16 seq_number = 0; - u8 tid = IWL_MAX_TID_COUNT; - u8 txq_id = info->hw_queue; - bool is_data_qos = false, is_ampdu = false; - int hdrlen; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - fc = hdr->frame_control; - hdrlen = ieee80211_hdrlen(fc); - - if (WARN_ON_ONCE(!mvmsta)) - return -1; - - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) - return -1; - - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id); - if (!dev_cmd) - goto drop; - - tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - /* From now on, we cannot access info->control */ - - /* - * we handle that entirely ourselves -- for uAPSD the firmware - * will always send a notification, and for PS-Poll responses - * we'll notify mac80211 when getting frame status - */ - info->flags &= ~IEEE80211_TX_STATUS_EOSP; - - spin_lock(&mvmsta->lock); - - if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { - u8 *qc = NULL; - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) - goto drop_unlock_sta; - - seq_number = mvmsta->tid_data[tid].seq_number; - seq_number &= IEEE80211_SCTL_SEQ; - hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(seq_number); - is_data_qos = true; - is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; - } - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); - - WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); - - if (sta->tdls) { - /* default to TID 0 for non-QoS packets */ - u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 
0 : tid; - - txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; - } - - if (is_ampdu) { - if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON)) - goto drop_unlock_sta; - txq_id = mvmsta->tid_data[tid].txq_id; - } - - IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, - tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); - - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) - goto drop_unlock_sta; - - if (is_data_qos && !ieee80211_has_morefrags(fc)) - mvmsta->tid_data[tid].seq_number = seq_number + 0x10; - - spin_unlock(&mvmsta->lock); - - if (txq_id < mvm->first_agg_queue) - atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); - - return 0; - -drop_unlock_sta: - iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); - spin_unlock(&mvmsta->lock); -drop: - return -1; -} - -static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, u8 tid) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - struct ieee80211_vif *vif = mvmsta->vif; - - lockdep_assert_held(&mvmsta->lock); - - if ((tid_data->state == IWL_AGG_ON || - tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && - iwl_mvm_tid_queued(tid_data) == 0) { - /* - * Now that this aggregation queue is empty tell mac80211 so it - * knows we no longer have frames buffered for the station on - * this TID (for the TIM bitmap calculation.) - */ - ieee80211_sta_set_buffered(sta, tid, false); - } - - if (tid_data->ssn != tid_data->next_reclaimed) - return; - - switch (tid_data->state) { - case IWL_EMPTYING_HW_QUEUE_ADDBA: - IWL_DEBUG_TX_QUEUES(mvm, - "Can continue addBA flow ssn = next_recl = %d\n", - tid_data->next_reclaimed); - tid_data->state = IWL_AGG_STARTING; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - - case IWL_EMPTYING_HW_QUEUE_DELBA: - IWL_DEBUG_TX_QUEUES(mvm, - "Can continue DELBA flow ssn = next_recl = %d\n", - tid_data->next_reclaimed); - iwl_mvm_disable_txq(mvm, tid_data->txq_id, - vif->hw_queue[tid_to_mac80211_ac[tid]], tid, - CMD_ASYNC); - tid_data->state = IWL_AGG_OFF; - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - - default: - break; - } -} - -#ifdef CONFIG_IWLWIFI_DEBUG -const char *iwl_mvm_get_tx_fail_reason(u32 status) -{ -#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x -#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x - - switch (status & TX_STATUS_MSK) { - case TX_STATUS_SUCCESS: - return "SUCCESS"; - TX_STATUS_POSTPONE(DELAY); - TX_STATUS_POSTPONE(FEW_BYTES); - TX_STATUS_POSTPONE(BT_PRIO); - TX_STATUS_POSTPONE(QUIET_PERIOD); - TX_STATUS_POSTPONE(CALC_TTAK); - TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); - TX_STATUS_FAIL(SHORT_LIMIT); - TX_STATUS_FAIL(LONG_LIMIT); - TX_STATUS_FAIL(UNDERRUN); - TX_STATUS_FAIL(DRAIN_FLOW); - TX_STATUS_FAIL(RFKILL_FLUSH); - TX_STATUS_FAIL(LIFE_EXPIRE); - TX_STATUS_FAIL(DEST_PS); - TX_STATUS_FAIL(HOST_ABORTED); - TX_STATUS_FAIL(BT_RETRY); - TX_STATUS_FAIL(STA_INVALID); - TX_STATUS_FAIL(FRAG_DROPPED); - TX_STATUS_FAIL(TID_DISABLE); - TX_STATUS_FAIL(FIFO_FLUSHED); - TX_STATUS_FAIL(SMALL_CF_POLL); - TX_STATUS_FAIL(FW_DROP); - TX_STATUS_FAIL(STA_COLOR_MISMATCH); - } - - return "UNKNOWN"; - -#undef TX_STATUS_FAIL -#undef TX_STATUS_POSTPONE -} -#endif /* CONFIG_IWLWIFI_DEBUG */ - -void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, - enum ieee80211_band band, - struct ieee80211_tx_rate *r) -{ - if (rate_n_flags & RATE_HT_MCS_GF_MSK) - r->flags |= IEEE80211_TX_RC_GREEN_FIELD; - switch (rate_n_flags & 
RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - break; - case RATE_MCS_CHAN_WIDTH_40: - r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - break; - case RATE_MCS_CHAN_WIDTH_80: - r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; - break; - case RATE_MCS_CHAN_WIDTH_160: - r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; - break; - } - if (rate_n_flags & RATE_MCS_SGI_MSK) - r->flags |= IEEE80211_TX_RC_SHORT_GI; - if (rate_n_flags & RATE_MCS_HT_MSK) { - r->flags |= IEEE80211_TX_RC_MCS; - r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { - ieee80211_rate_set_vht( - r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1); - r->flags |= IEEE80211_TX_RC_VHT_MCS; - } else { - r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - band); - } -} - -/** - * translate ucode response to mac80211 tx status control values - */ -static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, - struct ieee80211_tx_info *info) -{ - struct ieee80211_tx_rate *r = &info->status.rates[0]; - - info->status.antenna = - ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); - iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); -} - -static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct ieee80211_sta *sta; - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; - int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); - int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); - u32 status = le16_to_cpu(tx_resp->status.status); - u16 ssn = iwl_mvm_get_scd_ssn(tx_resp); - struct iwl_mvm_sta *mvmsta; - struct sk_buff_head skbs; - u8 skb_freed = 0; - u16 next_reclaimed, seq_ctl; - - __skb_queue_head_init(&skbs); - - seq_ctl = le16_to_cpu(tx_resp->seq_ctl); - - /* we can free until ssn % q.n_bd not inclusive */ - iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); - - while (!skb_queue_empty(&skbs)) { - struct sk_buff *skb = __skb_dequeue(&skbs); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - skb_freed++; - - iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); - - memset(&info->status, 0, sizeof(info->status)); - - info->flags &= ~IEEE80211_TX_CTL_AMPDU; - - /* inform mac80211 about what happened with the frame */ - switch (status & TX_STATUS_MSK) { - case TX_STATUS_SUCCESS: - case TX_STATUS_DIRECT_DONE: - info->flags |= IEEE80211_TX_STAT_ACK; - break; - case TX_STATUS_FAIL_DEST_PS: - info->flags |= IEEE80211_TX_STAT_TX_FILTERED; - break; - default: - break; - } - - info->status.rates[0].count = tx_resp->failure_frame + 1; - iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), - info); - info->status.status_driver_data[1] = - (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); - - /* Single frame failure in an AMPDU queue => send BAR */ - if (txq_id >= mvm->first_agg_queue && - !(info->flags & IEEE80211_TX_STAT_ACK) && - !(info->flags & IEEE80211_TX_STAT_TX_FILTERED)) - info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - - /* W/A FW bug: seq_ctl is wrong when the status isn't success */ - if (status != TX_STATUS_SUCCESS) { - struct ieee80211_hdr *hdr = (void *)skb->data; - seq_ctl = le16_to_cpu(hdr->seq_ctrl); - } - - /* - * TODO: this is not accurate if we are freeing more than one - * packet. 
- */ - info->status.tx_time = - le16_to_cpu(tx_resp->wireless_media_time); - BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); - info->status.status_driver_data[0] = - (void *)(uintptr_t)tx_resp->reduced_tpc; - - ieee80211_tx_status(mvm->hw, skb); - } - - if (txq_id >= mvm->first_agg_queue) { - /* If this is an aggregation queue, we use the ssn since: - * ssn = wifi seq_num % 256. - * The seq_ctl is the sequence control of the packet to which - * this Tx response relates. But if there is a hole in the - * bitmap of the BA we received, this Tx response may allow to - * reclaim the hole and all the subsequent packets that were - * already acked. In that case, seq_ctl != ssn, and the next - * packet to be reclaimed will be ssn and not seq_ctl. In that - * case, several packets will be reclaimed even if - * frame_count = 1. - * - * The ssn is the index (% 256) of the latest packet that has - * treated (acked / dropped) + 1. - */ - next_reclaimed = ssn; - } else { - /* The next packet to be reclaimed is the one after this one */ - next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10); - } - - IWL_DEBUG_TX_REPLY(mvm, - "TXQ %d status %s (0x%08x)\n", - txq_id, iwl_mvm_get_tx_fail_reason(status), status); - - IWL_DEBUG_TX_REPLY(mvm, - "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n", - le32_to_cpu(tx_resp->initial_rate), - tx_resp->failure_frame, SEQ_TO_INDEX(sequence), - ssn, next_reclaimed, seq_ctl); - - rcu_read_lock(); - - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - /* - * sta can't be NULL otherwise it'd mean that the sta has been freed in - * the firmware while we still have packets for it in the Tx queues. - */ - if (WARN_ON_ONCE(!sta)) - goto out; - - if (!IS_ERR(sta)) { - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - if (tid != IWL_TID_NON_QOS) { - struct iwl_mvm_tid_data *tid_data = - &mvmsta->tid_data[tid]; - - spin_lock_bh(&mvmsta->lock); - tid_data->next_reclaimed = next_reclaimed; - IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", - next_reclaimed); - iwl_mvm_check_ratid_empty(mvm, sta, tid); - spin_unlock_bh(&mvmsta->lock); - } - - if (mvmsta->next_status_eosp) { - mvmsta->next_status_eosp = false; - ieee80211_sta_eosp(sta); - } - } else { - mvmsta = NULL; - } - - /* - * If the txq is not an AMPDU queue, there is no chance we freed - * several skbs. Check that out... - */ - if (txq_id >= mvm->first_agg_queue) - goto out; - - /* We can't free more than one frame at once on a shared queue */ - WARN_ON(skb_freed > 1); - - /* If we have still frames for this STA nothing to do here */ - if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) - goto out; - - if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) { - - /* - * If there are no pending frames for this STA and - * the tx to this station is not disabled, notify - * mac80211 that this station can now wake up in its - * STA table. - * If mvmsta is not NULL, sta is valid. - */ - - spin_lock_bh(&mvmsta->lock); - - if (!mvmsta->disable_tx) - ieee80211_sta_block_awake(mvm->hw, sta, false); - - spin_unlock_bh(&mvmsta->lock); - } - - if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) { - /* - * We are draining and this was the last packet - pre_rcu_remove - * has been called already. We might be after the - * synchronize_net already. - * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues. 
- */ - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); - } - -out: - rcu_read_unlock(); -} - -#ifdef CONFIG_IWLWIFI_DEBUG -#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x -static const char *iwl_get_agg_tx_status(u16 status) -{ - switch (status & AGG_TX_STATE_STATUS_MSK) { - AGG_TX_STATE_(TRANSMITTED); - AGG_TX_STATE_(UNDERRUN); - AGG_TX_STATE_(BT_PRIO); - AGG_TX_STATE_(FEW_BYTES); - AGG_TX_STATE_(ABORT); - AGG_TX_STATE_(LAST_SENT_TTL); - AGG_TX_STATE_(LAST_SENT_TRY_CNT); - AGG_TX_STATE_(LAST_SENT_BT_KILL); - AGG_TX_STATE_(SCD_QUERY); - AGG_TX_STATE_(TEST_BAD_CRC32); - AGG_TX_STATE_(RESPONSE); - AGG_TX_STATE_(DUMP_TX); - AGG_TX_STATE_(DELAY_TX); - } - - return "UNKNOWN"; -} - -static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; - struct agg_tx_status *frame_status = &tx_resp->status; - int i; - - for (i = 0; i < tx_resp->frame_count; i++) { - u16 fstatus = le16_to_cpu(frame_status[i].status); - - IWL_DEBUG_TX_REPLY(mvm, - "status %s (0x%04x), try-count (%d) seq (0x%x)\n", - iwl_get_agg_tx_status(fstatus), - fstatus & AGG_TX_STATE_STATUS_MSK, - (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >> - AGG_TX_STATE_TRY_CNT_POS, - le16_to_cpu(frame_status[i].sequence)); - } -} -#else -static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{} -#endif /* CONFIG_IWLWIFI_DEBUG */ - -static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; - int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); - int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - struct ieee80211_sta *sta; - - if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue)) - return; - - if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) - return; - - iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); - - rcu_read_lock(); - - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - - if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvmsta->tid_data[tid].rate_n_flags = - le32_to_cpu(tx_resp->initial_rate); - mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc; - mvmsta->tid_data[tid].tx_time = - le16_to_cpu(tx_resp->wireless_media_time); - } - - rcu_read_unlock(); -} - -void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; - - if (tx_resp->frame_count == 1) - iwl_mvm_rx_tx_cmd_single(mvm, pkt); - else - iwl_mvm_rx_tx_cmd_agg(mvm, pkt); -} - -static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, - struct iwl_mvm_ba_notif *ba_notif, - struct iwl_mvm_tid_data *tid_data) -{ - info->flags |= IEEE80211_TX_STAT_AMPDU; - info->status.ampdu_ack_len = ba_notif->txed_2_done; - info->status.ampdu_len = ba_notif->txed; - iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, - info); - /* TODO: not accounted if the whole A-MPDU failed */ - info->status.tx_time = tid_data->tx_time; - info->status.status_driver_data[0] = - (void *)(uintptr_t)tid_data->reduced_tpc; - info->status.status_driver_data[1] = - (void *)(uintptr_t)tid_data->rate_n_flags; -} - -void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; - struct sk_buff_head reclaimed_skbs; - struct 
iwl_mvm_tid_data *tid_data; - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - struct sk_buff *skb; - int sta_id, tid, freed; - /* "flow" corresponds to Tx queue */ - u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); - /* "ssn" is start of block-ack Tx window, corresponds to index - * (in Tx queue's circular buffer) of first TFD/frame in window */ - u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); - - sta_id = ba_notif->sta_id; - tid = ba_notif->tid; - - if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || - tid >= IWL_MAX_TID_COUNT, - "sta_id %d tid %d", sta_id, tid)) - return; - - rcu_read_lock(); - - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - - /* Reclaiming frames for a station that has been deleted ? */ - if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { - rcu_read_unlock(); - return; - } - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - tid_data = &mvmsta->tid_data[tid]; - - if (tid_data->txq_id != scd_flow) { - IWL_ERR(mvm, - "invalid BA notification: Q %d, tid %d, flow %d\n", - tid_data->txq_id, tid, scd_flow); - rcu_read_unlock(); - return; - } - - spin_lock_bh(&mvmsta->lock); - - __skb_queue_head_init(&reclaimed_skbs); - - /* - * Release all TFDs before the SSN, i.e. all TFDs in front of - * block-ack window (we assume that they've been successfully - * transmitted ... if not, it's too late anyway). - */ - iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn, - &reclaimed_skbs); - - IWL_DEBUG_TX_REPLY(mvm, - "BA_NOTIFICATION Received from %pM, sta_id = %d\n", - (u8 *)&ba_notif->sta_addr_lo32, - ba_notif->sta_id); - IWL_DEBUG_TX_REPLY(mvm, - "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", - ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), - (unsigned long long)le64_to_cpu(ba_notif->bitmap), - scd_flow, ba_resp_scd_ssn, ba_notif->txed, - ba_notif->txed_2_done); - - tid_data->next_reclaimed = ba_resp_scd_ssn; - - iwl_mvm_check_ratid_empty(mvm, sta, tid); - - freed = 0; - - skb_queue_walk(&reclaimed_skbs, skb) { - struct ieee80211_hdr *hdr = (void *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (ieee80211_is_data_qos(hdr->frame_control)) - freed++; - else - WARN_ON_ONCE(1); - - iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); - - memset(&info->status, 0, sizeof(info->status)); - /* Packet was transmitted successfully, failures come as single - * frames because before failing a frame the firmware transmits - * it without aggregation at least once. - */ - info->flags |= IEEE80211_TX_STAT_ACK; - - /* this is the first skb we deliver in this batch */ - /* put the rate scaling data there */ - if (freed == 1) - iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data); - } - - spin_unlock_bh(&mvmsta->lock); - - /* We got a BA notif with 0 acked or scd_ssn didn't progress which is - * possible (i.e. first MPDU in the aggregation wasn't acked) - * Still it's important to update RS about sent vs. acked. - */ - if (skb_queue_empty(&reclaimed_skbs)) { - struct ieee80211_tx_info ba_info = {}; - struct ieee80211_chanctx_conf *chanctx_conf = NULL; - - if (mvmsta->vif) - chanctx_conf = - rcu_dereference(mvmsta->vif->chanctx_conf); - - if (WARN_ON_ONCE(!chanctx_conf)) - goto out; - - ba_info.band = chanctx_conf->def.chan->band; - iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data); - - IWL_DEBUG_TX_REPLY(mvm, "No reclaim. 
Update rs directly\n"); - iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info); - } - -out: - rcu_read_unlock(); - - while (!skb_queue_empty(&reclaimed_skbs)) { - skb = __skb_dequeue(&reclaimed_skbs); - ieee80211_tx_status(mvm->hw, skb); - } -} - -/* - * Note that there are transports that buffer frames before they reach - * the firmware. This means that after flush_tx_path is called, the - * queue might not be empty. The race-free way to handle this is to: - * 1) set the station as draining - * 2) flush the Tx path - * 3) wait for the transport queues to be empty - */ -int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags) -{ - int ret; - struct iwl_tx_path_flush_cmd flush_cmd = { - .queues_ctl = cpu_to_le32(tfd_msk), - .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), - }; - - ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, - sizeof(flush_cmd), &flush_cmd); - if (ret) - IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); - return ret; -} diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c deleted file mode 100644 index ad0f16909e2e..000000000000 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ /dev/null @@ -1,1083 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include - -#include "iwl-debug.h" -#include "iwl-io.h" -#include "iwl-prph.h" - -#include "mvm.h" -#include "fw-api-rs.h" - -/* - * Will return 0 even if the cmd failed when RFKILL is asserted unless - * CMD_WANT_SKB is set in cmd->flags. - */ -int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) -{ - int ret; - -#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) - if (WARN_ON(mvm->d3_test_active)) - return -EIO; -#endif - - /* - * Synchronous commands from this op-mode must hold - * the mutex, this ensures we don't try to send two - * (or more) synchronous commands at a time. - */ - if (!(cmd->flags & CMD_ASYNC)) - lockdep_assert_held(&mvm->mutex); - - ret = iwl_trans_send_cmd(mvm->trans, cmd); - - /* - * If the caller wants the SKB, then don't hide any problems, the - * caller might access the response buffer which will be NULL if - * the command failed. - */ - if (cmd->flags & CMD_WANT_SKB) - return ret; - - /* Silently ignore failures if RFKILL is asserted */ - if (!ret || ret == -ERFKILL) - return 0; - return ret; -} - -int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, - u32 flags, u16 len, const void *data) -{ - struct iwl_host_cmd cmd = { - .id = id, - .len = { len, }, - .data = { data, }, - .flags = flags, - }; - - return iwl_mvm_send_cmd(mvm, &cmd); -} - -/* - * We assume that the caller set the status to the success value - */ -int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, - u32 *status) -{ - struct iwl_rx_packet *pkt; - struct iwl_cmd_response *resp; - int ret, resp_len; - - lockdep_assert_held(&mvm->mutex); - -#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) - if (WARN_ON(mvm->d3_test_active)) - return -EIO; -#endif - - /* - * Only synchronous commands can wait for status, - * we use WANT_SKB so the caller can't. - */ - if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB), - "cmd flags %x", cmd->flags)) - return -EINVAL; - - cmd->flags |= CMD_WANT_SKB; - - ret = iwl_trans_send_cmd(mvm->trans, cmd); - if (ret == -ERFKILL) { - /* - * The command failed because of RFKILL, don't update - * the status, leave it as success and return 0. 
- */ - return 0; - } else if (ret) { - return ret; - } - - pkt = cmd->resp_pkt; - /* Can happen if RFKILL is asserted */ - if (!pkt) { - ret = 0; - goto out_free_resp; - } - - resp_len = iwl_rx_packet_payload_len(pkt); - if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { - ret = -EIO; - goto out_free_resp; - } - - resp = (void *)pkt->data; - *status = le32_to_cpu(resp->status); - out_free_resp: - iwl_free_resp(cmd); - return ret; -} - -/* - * We assume that the caller set the status to the sucess value - */ -int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, - const void *data, u32 *status) -{ - struct iwl_host_cmd cmd = { - .id = id, - .len = { len, }, - .data = { data, }, - }; - - return iwl_mvm_send_cmd_status(mvm, &cmd, status); -} - -#define IWL_DECLARE_RATE_INFO(r) \ - [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP - -/* - * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP - */ -static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { - IWL_DECLARE_RATE_INFO(1), - IWL_DECLARE_RATE_INFO(2), - IWL_DECLARE_RATE_INFO(5), - IWL_DECLARE_RATE_INFO(11), - IWL_DECLARE_RATE_INFO(6), - IWL_DECLARE_RATE_INFO(9), - IWL_DECLARE_RATE_INFO(12), - IWL_DECLARE_RATE_INFO(18), - IWL_DECLARE_RATE_INFO(24), - IWL_DECLARE_RATE_INFO(36), - IWL_DECLARE_RATE_INFO(48), - IWL_DECLARE_RATE_INFO(54), -}; - -int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, - enum ieee80211_band band) -{ - int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; - int idx; - int band_offset = 0; - - /* Legacy rate format, search for match in table */ - if (band == IEEE80211_BAND_5GHZ) - band_offset = IWL_FIRST_OFDM_RATE; - for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) - if (fw_rate_idx_to_plcp[idx] == rate) - return idx - band_offset; - - return -1; -} - -u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) -{ - /* Get PLCP rate for tx_cmd->rate_n_flags */ - return fw_rate_idx_to_plcp[rate_idx]; -} - -void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_error_resp *err_resp = (void *)pkt->data; - - IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n", - le32_to_cpu(err_resp->error_type), err_resp->cmd_id); - IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", - le16_to_cpu(err_resp->bad_cmd_seq_num), - le32_to_cpu(err_resp->error_service)); - IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", - le64_to_cpu(err_resp->timestamp)); -} - -/* - * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h. - * The parameter should also be a combination of ANT_[ABC]. - */ -u8 first_antenna(u8 mask) -{ - BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */ - if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */ - return BIT(0); - return BIT(ffs(mask) - 1); -} - -/* - * Toggles between TX antennas to send the probe request on. - * Receives the bitmask of valid TX antennas and the *index* used - * for the last TX, and returns the next valid *index* to use. - * In order to set it in the tx_cmd, must do BIT(idx). 
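first_antenna() above leans on ffs() returning the 1-based position of the lowest set bit, so BIT(ffs(mask) - 1) isolates the first antenna in an ANT_A/ANT_B/ANT_C bitmask. A quick stand-alone check of that identity, using the userspace ffs() from <strings.h> and purely illustrative values:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define BIT(n)	(1u << (n))

static unsigned int lowest_bit(unsigned int mask)
{
	if (!mask)
		return BIT(0);	/* ffs() returns 0 for an empty mask, guard it */
	return BIT(ffs(mask) - 1);
}

int main(void)
{
	/* With ANT_A=0x1, ANT_B=0x2, ANT_C=0x4: */
	printf("0x%x\n", lowest_bit(0x6));	/* ANT_B|ANT_C -> 0x2 */
	printf("0x%x\n", lowest_bit(0x4));	/* ANT_C       -> 0x4 */
	return 0;
}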
- */ -u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) -{ - u8 ind = last_idx; - int i; - - for (i = 0; i < RATE_MCS_ANT_NUM; i++) { - ind = (ind + 1) % RATE_MCS_ANT_NUM; - if (valid & BIT(ind)) - return ind; - } - - WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); - return last_idx; -} - -static const struct { - const char *name; - u8 num; -} advanced_lookup[] = { - { "NMI_INTERRUPT_WDG", 0x34 }, - { "SYSASSERT", 0x35 }, - { "UCODE_VERSION_MISMATCH", 0x37 }, - { "BAD_COMMAND", 0x38 }, - { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, - { "FATAL_ERROR", 0x3D }, - { "NMI_TRM_HW_ERR", 0x46 }, - { "NMI_INTERRUPT_TRM", 0x4C }, - { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, - { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, - { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, - { "NMI_INTERRUPT_HOST", 0x66 }, - { "NMI_INTERRUPT_ACTION_PT", 0x7C }, - { "NMI_INTERRUPT_UNKNOWN", 0x84 }, - { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, - { "ADVANCED_SYSASSERT", 0 }, -}; - -static const char *desc_lookup(u32 num) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) - if (advanced_lookup[i].num == num) - return advanced_lookup[i].name; - - /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ - return advanced_lookup[i].name; -} - -/* - * Note: This structure is read from the device with IO accesses, - * and the reading already does the endian conversion. As it is - * read with u32-sized accesses, any members with a different size - * need to be ordered correctly though! - */ -struct iwl_error_event_table_v1 { - u32 valid; /* (nonzero) valid, (0) log is empty */ - u32 error_id; /* type of error */ - u32 pc; /* program counter */ - u32 blink1; /* branch link */ - u32 blink2; /* branch link */ - u32 ilink1; /* interrupt link */ - u32 ilink2; /* interrupt link */ - u32 data1; /* error-specific data */ - u32 data2; /* error-specific data */ - u32 data3; /* error-specific data */ - u32 bcon_time; /* beacon timer */ - u32 tsf_low; /* network timestamp function timer */ - u32 tsf_hi; /* network timestamp function timer */ - u32 gp1; /* GP1 timer register */ - u32 gp2; /* GP2 timer register */ - u32 gp3; /* GP3 timer register */ - u32 ucode_ver; /* uCode version */ - u32 hw_ver; /* HW Silicon version */ - u32 brd_ver; /* HW board version */ - u32 log_pc; /* log program counter */ - u32 frame_ptr; /* frame pointer */ - u32 stack_ptr; /* stack pointer */ - u32 hcmd; /* last host command header */ - u32 isr0; /* isr status register LMPM_NIC_ISR0: - * rxtx_flag */ - u32 isr1; /* isr status register LMPM_NIC_ISR1: - * host_flag */ - u32 isr2; /* isr status register LMPM_NIC_ISR2: - * enc_flag */ - u32 isr3; /* isr status register LMPM_NIC_ISR3: - * time_flag */ - u32 isr4; /* isr status register LMPM_NIC_ISR4: - * wico interrupt */ - u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ - u32 wait_event; /* wait event() caller address */ - u32 l2p_control; /* L2pControlField */ - u32 l2p_duration; /* L2pDurationField */ - u32 l2p_mhvalid; /* L2pMhValidBits */ - u32 l2p_addr_match; /* L2pAddrMatchStat */ - u32 lmpm_pmg_sel; /* indicate which clocks are turned on - * (LMPM_PMG_SEL) */ - u32 u_timestamp; /* indicate when the date and time of the - * compilation */ - u32 flow_handler; /* FH read/write pointers, RX credit */ -} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */; - -struct iwl_error_event_table { - u32 valid; /* (nonzero) valid, (0) log is empty */ - u32 error_id; /* type of error */ - u32 pc; /* program counter */ - u32 blink1; /* branch link */ - u32 blink2; /* branch 
link */ - u32 ilink1; /* interrupt link */ - u32 ilink2; /* interrupt link */ - u32 data1; /* error-specific data */ - u32 data2; /* error-specific data */ - u32 data3; /* error-specific data */ - u32 bcon_time; /* beacon timer */ - u32 tsf_low; /* network timestamp function timer */ - u32 tsf_hi; /* network timestamp function timer */ - u32 gp1; /* GP1 timer register */ - u32 gp2; /* GP2 timer register */ - u32 gp3; /* GP3 timer register */ - u32 major; /* uCode version major */ - u32 minor; /* uCode version minor */ - u32 hw_ver; /* HW Silicon version */ - u32 brd_ver; /* HW board version */ - u32 log_pc; /* log program counter */ - u32 frame_ptr; /* frame pointer */ - u32 stack_ptr; /* stack pointer */ - u32 hcmd; /* last host command header */ - u32 isr0; /* isr status register LMPM_NIC_ISR0: - * rxtx_flag */ - u32 isr1; /* isr status register LMPM_NIC_ISR1: - * host_flag */ - u32 isr2; /* isr status register LMPM_NIC_ISR2: - * enc_flag */ - u32 isr3; /* isr status register LMPM_NIC_ISR3: - * time_flag */ - u32 isr4; /* isr status register LMPM_NIC_ISR4: - * wico interrupt */ - u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ - u32 wait_event; /* wait event() caller address */ - u32 l2p_control; /* L2pControlField */ - u32 l2p_duration; /* L2pDurationField */ - u32 l2p_mhvalid; /* L2pMhValidBits */ - u32 l2p_addr_match; /* L2pAddrMatchStat */ - u32 lmpm_pmg_sel; /* indicate which clocks are turned on - * (LMPM_PMG_SEL) */ - u32 u_timestamp; /* indicate when the date and time of the - * compilation */ - u32 flow_handler; /* FH read/write pointers, RX credit */ -} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */; - -/* - * UMAC error struct - relevant starting from family 8000 chip. - * Note: This structure is read from the device with IO accesses, - * and the reading already does the endian conversion. As it is - * read with u32-sized accesses, any members with a different size - * need to be ordered correctly though! - */ -struct iwl_umac_error_event_table { - u32 valid; /* (nonzero) valid, (0) log is empty */ - u32 error_id; /* type of error */ - u32 blink1; /* branch link */ - u32 blink2; /* branch link */ - u32 ilink1; /* interrupt link */ - u32 ilink2; /* interrupt link */ - u32 data1; /* error-specific data */ - u32 data2; /* error-specific data */ - u32 data3; /* error-specific data */ - u32 umac_major; - u32 umac_minor; - u32 frame_pointer; /* core register 27*/ - u32 stack_pointer; /* core register 28 */ - u32 cmd_header; /* latest host cmd sent to UMAC */ - u32 nic_isr_pref; /* ISR status register */ -} __packed; - -#define ERROR_START_OFFSET (1 * sizeof(u32)) -#define ERROR_ELEM_SIZE (7 * sizeof(u32)) - -static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) -{ - struct iwl_trans *trans = mvm->trans; - struct iwl_umac_error_event_table table; - u32 base; - - base = mvm->umac_error_event_table; - - if (base < 0x800000) { - IWL_ERR(mvm, - "Not valid error log pointer 0x%08X for %s uCode\n", - base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? 
"Init" : "RT"); - return; - } - - iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); - - if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { - IWL_ERR(trans, "Start IWL Error Log Dump:\n"); - IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", - mvm->status, table.valid); - } - - IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, - desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); - IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); - IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); - IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); - IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); - IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); - IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); - IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major); - IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor); - IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); - IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); - IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); - IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); -} - -static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm) -{ - struct iwl_trans *trans = mvm->trans; - struct iwl_error_event_table_v1 table; - u32 base; - - base = mvm->error_event_table; - if (mvm->cur_ucode == IWL_UCODE_INIT) { - if (!base) - base = mvm->fw->init_errlog_ptr; - } else { - if (!base) - base = mvm->fw->inst_errlog_ptr; - } - - if (base < 0x800000) { - IWL_ERR(mvm, - "Not valid error log pointer 0x%08X for %s uCode\n", - base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); - return; - } - - iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); - - if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { - IWL_ERR(trans, "Start IWL Error Log Dump:\n"); - IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", - mvm->status, table.valid); - } - - /* Do not change this output - scripts rely on it */ - - IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); - - trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, - table.data1, table.data2, table.data3, - table.blink1, table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.ucode_ver, 0, - table.hw_ver, table.brd_ver); - IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, - desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); - IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); - IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); - IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); - IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); - IWL_ERR(mvm, "0x%08X | data1\n", table.data1); - IWL_ERR(mvm, "0x%08X | data2\n", table.data2); - IWL_ERR(mvm, "0x%08X | data3\n", table.data3); - IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); - IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); - IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); - IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); - IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); - IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); - IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver); - IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); - IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); - IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); - IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); - IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); - IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); - IWL_ERR(mvm, 
"0x%08X | isr3\n", table.isr3); - IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); - IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); - IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); - IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); - IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); - IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); - IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); - IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); - IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); - IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); - - if (mvm->support_umac_log) - iwl_mvm_dump_umac_error_log(mvm); -} - -void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) -{ - struct iwl_trans *trans = mvm->trans; - struct iwl_error_event_table table; - u32 base; - - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) { - iwl_mvm_dump_nic_error_log_old(mvm); - return; - } - - base = mvm->error_event_table; - if (mvm->cur_ucode == IWL_UCODE_INIT) { - if (!base) - base = mvm->fw->init_errlog_ptr; - } else { - if (!base) - base = mvm->fw->inst_errlog_ptr; - } - - if (base < 0x800000) { - IWL_ERR(mvm, - "Not valid error log pointer 0x%08X for %s uCode\n", - base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); - return; - } - - iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); - - if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { - IWL_ERR(trans, "Start IWL Error Log Dump:\n"); - IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", - mvm->status, table.valid); - } - - /* Do not change this output - scripts rely on it */ - - IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); - - trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, - table.data1, table.data2, table.data3, - table.blink1, table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.major, - table.minor, table.hw_ver, table.brd_ver); - IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, - desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); - IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); - IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); - IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); - IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); - IWL_ERR(mvm, "0x%08X | data1\n", table.data1); - IWL_ERR(mvm, "0x%08X | data2\n", table.data2); - IWL_ERR(mvm, "0x%08X | data3\n", table.data3); - IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); - IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); - IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); - IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); - IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); - IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); - IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); - IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); - IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); - IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); - IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); - IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); - IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); - IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); - IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); - IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); - IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); - IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); - IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); - IWL_ERR(mvm, 
"0x%08X | l2p_duration\n", table.l2p_duration); - IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); - IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); - IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); - IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); - IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); - - if (mvm->support_umac_log) - iwl_mvm_dump_umac_error_log(mvm); -} - -int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq) -{ - int i; - - lockdep_assert_held(&mvm->queue_info_lock); - - for (i = minq; i <= maxq; i++) - if (mvm->queue_info[i].hw_queue_refcount == 0 && - !mvm->queue_info[i].setup_reserved) - return i; - - return -ENOSPC; -} - -void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout) -{ - bool enable_queue = true; - - spin_lock_bh(&mvm->queue_info_lock); - - /* Make sure this TID isn't already enabled */ - if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) { - spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n", - cfg->tid); - return; - } - - /* Update mappings and refcounts */ - mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue); - mvm->queue_info[queue].hw_queue_refcount++; - if (mvm->queue_info[queue].hw_queue_refcount > 1) - enable_queue = false; - mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); - - IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", - queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211); - - spin_unlock_bh(&mvm->queue_info_lock); - - /* Send the enabling command if we need to */ - if (enable_queue) { - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .enable = 1, - .window = cfg->frame_limit, - .sta_id = cfg->sta_id, - .ssn = cpu_to_le16(ssn), - .tx_fifo = cfg->fifo, - .aggregate = cfg->aggregate, - .tid = cfg->tid, - }; - - iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, - wdg_timeout); - WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), - &cmd), - "Failed to configure queue %d on FIFO %d\n", queue, - cfg->fifo); - } -} - -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags) -{ - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .enable = 0, - }; - bool remove_mac_queue = true; - int ret; - - spin_lock_bh(&mvm->queue_info_lock); - - if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { - spin_unlock_bh(&mvm->queue_info_lock); - return; - } - - mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); - - /* - * If there is another TID with the same AC - don't remove the MAC queue - * from the mapping - */ - if (tid < IWL_MAX_TID_COUNT) { - unsigned long tid_bitmap = - mvm->queue_info[queue].tid_bitmap; - int ac = tid_to_mac80211_ac[tid]; - int i; - - for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { - if (tid_to_mac80211_ac[i] == ac) - remove_mac_queue = false; - } - } - - if (remove_mac_queue) - mvm->queue_info[queue].hw_queue_to_mac80211 &= - ~BIT(mac80211_queue); - mvm->queue_info[queue].hw_queue_refcount--; - - cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 
1 : 0; - - IWL_DEBUG_TX_QUEUES(mvm, - "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", - queue, - mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211); - - /* If the queue is still enabled - nothing left to do in this func */ - if (cmd.enable) { - spin_unlock_bh(&mvm->queue_info_lock); - return; - } - - /* Make sure queue info is correct even though we overwrite it */ - WARN(mvm->queue_info[queue].hw_queue_refcount || - mvm->queue_info[queue].tid_bitmap || - mvm->queue_info[queue].hw_queue_to_mac80211, - "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", - queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211, - mvm->queue_info[queue].tid_bitmap); - - /* If we are here - the queue is freed and we can zero out these vals */ - mvm->queue_info[queue].hw_queue_refcount = 0; - mvm->queue_info[queue].tid_bitmap = 0; - mvm->queue_info[queue].hw_queue_to_mac80211 = 0; - - spin_unlock_bh(&mvm->queue_info_lock); - - iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, - sizeof(cmd), &cmd); - if (ret) - IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", - queue, ret); -} - -/** - * iwl_mvm_send_lq_cmd() - Send link quality command - * @init: This command is sent as part of station initialization right - * after station has been added. - * - * The link quality command is sent as the last step of station creation. - * This is the special case in which init is set and we call a callback in - * this case to clear the state indicating that station creation is in - * progress. - */ -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) -{ - struct iwl_host_cmd cmd = { - .id = LQ_CMD, - .len = { sizeof(struct iwl_lq_cmd), }, - .flags = init ? 0 : CMD_ASYNC, - .data = { lq, }, - }; - - if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT)) - return -EINVAL; - - return iwl_mvm_send_cmd(mvm, &cmd); -} - -/** - * iwl_mvm_update_smps - Get a request to change the SMPS mode - * @req_type: The part of the driver who call for a change. - * @smps_requests: The request to change the SMPS mode. - * - * Get a requst to change the SMPS mode, - * and change it according to all other requests in the driver. - */ -void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum iwl_mvm_smps_type_request req_type, - enum ieee80211_smps_mode smps_request) -{ - struct iwl_mvm_vif *mvmvif; - enum ieee80211_smps_mode smps_mode; - int i; - - lockdep_assert_held(&mvm->mutex); - - /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */ - if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) - return; - - if (vif->type == NL80211_IFTYPE_AP) - smps_mode = IEEE80211_SMPS_OFF; - else - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - mvmvif = iwl_mvm_vif_from_mac80211(vif); - mvmvif->smps_requests[req_type] = smps_request; - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { - smps_mode = IEEE80211_SMPS_STATIC; - break; - } - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) - smps_mode = IEEE80211_SMPS_DYNAMIC; - } - - ieee80211_request_smps(vif, smps_mode); -} - -int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) -{ - struct iwl_statistics_cmd scmd = { - .flags = clear ? 
cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, - }; - struct iwl_host_cmd cmd = { - .id = STATISTICS_CMD, - .len[0] = sizeof(scmd), - .data[0] = &scmd, - .flags = CMD_WANT_SKB, - }; - int ret; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) - return ret; - - iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); - iwl_free_resp(&cmd); - - if (clear) - iwl_mvm_accu_radio_stats(mvm); - - return 0; -} - -void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm) -{ - mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time; - mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time; - mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf; - mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan; -} - -static void iwl_mvm_diversity_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - bool *result = _data; - int i; - - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC || - mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) - *result = false; - } -} - -bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) -{ - bool result = true; - - lockdep_assert_held(&mvm->mutex); - - if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) - return false; - - if (mvm->cfg->rx_with_siso_diversity) - return false; - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_diversity_iter, &result); - - return result; -} - -int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool value) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int res; - - lockdep_assert_held(&mvm->mutex); - - if (mvmvif->low_latency == value) - return 0; - - mvmvif->low_latency = value; - - res = iwl_mvm_update_quotas(mvm, false, NULL); - if (res) - return res; - - iwl_mvm_bt_coex_vif_change(mvm); - - return iwl_mvm_power_update_mac(mvm); -} - -static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) -{ - bool *result = _data; - - if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif))) - *result = true; -} - -bool iwl_mvm_low_latency(struct iwl_mvm *mvm) -{ - bool result = false; - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_ll_iter, &result); - - return result; -} - -struct iwl_bss_iter_data { - struct ieee80211_vif *vif; - bool error; -}; - -static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_bss_iter_data *data = _data; - - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) - return; - - if (data->vif) { - data->error = true; - return; - } - - data->vif = vif; -} - -struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) -{ - struct iwl_bss_iter_data bss_iter_data = {}; - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bss_iface_iterator, &bss_iter_data); - - if (bss_iter_data.error) { - IWL_ERR(mvm, "More than one managed interface active!\n"); - return ERR_PTR(-EINVAL); - } - - return bss_iter_data.vif; -} - -unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool tdls, bool cmd_q) -{ - struct iwl_fw_dbg_trigger_tlv *trigger; - struct iwl_fw_dbg_trigger_txq_timer *txq_timer; - unsigned int default_timeout = - cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) - return iwlmvm_mod_params.tfd_q_hang_detect ? 
- default_timeout : IWL_WATCHDOG_DISABLED; - - trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); - txq_timer = (void *)trigger->data; - - if (tdls) - return le32_to_cpu(txq_timer->tdls); - - if (cmd_q) - return le32_to_cpu(txq_timer->command_queue); - - if (WARN_ON(!vif)) - return default_timeout; - - switch (ieee80211_vif_type_p2p(vif)) { - case NL80211_IFTYPE_ADHOC: - return le32_to_cpu(txq_timer->ibss); - case NL80211_IFTYPE_STATION: - return le32_to_cpu(txq_timer->bss); - case NL80211_IFTYPE_AP: - return le32_to_cpu(txq_timer->softap); - case NL80211_IFTYPE_P2P_CLIENT: - return le32_to_cpu(txq_timer->p2p_client); - case NL80211_IFTYPE_P2P_GO: - return le32_to_cpu(txq_timer->p2p_go); - case NL80211_IFTYPE_P2P_DEVICE: - return le32_to_cpu(txq_timer->p2p_device); - default: - WARN_ON(1); - return mvm->cfg->base_params->wd_timeout; - } -} - -void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - const char *errmsg) -{ - struct iwl_fw_dbg_trigger_tlv *trig; - struct iwl_fw_dbg_trigger_mlme *trig_mlme; - - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) - goto out; - - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); - trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) - goto out; - - if (trig_mlme->stop_connection_loss && - --trig_mlme->stop_connection_loss) - goto out; - - iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg); - -out: - ieee80211_connection_loss(vif); -} diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c deleted file mode 100644 index 644b58bc5226..000000000000 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ /dev/null @@ -1,685 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
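iwl_mvm_get_wd_timeout() above reduces to two early-outs (TDLS and the command queue) followed by a switch over the interface type, falling back to the config default when the type is unexpected. A trimmed stand-alone version of that selection, with invented enum names and timeout values:

#include <stdio.h>

enum iftype { IF_ADHOC, IF_STATION, IF_AP, IF_P2P_CLIENT, IF_P2P_GO, IF_P2P_DEVICE };

/* Stand-in for the txq_timer block read from the firmware debug TLVs. */
struct txq_timers {
	unsigned int tdls, command_queue, ibss, bss;
	unsigned int softap, p2p_client, p2p_go, p2p_device;
};

static unsigned int wd_timeout(const struct txq_timers *t, enum iftype type,
			       int tdls, int cmd_q, unsigned int dflt)
{
	if (tdls)
		return t->tdls;
	if (cmd_q)
		return t->command_queue;

	switch (type) {
	case IF_ADHOC:		return t->ibss;
	case IF_STATION:	return t->bss;
	case IF_AP:		return t->softap;
	case IF_P2P_CLIENT:	return t->p2p_client;
	case IF_P2P_GO:		return t->p2p_go;
	case IF_P2P_DEVICE:	return t->p2p_device;
	default:		return dflt;
	}
}

int main(void)
{
	struct txq_timers t = { .bss = 2500, .tdls = 1000, .command_queue = 2000 };

	printf("%u\n", wd_timeout(&t, IF_STATION, 0, 0, 2500));	/* 2500 */
	printf("%u\n", wd_timeout(&t, IF_AP, 1, 0, 2500));	/* 1000, TDLS wins */
	return 0;
}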
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include - -#include "iwl-trans.h" -#include "iwl-drv.h" -#include "internal.h" - -#define IWL_PCI_DEVICE(dev, subdev, cfg) \ - .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ - .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ - .driver_data = (kernel_ulong_t)&(cfg) - -/* Hardware specific file defines the PCI IDs table for that hardware module */ -static const struct pci_device_id iwl_hw_card_ids[] = { -#if IS_ENABLED(CONFIG_IWLDVM) - {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ - -/* 5300 
Series WiFi */ - {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ - -/* 5350 Series WiFi/WiMax */ - {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ - -/* 5150 Series Wifi/WiMax */ - {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ - - {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ - -/* 6x00 Series */ - {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, - {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, - -/* 6x05 Series */ - {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ - 
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ - -/* 6x30 Series */ - {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, - -/* 6x50 WiFi/WiMax Series */ - {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, - -/* 6150 WiFi/WiMax Series */ - {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, - {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, - -/* 1000 Series WiFi */ - {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, - -/* 100 Series WiFi */ - {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, - {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, - -/* 130 Series WiFi */ - {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, - {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, - -/* 2x00 Series */ - {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, - -/* 2x30 Series */ - {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, - -/* 6x35 Series */ - {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)}, - 
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, - -/* 105 Series */ - {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, - -/* 135 Series */ - {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, -#endif /* CONFIG_IWLDVM */ - -#if IS_ENABLED(CONFIG_IWLMVM) -/* 7260 Series */ - {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, - {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC260, 
iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, - -/* 3160 Series */ - {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, - -/* 3165 Series */ - {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, - -/* 7265 Series */ - {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, - 
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, - -/* 8000 Series */ - {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, - {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, -#endif /* CONFIG_IWLMVM */ - - {0} -}; -MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); - -#ifdef CONFIG_ACPI -#define SPL_METHOD "SPLC" -#define SPL_DOMAINTYPE_MODULE BIT(0) -#define SPL_DOMAINTYPE_WIFI BIT(1) -#define SPL_DOMAINTYPE_WIGIG BIT(2) -#define SPL_DOMAINTYPE_RFEM BIT(3) - -static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) -{ - union acpi_object *limits, *domain_type, *power_limit; - - if (splx->type != ACPI_TYPE_PACKAGE || - splx->package.count != 2 || - splx->package.elements[0].type != ACPI_TYPE_INTEGER || - splx->package.elements[0].integer.value != 0) { - IWL_ERR(trans, "Unsupported splx structure\n"); - return 0; - } - - limits = &splx->package.elements[1]; - if (limits->type != ACPI_TYPE_PACKAGE || - limits->package.count < 2 || 
- limits->package.elements[0].type != ACPI_TYPE_INTEGER || - limits->package.elements[1].type != ACPI_TYPE_INTEGER) { - IWL_ERR(trans, "Invalid limits element\n"); - return 0; - } - - domain_type = &limits->package.elements[0]; - power_limit = &limits->package.elements[1]; - if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { - IWL_DEBUG_INFO(trans, "WiFi power is not limited\n"); - return 0; - } - - return power_limit->integer.value; -} - -static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) -{ - acpi_handle pxsx_handle; - acpi_handle handle; - struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_status status; - - pxsx_handle = ACPI_HANDLE(&pdev->dev); - if (!pxsx_handle) { - IWL_DEBUG_INFO(trans, - "Could not retrieve root port ACPI handle\n"); - return; - } - - /* Get the method's handle */ - status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); - if (ACPI_FAILURE(status)) { - IWL_DEBUG_INFO(trans, "SPL method not found\n"); - return; - } - - /* Call SPLC with no arguments */ - status = acpi_evaluate_object(handle, NULL, NULL, &splx); - if (ACPI_FAILURE(status)) { - IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); - return; - } - - trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); - IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", - trans->dflt_pwr_limit); - kfree(splx.pointer); -} - -#else /* CONFIG_ACPI */ -static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {} -#endif - -/* PCI registers */ -#define PCI_CFG_RETRY_TIMEOUT 0x041 - -static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); - const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; - struct iwl_trans *iwl_trans; - struct iwl_trans_pcie *trans_pcie; - int ret; - - iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); - if (IS_ERR(iwl_trans)) - return PTR_ERR(iwl_trans); - -#if IS_ENABLED(CONFIG_IWLMVM) - /* - * special-case 7265D, it has the same PCI IDs. - * - * Note that because we already pass the cfg to the transport above, - * all the parameters that the transport uses must, until that is - * changed, be identical to the ones in the 7265D configuration. 
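splx_get_pwr_limit() above is deliberately defensive: it validates the shape of the SPLX package at each level and only trusts the limit when the WiFi domain bit is set. Below is a compressed stand-alone model of those checks; the struct used here merely stands in for union acpi_object and is not the real ACPI interface.

#include <stdio.h>

#define DOMAIN_WIFI	0x2	/* mirrors SPL_DOMAINTYPE_WIFI = BIT(1) */

/* Tiny illustrative stand-in for the ACPI object tree. */
struct obj {
	int is_package;
	unsigned long long value;	/* used when !is_package */
	int count;			/* used when is_package */
	struct obj *elem;
};

static unsigned long long get_pwr_limit(const struct obj *splx)
{
	const struct obj *limits, *domain, *limit;

	if (!splx->is_package || splx->count != 2 ||
	    splx->elem[0].is_package || splx->elem[0].value != 0)
		return 0;		/* unsupported SPLX structure */

	limits = &splx->elem[1];
	if (!limits->is_package || limits->count < 2 ||
	    limits->elem[0].is_package || limits->elem[1].is_package)
		return 0;		/* invalid limits element */

	domain = &limits->elem[0];
	limit = &limits->elem[1];
	if (!(domain->value & DOMAIN_WIFI))
		return 0;		/* WiFi power is not limited */

	return limit->value;
}

int main(void)
{
	struct obj inner[2] = { { .value = DOMAIN_WIFI }, { .value = 1500 } };
	struct obj outer[2] = { { .value = 0 },
				{ .is_package = 1, .count = 2, .elem = inner } };
	struct obj splx = { .is_package = 1, .count = 2, .elem = outer };

	printf("%llu\n", get_pwr_limit(&splx));	/* 1500 */
	return 0;
}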
- */ - if (cfg == &iwl7265_2ac_cfg) - cfg_7265d = &iwl7265d_2ac_cfg; - else if (cfg == &iwl7265_2n_cfg) - cfg_7265d = &iwl7265d_2n_cfg; - else if (cfg == &iwl7265_n_cfg) - cfg_7265d = &iwl7265d_n_cfg; - if (cfg_7265d && - (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { - cfg = cfg_7265d; - iwl_trans->cfg = cfg_7265d; - } -#endif - - pci_set_drvdata(pdev, iwl_trans); - - trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); - trans_pcie->drv = iwl_drv_start(iwl_trans, cfg); - - if (IS_ERR(trans_pcie->drv)) { - ret = PTR_ERR(trans_pcie->drv); - goto out_free_trans; - } - - set_dflt_pwr_limit(iwl_trans, pdev); - - /* register transport layer debugfs here */ - ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir); - if (ret) - goto out_free_drv; - - return 0; - -out_free_drv: - iwl_drv_stop(trans_pcie->drv); -out_free_trans: - iwl_trans_pcie_free(iwl_trans); - return ret; -} - -static void iwl_pci_remove(struct pci_dev *pdev) -{ - struct iwl_trans *trans = pci_get_drvdata(pdev); - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - iwl_drv_stop(trans_pcie->drv); - iwl_trans_pcie_free(trans); -} - -#ifdef CONFIG_PM_SLEEP - -static int iwl_pci_suspend(struct device *device) -{ - /* Before you put code here, think about WoWLAN. You cannot check here - * whether WoWLAN is enabled or not, and your code will run even if - * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. - */ - - return 0; -} - -static int iwl_pci_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - bool hw_rfkill; - - /* Before you put code here, think about WoWLAN. You cannot check here - * whether WoWLAN is enabled or not, and your code will run even if - * WoWLAN is enabled - the NIC may be alive. - */ - - /* - * We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state. - */ - pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - - if (!trans->op_mode) - return 0; - - /* - * Enable rfkill interrupt (in order to keep track of - * the rfkill status) - */ - iwl_enable_rfkill_int(trans); - - hw_rfkill = iwl_is_rfkill_set(trans); - - mutex_lock(&trans_pcie->mutex); - iwl_trans_pcie_rf_kill(trans, hw_rfkill); - mutex_unlock(&trans_pcie->mutex); - - return 0; -} - -static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); - -#define IWL_PM_OPS (&iwl_dev_pm_ops) - -#else - -#define IWL_PM_OPS NULL - -#endif - -static struct pci_driver iwl_pci_driver = { - .name = DRV_NAME, - .id_table = iwl_hw_card_ids, - .probe = iwl_pci_probe, - .remove = iwl_pci_remove, - .driver.pm = IWL_PM_OPS, -}; - -int __must_check iwl_pci_register_driver(void) -{ - int ret; - ret = pci_register_driver(&iwl_pci_driver); - if (ret) - pr_err("Unable to initialize PCI module\n"); - - return ret; -} - -void iwl_pci_unregister_driver(void) -{ - pci_unregister_driver(&iwl_pci_driver); -} diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h deleted file mode 100644 index feb2f7e81134..000000000000 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ /dev/null @@ -1,569 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. 
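The probe path above has to special-case 7265D because it shares PCI IDs with plain 7265; the decision is made by masking the hardware revision register and comparing the type field. A stand-alone sketch of that mask-and-compare, using placeholder constants rather than the real CSR_HW_REV_* values from iwl-csr.h:

#include <stdio.h>

#define HW_REV_TYPE_MSK		0x000fff0u	/* placeholder mask */
#define HW_REV_TYPE_7265D	0x0000210u	/* placeholder type value */

struct cfg { const char *name; };

static const struct cfg cfg_7265  = { "iwl7265" };
static const struct cfg cfg_7265d = { "iwl7265d" };

/* Same shape as the probe logic: keep the cfg chosen from the PCI ID table
 * unless the revision register says the part is really a 7265D. */
static const struct cfg *pick_cfg(const struct cfg *from_pci_id,
				  unsigned int hw_rev)
{
	if (from_pci_id == &cfg_7265 &&
	    (hw_rev & HW_REV_TYPE_MSK) == HW_REV_TYPE_7265D)
		return &cfg_7265d;
	return from_pci_id;
}

int main(void)
{
	printf("%s\n", pick_cfg(&cfg_7265, 0x0000210)->name);	/* iwl7265d */
	printf("%s\n", pick_cfg(&cfg_7265, 0x0000110)->name);	/* iwl7265 */
	return 0;
}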
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#ifndef __iwl_trans_int_pcie_h__ -#define __iwl_trans_int_pcie_h__ - -#include -#include -#include -#include -#include -#include - -#include "iwl-fh.h" -#include "iwl-csr.h" -#include "iwl-trans.h" -#include "iwl-debug.h" -#include "iwl-io.h" -#include "iwl-op-mode.h" - -/* We need 2 entries for the TX command and header, and another one might - * be needed for potential data in the SKB's head. The remaining ones can - * be used for frags. - */ -#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3) - -/* - * RX related structures and functions - */ -#define RX_NUM_QUEUES 1 -#define RX_POST_REQ_ALLOC 2 -#define RX_CLAIM_REQ_ALLOC 8 -#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) -#define RX_LOW_WATERMARK 8 - -struct iwl_host_cmd; - -/*This file includes the declaration that are internal to the - * trans_pcie layer */ - -struct iwl_rx_mem_buffer { - dma_addr_t page_dma; - struct page *page; - struct list_head list; -}; - -/** - * struct isr_statistics - interrupt statistics - * - */ -struct isr_statistics { - u32 hw; - u32 sw; - u32 err_code; - u32 sch; - u32 alive; - u32 rfkill; - u32 ctkill; - u32 wakeup; - u32 rx; - u32 tx; - u32 unhandled; -}; - -/** - * struct iwl_rxq - Rx queue - * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) - * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) - * @read: Shared index to newest available Rx buffer - * @write: Shared index to oldest written Rx packet - * @free_count: Number of pre-allocated buffers in rx_free - * @used_count: Number of RBDs handled to allocator to use for allocation - * @write_actual: - * @rx_free: list of RBDs with allocated RB ready for use - * @rx_used: list of RBDs with no RB attached - * @need_update: flag to indicate we need to update read/write index - * @rb_stts: driver's pointer to receive buffer status - * @rb_stts_dma: bus address of receive buffer status - * @lock: - * @pool: initial pool of iwl_rx_mem_buffer for the queue - * @queue: actual rx queue - * - * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers - */ -struct iwl_rxq { - __le32 *bd; - dma_addr_t bd_dma; - u32 read; - u32 write; - u32 free_count; - u32 used_count; - u32 write_actual; - struct list_head rx_free; - struct list_head rx_used; - bool need_update; - struct iwl_rb_status *rb_stts; - dma_addr_t rb_stts_dma; 
- spinlock_t lock; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; -}; - -/** - * struct iwl_rb_allocator - Rx allocator - * @pool: initial pool of allocator - * @req_pending: number of requests the allcator had not processed yet - * @req_ready: number of requests honored and ready for claiming - * @rbd_allocated: RBDs with pages allocated and ready to be handled to - * the queue. This is a list of &struct iwl_rx_mem_buffer - * @rbd_empty: RBDs with no page attached for allocator use. This is a list - * of &struct iwl_rx_mem_buffer - * @lock: protects the rbd_allocated and rbd_empty lists - * @alloc_wq: work queue for background calls - * @rx_alloc: work struct for background calls - */ -struct iwl_rb_allocator { - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; - atomic_t req_pending; - atomic_t req_ready; - struct list_head rbd_allocated; - struct list_head rbd_empty; - spinlock_t lock; - struct workqueue_struct *alloc_wq; - struct work_struct rx_alloc; -}; - -struct iwl_dma_ptr { - dma_addr_t dma; - void *addr; - size_t size; -}; - -/** - * iwl_queue_inc_wrap - increment queue index, wrap back to beginning - * @index -- current index - */ -static inline int iwl_queue_inc_wrap(int index) -{ - return ++index & (TFD_QUEUE_SIZE_MAX - 1); -} - -/** - * iwl_queue_dec_wrap - decrement queue index, wrap back to end - * @index -- current index - */ -static inline int iwl_queue_dec_wrap(int index) -{ - return --index & (TFD_QUEUE_SIZE_MAX - 1); -} - -struct iwl_cmd_meta { - /* only for SYNC commands, iff the reply skb is wanted */ - struct iwl_host_cmd *source; - u32 flags; -}; - -/* - * Generic queue structure - * - * Contains common data for Rx and Tx queues. - * - * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware - * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless - * there might be HW changes in the future). For the normal TX - * queues, n_window, which is the size of the software queue data - * is also 256; however, for the command queue, n_window is only - * 32 since we don't need so many commands pending. Since the HW - * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result, - * the software buffers (in the variables @meta, @txb in struct - * iwl_txq) only have 32 entries, while the HW buffers (@tfds in - * the same struct) have 256. - * This means that we end up with the following: - * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | - * SW entries: | 0 | ... | 31 | - * where N is a number between 0 and 7. This means that the SW - * data is a window overlayed over the HW queue. - */ -struct iwl_queue { - int write_ptr; /* 1-st empty entry (index) host_w*/ - int read_ptr; /* last used entry (index) host_r*/ - /* use for monitoring and recovering the stuck queue */ - dma_addr_t dma_addr; /* physical addr for BD's */ - int n_window; /* safe queue window */ - u32 id; - int low_mark; /* low watermark, resume queue if free - * space more than this */ - int high_mark; /* high watermark, stop queue if free - * space less than this */ -}; - -#define TFD_TX_CMD_SLOTS 256 -#define TFD_CMD_SLOTS 32 - -/* - * The FH will write back to the first TB only, so we need - * to copy some data into the buffer regardless of whether - * it should be mapped or not. This indicates how big the - * first TB must be to include the scratch buffer. Since - * the scratch is 4 bytes at offset 12, it's 16 now. 
If we - * make it bigger then allocations will be bigger and copy - * slower, so that's probably not useful. - */ -#define IWL_HCMD_SCRATCHBUF_SIZE 16 - -struct iwl_pcie_txq_entry { - struct iwl_device_cmd *cmd; - struct sk_buff *skb; - /* buffer to free after command completes */ - const void *free_buf; - struct iwl_cmd_meta meta; -}; - -struct iwl_pcie_txq_scratch_buf { - struct iwl_cmd_header hdr; - u8 buf[8]; - __le32 scratch; -}; - -/** - * struct iwl_txq - Tx Queue for DMA - * @q: generic Rx/Tx queue descriptor - * @tfds: transmit frame descriptors (DMA memory) - * @scratchbufs: start of command headers, including scratch buffers, for - * the writeback -- this is DMA memory and an array holding one buffer - * for each command on the queue - * @scratchbufs_dma: DMA address for the scratchbufs start - * @entries: transmit entries (driver state) - * @lock: queue lock - * @stuck_timer: timer that fires if queue gets stuck - * @trans_pcie: pointer back to transport (for timer) - * @need_update: indicates need to update read/write index - * @active: stores if queue is active - * @ampdu: true if this queue is an ampdu queue for an specific RA/TID - * @wd_timeout: queue watchdog timeout (jiffies) - per queue - * @frozen: tx stuck queue timer is frozen - * @frozen_expiry_remainder: remember how long until the timer fires - * - * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame - * descriptors) and required locking structures. - */ -struct iwl_txq { - struct iwl_queue q; - struct iwl_tfd *tfds; - struct iwl_pcie_txq_scratch_buf *scratchbufs; - dma_addr_t scratchbufs_dma; - struct iwl_pcie_txq_entry *entries; - spinlock_t lock; - unsigned long frozen_expiry_remainder; - struct timer_list stuck_timer; - struct iwl_trans_pcie *trans_pcie; - bool need_update; - bool frozen; - u8 active; - bool ampdu; - unsigned long wd_timeout; -}; - -static inline dma_addr_t -iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) -{ - return txq->scratchbufs_dma + - sizeof(struct iwl_pcie_txq_scratch_buf) * idx; -} - -/** - * struct iwl_trans_pcie - PCIe transport specific data - * @rxq: all the RX queue data - * @rba: allocator for RX replenishing - * @drv - pointer to iwl_drv - * @trans: pointer to the generic transport area - * @scd_base_addr: scheduler sram base address in SRAM - * @scd_bc_tbls: pointer to the byte count table of the scheduler - * @kw: keep warm address - * @pci_dev: basic pci-network driver stuff - * @hw_base: pci hardware address support - * @ucode_write_complete: indicates that the ucode has been copied. 
- * @ucode_write_waitq: wait queue for uCode load - * @cmd_queue - command queue number - * @rx_buf_size_8k: 8 kB RX buffer size - * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) - * @scd_set_active: should the transport configure the SCD for HCMD queue - * @wide_cmd_header: true when ucode supports wide command header format - * @rx_page_order: page order for receive buffer size - * @reg_lock: protect hw register access - * @mutex: to protect stop_device / start_fw / start_hw - * @cmd_in_flight: true when we have a host command in flight - * @fw_mon_phys: physical address of the buffer for the firmware monitor - * @fw_mon_page: points to the first page of the buffer for the firmware monitor - * @fw_mon_size: size of the buffer for the firmware monitor - */ -struct iwl_trans_pcie { - struct iwl_rxq rxq; - struct iwl_rb_allocator rba; - struct iwl_trans *trans; - struct iwl_drv *drv; - - struct net_device napi_dev; - struct napi_struct napi; - - /* INT ICT Table */ - __le32 *ict_tbl; - dma_addr_t ict_tbl_dma; - int ict_index; - bool use_ict; - bool is_down; - struct isr_statistics isr_stats; - - spinlock_t irq_lock; - struct mutex mutex; - u32 inta_mask; - u32 scd_base_addr; - struct iwl_dma_ptr scd_bc_tbls; - struct iwl_dma_ptr kw; - - struct iwl_txq *txq; - unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; - - /* PCI bus related data */ - struct pci_dev *pci_dev; - void __iomem *hw_base; - - bool ucode_write_complete; - wait_queue_head_t ucode_write_waitq; - wait_queue_head_t wait_command_queue; - - u8 cmd_queue; - u8 cmd_fifo; - unsigned int cmd_q_wdg_timeout; - u8 n_no_reclaim_cmds; - u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; - - bool rx_buf_size_8k; - bool bc_table_dword; - bool scd_set_active; - bool wide_cmd_header; - u32 rx_page_order; - - const char *const *command_names; - - /*protect hw register */ - spinlock_t reg_lock; - bool cmd_hold_nic_awake; - bool ref_cmd_in_flight; - - /* protect ref counter */ - spinlock_t ref_lock; - u32 ref_count; - - dma_addr_t fw_mon_phys; - struct page *fw_mon_page; - u32 fw_mon_size; -}; - -#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ - ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific)) - -static inline struct iwl_trans * -iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) -{ - return container_of((void *)trans_pcie, struct iwl_trans, - trans_specific); -} - -/* - * Convention: trans API functions: iwl_trans_pcie_XXX - * Other functions: iwl_pcie_XXX - */ -struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct iwl_cfg *cfg); -void iwl_trans_pcie_free(struct iwl_trans *trans); - -/***************************************************** -* RX -******************************************************/ -int iwl_pcie_rx_init(struct iwl_trans *trans); -irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); -int iwl_pcie_rx_stop(struct iwl_trans *trans); -void iwl_pcie_rx_free(struct iwl_trans *trans); - -/***************************************************** -* ICT - interrupt handling -******************************************************/ -irqreturn_t iwl_pcie_isr(int irq, void *data); -int iwl_pcie_alloc_ict(struct iwl_trans *trans); -void iwl_pcie_free_ict(struct iwl_trans *trans); -void iwl_pcie_reset_ict(struct iwl_trans *trans); -void iwl_pcie_disable_ict(struct iwl_trans *trans); - -/***************************************************** -* TX / HCMD 
-******************************************************/ -int iwl_pcie_tx_init(struct iwl_trans *trans); -void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); -int iwl_pcie_tx_stop(struct iwl_trans *trans); -void iwl_pcie_tx_free(struct iwl_trans *trans); -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, - const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout); -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, - bool configure_scd); -int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); -void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); -int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); -void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb); -void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs); -void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); - -void iwl_trans_pcie_ref(struct iwl_trans *trans); -void iwl_trans_pcie_unref(struct iwl_trans *trans); - -static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->hi_n_len) >> 4; -} - -/***************************************************** -* Error handling -******************************************************/ -void iwl_pcie_dump_csr(struct iwl_trans *trans); - -/***************************************************** -* Helpers -******************************************************/ -static inline void iwl_disable_interrupts(struct iwl_trans *trans) -{ - clear_bit(STATUS_INT_ENABLED, &trans->status); - - /* disable interrupts from uCode/NIC to host */ - iwl_write32(trans, CSR_INT_MASK, 0x00000000); - - /* acknowledge/clear/reset any interrupts still pending - * from uCode or flow handler (Rx/Tx DMA) */ - iwl_write32(trans, CSR_INT, 0xffffffff); - iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); - IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); -} - -static inline void iwl_enable_interrupts(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); - set_bit(STATUS_INT_ENABLED, &trans->status); - trans_pcie->inta_mask = CSR_INI_SET_MASK; - iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); -} - -static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); - trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; - iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); -} - -static inline void iwl_wake_queue(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { - IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); - iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); - } -} - -static inline void iwl_stop_queue(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { - iwl_op_mode_queue_full(trans->op_mode, txq->q.id); - IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id); - } else - IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", - txq->q.id); -} - -static inline bool iwl_queue_used(const struct 
iwl_queue *q, int i) -{ - return q->write_ptr >= q->read_ptr ? - (i >= q->read_ptr && i < q->write_ptr) : - !(i < q->read_ptr && i >= q->write_ptr); -} - -static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) -{ - return index & (q->n_window - 1); -} - -static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie, - u8 cmd) -{ - if (!trans_pcie->command_names || !trans_pcie->command_names[cmd]) - return "UNKNOWN"; - return trans_pcie->command_names[cmd]; -} - -static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) -{ - return !(iwl_read32(trans, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); -} - -static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, - u32 reg, u32 mask, u32 value) -{ - u32 v; - -#ifdef CONFIG_IWLWIFI_DEBUG - WARN_ON_ONCE(value & ~mask); -#endif - - v = iwl_read32(trans, reg); - v &= ~mask; - v |= value; - iwl_write32(trans, reg, v); -} - -static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, - u32 reg, u32 mask) -{ - __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); -} - -static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, - u32 reg, u32 mask) -{ - __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); -} - -void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); - -#endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c deleted file mode 100644 index e06591f625c4..000000000000 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ /dev/null @@ -1,1548 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include - -#include "iwl-prph.h" -#include "iwl-io.h" -#include "internal.h" -#include "iwl-op-mode.h" - -/****************************************************************************** - * - * RX path functions - * - ******************************************************************************/ - -/* - * Rx theory of operation - * - * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), - * each of which point to Receive Buffers to be filled by the NIC. These get - * used not only for Rx frames, but for any command response or notification - * from the NIC. 
The driver and NIC manage the Rx buffers by means - * of indexes into the circular buffer. - * - * Rx Queue Indexes - * The host/firmware share two index registers for managing the Rx buffers. - * - * The READ index maps to the first position that the firmware may be writing - * to -- the driver can read up to (but not including) this position and get - * good data. - * The READ index is managed by the firmware once the card is enabled. - * - * The WRITE index maps to the last position the driver has read from -- the - * position preceding WRITE is the last slot the firmware can place a packet. - * - * The queue is empty (no good data) if WRITE = READ - 1, and is full if - * WRITE = READ. - * - * During initialization, the host sets up the READ queue position to the first - * INDEX position, and WRITE to the last (READ - 1 wrapped) - * - * When the firmware places a packet in a buffer, it will advance the READ index - * and fire the RX interrupt. The driver can then query the READ index and - * process as many packets as possible, moving the WRITE index forward as it - * resets the Rx queue buffers with new memory. - * - * The management in the driver is as follows: - * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. - * When the interrupt handler is called, the request is processed. - * The page is either stolen - transferred to the upper layer - * or reused - added immediately to the iwl->rxq->rx_free list. - * + When the page is stolen - the driver updates the matching queue's used - * count, detaches the RBD and transfers it to the queue used list. - * When there are two used RBDs - they are transferred to the allocator empty - * list. Work is then scheduled for the allocator to start allocating - * eight buffers. - * When there are another 6 used RBDs - they are transferred to the allocator - * empty list and the driver tries to claim the pre-allocated buffers and - * add them to iwl->rxq->rx_free. If it fails - it continues to claim them - * until ready. - * When there are 8+ buffers in the free list - either from allocation or from - * 8 reused unstolen pages - restock is called to update the FW and indexes. - * + In order to make sure the allocator always has RBDs to use for allocation - * the allocator has initial pool in the size of num_queues*(8-2) - the - * maximum missing RBDs per allocation request (request posted with 2 - * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). - * The queues supplies the recycle of the rest of the RBDs. - * + A received packet is processed and handed to the kernel network stack, - * detached from the iwl->rxq. The driver 'processed' index is updated. - * + If there are no allocated buffers in iwl->rxq->rx_free, - * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. - * If there were enough free buffers and RX_STALLED is set it is cleared. - * - * - * Driver sequence: - * - * iwl_rxq_alloc() Allocates rx_free - * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock. - * Used only during initialization. - * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx - * queue, updates firmware pointers, and updates - * the WRITE index. - * iwl_pcie_rx_allocator() Background work for allocating pages. - * - * -- enable interrupts -- - * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the - * READ INDEX, detaching the SKB from the pool. - * Moves the packet buffer from queue to rx_used. - * Posts and claims requests to the allocator. 
- * Calls iwl_pcie_rxq_restock to refill any empty - * slots. - * - * RBD life-cycle: - * - * Init: - * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue - * - * Regular Receive interrupt: - * Page Stolen: - * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> - * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue - * Page not Stolen: - * rxq.queue -> rxq.rx_free -> rxq.queue - * ... - * - */ - -/* - * iwl_rxq_space - Return number of free slots available in queue. - */ -static int iwl_rxq_space(const struct iwl_rxq *rxq) -{ - /* Make sure RX_QUEUE_SIZE is a power of 2 */ - BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1)); - - /* - * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity - * between empty and completely full queues. - * The following is equivalent to modulo by RX_QUEUE_SIZE and is well - * defined for negative dividends. - */ - return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); -} - -/* - * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr - */ -static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) -{ - return cpu_to_le32((u32)(dma_addr >> 8)); -} - -/* - * iwl_pcie_rx_stop - stops the Rx DMA - */ -int iwl_pcie_rx_stop(struct iwl_trans *trans) -{ - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); - return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, - FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); -} - -/* - * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue - */ -static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 reg; - - lockdep_assert_held(&rxq->lock); - - /* - * explicitly wake up the NIC if: - * 1. shadow registers aren't enabled - * 2. there is a chance that the NIC is asleep - */ - if (!trans->cfg->base_params->shadow_reg_enable && - test_bit(STATUS_TPOWER_PMI, &trans->status)) { - reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); - - if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { - IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", - reg); - iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - rxq->need_update = true; - return; - } - } - - rxq->write_actual = round_down(rxq->write, 8); - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); -} - -static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - - spin_lock(&rxq->lock); - - if (!rxq->need_update) - goto exit_unlock; - - iwl_pcie_rxq_inc_wr_ptr(trans); - rxq->need_update = false; - - exit_unlock: - spin_unlock(&rxq->lock); -} - -/* - * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool - * - * If there are slots in the RX queue that need to be restocked, - * and we have free pre-allocated buffers, fill the ranks as much - * as we can, pulling from rx_free. - * - * This moves the 'write' index forward to catch up with 'processed', and - * also updates the memory address in the firmware to reference the new - * target buffer. - */ -static void iwl_pcie_rxq_restock(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rx_mem_buffer *rxb; - - /* - * If the device isn't enabled - not need to try to add buffers... - * This can happen when we stop the device and still have an interrupt - * pending. 
We stop the APM before we sync the interrupts because we - * have to (see comment there). On the other hand, since the APM is - * stopped, we cannot access the HW (in particular not prph). - * So don't try to restock if the APM has been already stopped. - */ - if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) - return; - - spin_lock(&rxq->lock); - while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { - /* The overwritten rxb must be a used one */ - rxb = rxq->queue[rxq->write]; - BUG_ON(rxb && rxb->page); - - /* Get next free Rx buffer, remove from free list */ - rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, - list); - list_del(&rxb->list); - - /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); - rxq->queue[rxq->write] = rxb; - rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; - rxq->free_count--; - } - spin_unlock(&rxq->lock); - - /* If we've added more space for the firmware to place data, tell it. - * Increment device's write pointer in multiples of 8. */ - if (rxq->write_actual != (rxq->write & ~0x7)) { - spin_lock(&rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans); - spin_unlock(&rxq->lock); - } -} - -/* - * iwl_pcie_rx_alloc_page - allocates and returns a page. - * - */ -static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, - gfp_t priority) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct page *page; - gfp_t gfp_mask = priority; - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", - trans_pcie->rx_page_order); - /* Issue an error if the hardware has consumed more than half - * of its free buffer list and we don't have enough - * pre-allocated buffers. -` */ - if (rxq->free_count <= RX_LOW_WATERMARK && - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && - net_ratelimit()) - IWL_CRIT(trans, - "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", - rxq->free_count); - return NULL; - } - return page; -} - -/* - * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD - * - * A used RBD is an Rx buffer that has been given to the stack. To use it again - * a page must be allocated and the RBD must point to the page. This function - * doesn't change the HW pointer but handles the list of pages that is used by - * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly - * allocated buffers. 
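
A minimal userspace sketch of the two address checks the allocation paths below enforce with BUG_ON(): the RBD stores the page address shifted right by 8, so the mapped address must be 256-byte aligned, and the driver limits it to 36 bits. The addresses and the local DMA_BIT_MASK() definition here are illustrative only, not part of the patch.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* Mirrors the two invariants checked after dma_map_page() succeeds. */
    static int rb_dma_addr_valid(uint64_t page_dma)
    {
        if (page_dma & DMA_BIT_MASK(8))     /* not 256-byte aligned */
            return 0;
        if (page_dma & ~DMA_BIT_MASK(36))   /* does not fit in 36 bits */
            return 0;
        return 1;
    }

    int main(void)
    {
        assert(!rb_dma_addr_valid(0x1000000100ULL)); /* beyond 36 bits */
        assert(!rb_dma_addr_valid(0x0012345678ULL)); /* unaligned */
        assert(rb_dma_addr_valid(0x0012345600ULL));  /* acceptable */
        printf("RB address checks behave as described\n");
        return 0;
    }
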
- */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rx_mem_buffer *rxb; - struct page *page; - - while (1) { - spin_lock(&rxq->lock); - if (list_empty(&rxq->rx_used)) { - spin_unlock(&rxq->lock); - return; - } - spin_unlock(&rxq->lock); - - /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans, priority); - if (!page) - return; - - spin_lock(&rxq->lock); - - if (list_empty(&rxq->rx_used)) { - spin_unlock(&rxq->lock); - __free_pages(page, trans_pcie->rx_page_order); - return; - } - rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, - list); - list_del(&rxb->list); - spin_unlock(&rxq->lock); - - BUG_ON(rxb->page); - rxb->page = page; - /* Get physical address of the RB */ - rxb->page_dma = - dma_map_page(trans->dev, page, 0, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - if (dma_mapping_error(trans->dev, rxb->page_dma)) { - rxb->page = NULL; - spin_lock(&rxq->lock); - list_add(&rxb->list, &rxq->rx_used); - spin_unlock(&rxq->lock); - __free_pages(page, trans_pcie->rx_page_order); - return; - } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - spin_lock(&rxq->lock); - - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - - spin_unlock(&rxq->lock); - } -} - -static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - int i; - - lockdep_assert_held(&rxq->lock); - - for (i = 0; i < RX_QUEUE_SIZE; i++) { - if (!rxq->pool[i].page) - continue; - dma_unmap_page(trans->dev, rxq->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); - rxq->pool[i].page = NULL; - } -} - -/* - * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free - * - * When moving to rx_free an page is allocated for the slot. - * - * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called only during initialization - */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans) -{ - iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); - - iwl_pcie_rxq_restock(trans); -} - -/* - * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues - * - * Allocates for each received request 8 pages - * Called as a scheduled work item. 
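
The work item whose comment ends here drains rba->req_pending in one shot and publishes completed batches through rba->req_ready. A rough userspace model of that bookkeeping, using C11 atomics in place of the kernel's atomic_t and leaving out the page allocation as well as the re-check for requests posted mid-run:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int req_pending;  /* batches requested by the Rx queue */
    static atomic_int req_ready;    /* batches allocated, ready to claim */

    static void allocator_work(void)
    {
        /* grab everything posted so far, like atomic_xchg(..., 0) */
        int pending = atomic_exchange(&req_pending, 0);

        while (pending--) {
            /* ... allocate and map RX_CLAIM_REQ_ALLOC (8) pages here ... */
            atomic_fetch_add(&req_ready, 1); /* publish one batch */
        }
    }

    int main(void)
    {
        atomic_store(&req_pending, 3);
        allocator_work();
        printf("ready batches: %d\n", atomic_load(&req_ready)); /* 3 */
        return 0;
    }
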
- */ -static void iwl_pcie_rx_allocator(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct list_head local_empty; - int pending = atomic_xchg(&rba->req_pending, 0); - - IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); - - /* If we were scheduled - there is at least one request */ - spin_lock(&rba->lock); - /* swap out the rba->rbd_empty to a local list */ - list_replace_init(&rba->rbd_empty, &local_empty); - spin_unlock(&rba->lock); - - while (pending) { - int i; - struct list_head local_allocated; - - INIT_LIST_HEAD(&local_allocated); - - for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { - struct iwl_rx_mem_buffer *rxb; - struct page *page; - - /* List should never be empty - each reused RBD is - * returned to the list, and initial pool covers any - * possible gap between the time the page is allocated - * to the time the RBD is added. - */ - BUG_ON(list_empty(&local_empty)); - /* Get the first rxb from the rbd list */ - rxb = list_first_entry(&local_empty, - struct iwl_rx_mem_buffer, list); - BUG_ON(rxb->page); - - /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); - if (!page) - continue; - rxb->page = page; - - /* Get physical address of the RB */ - rxb->page_dma = dma_map_page(trans->dev, page, 0, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - if (dma_mapping_error(trans->dev, rxb->page_dma)) { - rxb->page = NULL; - __free_pages(page, trans_pcie->rx_page_order); - continue; - } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - /* move the allocated entry to the out list */ - list_move(&rxb->list, &local_allocated); - i++; - } - - pending--; - if (!pending) { - pending = atomic_xchg(&rba->req_pending, 0); - IWL_DEBUG_RX(trans, - "Pending allocation requests = %d\n", - pending); - } - - spin_lock(&rba->lock); - /* add the allocated rbds to the allocator allocated list */ - list_splice_tail(&local_allocated, &rba->rbd_allocated); - /* get more empty RBDs for current pending requests */ - list_splice_tail_init(&rba->rbd_empty, &local_empty); - spin_unlock(&rba->lock); - - atomic_inc(&rba->req_ready); - } - - spin_lock(&rba->lock); - /* return unused rbds to the allocator empty list */ - list_splice_tail(&local_empty, &rba->rbd_empty); - spin_unlock(&rba->lock); -} - -/* - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages -.* -.* Called by queue when the queue posted allocation request and - * has freed 8 RBDs in order to restock itself. - */ -static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, - struct iwl_rx_mem_buffer - *out[RX_CLAIM_REQ_ALLOC]) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - /* - * atomic_dec_if_positive returns req_ready - 1 for any scenario. - * If req_ready is 0 atomic_dec_if_positive will return -1 and this - * function will return -ENOMEM, as there are no ready requests. - * atomic_dec_if_positive will perofrm the *actual* decrement only if - * req_ready > 0, i.e. - there are ready requests and the function - * hands one request to the caller. 
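
On the claim side, the semantics the comment above attributes to atomic_dec_if_positive() can be modelled with a compare-and-swap loop. This is only a userspace sketch of that behaviour, not the kernel implementation:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int req_ready;

    /* Returns old value minus one; a negative result means nothing was claimed. */
    static int dec_if_positive(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
            ;   /* a failed CAS reloads 'old' for the retry */
        return old - 1;
    }

    int main(void)
    {
        atomic_store(&req_ready, 1);
        printf("%d\n", dec_if_positive(&req_ready)); /* 0: one batch claimed */
        printf("%d\n", dec_if_positive(&req_ready)); /* -1: nothing ready */
        return 0;
    }
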
- */ - if (atomic_dec_if_positive(&rba->req_ready) < 0) - return -ENOMEM; - - spin_lock(&rba->lock); - for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { - /* Get next free Rx buffer, remove it from free list */ - out[i] = list_first_entry(&rba->rbd_allocated, - struct iwl_rx_mem_buffer, list); - list_del(&out[i]->list); - } - spin_unlock(&rba->lock); - - return 0; -} - -static void iwl_pcie_rx_allocator_work(struct work_struct *data) -{ - struct iwl_rb_allocator *rba_p = - container_of(data, struct iwl_rb_allocator, rx_alloc); - struct iwl_trans_pcie *trans_pcie = - container_of(rba_p, struct iwl_trans_pcie, rba); - - iwl_pcie_rx_allocator(trans_pcie->trans); -} - -static int iwl_pcie_rx_alloc(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct device *dev = trans->dev; - - memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); - - spin_lock_init(&rxq->lock); - spin_lock_init(&rba->lock); - - if (WARN_ON(rxq->bd || rxq->rb_stts)) - return -EINVAL; - - /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - &rxq->bd_dma, GFP_KERNEL); - if (!rxq->bd) - goto err_bd; - - /*Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), - &rxq->rb_stts_dma, GFP_KERNEL); - if (!rxq->rb_stts) - goto err_rb_stts; - - return 0; - -err_rb_stts: - dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; -err_bd: - return -ENOMEM; -} - -static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 rb_size; - const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ - - if (trans_pcie->rx_buf_size_8k) - rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; - else - rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; - - /* Stop Rx DMA */ - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); - /* reset and flush pointers */ - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); - iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0); - - /* Reset driver's Rx queue write index */ - iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); - - /* Tell device where to find RBD circular buffer in DRAM */ - iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, - (u32)(rxq->bd_dma >> 8)); - - /* Tell device where in DRAM to update its Rx status */ - iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, - rxq->rb_stts_dma >> 4); - - /* Enable Rx DMA - * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in - * the credit mechanism in 5000 HW RX FIFO - * Direct rx interrupts to hosts - * Rx buffer size 4 or 8k - * RB timeout 0x10 - * 256 RBDs - */ - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, - FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | - FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | - FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | - rb_size| - (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| - (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); - - /* Set interrupt coalescing timer to default (2048 usecs) */ - iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); - - /* W/A for interrupt coalescing bug in 7260 and 3160 */ - if (trans->cfg->host_interrupt_operation_mode) - iwl_set_bit(trans, 
CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); -} - -static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) -{ - int i; - - lockdep_assert_held(&rxq->lock); - - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - rxq->free_count = 0; - rxq->used_count = 0; - - for (i = 0; i < RX_QUEUE_SIZE; i++) - list_add(&rxq->pool[i].list, &rxq->rx_used); -} - -static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) -{ - int i; - - lockdep_assert_held(&rba->lock); - - INIT_LIST_HEAD(&rba->rbd_allocated); - INIT_LIST_HEAD(&rba->rbd_empty); - - for (i = 0; i < RX_POOL_SIZE; i++) - list_add(&rba->pool[i].list, &rba->rbd_empty); -} - -static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - lockdep_assert_held(&rba->lock); - - for (i = 0; i < RX_POOL_SIZE; i++) { - if (!rba->pool[i].page) - continue; - dma_unmap_page(trans->dev, rba->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); - rba->pool[i].page = NULL; - } -} - -int iwl_pcie_rx_init(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i, err; - - if (!rxq->bd) { - err = iwl_pcie_rx_alloc(trans); - if (err) - return err; - } - if (!rba->alloc_wq) - rba->alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); - INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); - - spin_lock(&rba->lock); - atomic_set(&rba->req_pending, 0); - atomic_set(&rba->req_ready, 0); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rx_free_rba(trans); - iwl_pcie_rx_init_rba(rba); - spin_unlock(&rba->lock); - - spin_lock(&rxq->lock); - - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rxq_free_rbs(trans); - iwl_pcie_rx_init_rxb_lists(rxq); - - for (i = 0; i < RX_QUEUE_SIZE; i++) - rxq->queue[i] = NULL; - - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); - spin_unlock(&rxq->lock); - - iwl_pcie_rx_replenish(trans); - - iwl_pcie_rx_hw_init(trans, rxq); - - spin_lock(&rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans); - spin_unlock(&rxq->lock); - - return 0; -} - -void iwl_pcie_rx_free(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; - - /*if rxq->bd is NULL, it means that nothing has been allocated, - * exit now */ - if (!rxq->bd) { - IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); - return; - } - - cancel_work_sync(&rba->rx_alloc); - if (rba->alloc_wq) { - destroy_workqueue(rba->alloc_wq); - rba->alloc_wq = NULL; - } - - spin_lock(&rba->lock); - iwl_pcie_rx_free_rba(trans); - spin_unlock(&rba->lock); - - spin_lock(&rxq->lock); - iwl_pcie_rxq_free_rbs(trans); - spin_unlock(&rxq->lock); - - dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; - - if (rxq->rb_stts) - dma_free_coherent(trans->dev, - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - else - IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is 
NULL\n"); - rxq->rb_stts_dma = 0; - rxq->rb_stts = NULL; -} - -/* - * iwl_pcie_rx_reuse_rbd - Recycle used RBDs - * - * Called when a RBD can be reused. The RBD is transferred to the allocator. - * When there are 2 empty RBDs - a request for allocation is posted - */ -static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, - struct iwl_rx_mem_buffer *rxb, - struct iwl_rxq *rxq, bool emergency) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - - /* Move the RBD to the used list, will be moved to allocator in batches - * before claiming or posting a request*/ - list_add_tail(&rxb->list, &rxq->rx_used); - - if (unlikely(emergency)) - return; - - /* Count the allocator owned RBDs */ - rxq->used_count++; - - /* If we have RX_POST_REQ_ALLOC new released rx buffers - - * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is - * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, - * after but we still need to post another request. - */ - if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { - /* Move the 2 RBDs to the allocator ownership. - Allocator has another 6 from pool for the request completion*/ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); - - atomic_inc(&rba->req_pending); - queue_work(rba->alloc_wq, &rba->rx_alloc); - } -} - -static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, - struct iwl_rx_mem_buffer *rxb, - bool emergency) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; - bool page_stolen = false; - int max_len = PAGE_SIZE << trans_pcie->rx_page_order; - u32 offset = 0; - - if (WARN_ON(!rxb)) - return; - - dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); - - while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { - struct iwl_rx_packet *pkt; - u16 sequence; - bool reclaim; - int index, cmd_index, len; - struct iwl_rx_cmd_buffer rxcb = { - ._offset = offset, - ._rx_page_order = trans_pcie->rx_page_order, - ._page = rxb->page, - ._page_stolen = false, - .truesize = max_len, - }; - - pkt = rxb_addr(&rxcb); - - if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) - break; - - IWL_DEBUG_RX(trans, - "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", - rxcb._offset, - get_cmd_string(trans_pcie, pkt->hdr.cmd), - pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); - - len = iwl_rx_packet_len(pkt); - len += sizeof(u32); /* account for status word */ - trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); - trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); - - /* Reclaim a command buffer only if this packet is a response - * to a (driver-originated) command. - * If the packet (e.g. Rx frame) originated from uCode, - * there is no command buffer to reclaim. - * Ucode should set SEQ_RX_FRAME bit if ucode-originated, - * but apparently a few don't get set; catch them here. 
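
For reclaimed responses, the code just below folds the index recovered from the sequence field into the command queue's 32-entry software window (TFD_CMD_SLOTS), the same get_cmd_index() masking declared earlier in internal.h. A standalone sketch of that mapping with made-up hardware indices; the SEQ_TO_INDEX() extraction itself is not modelled here:

    #include <stdio.h>

    #define TFD_QUEUE_SIZE_MAX 256  /* hardware ring entries */
    #define TFD_CMD_SLOTS      32   /* software window of the command queue */

    static unsigned get_cmd_index(unsigned hw_index, unsigned n_window)
    {
        return hw_index & (n_window - 1); /* same masking as the driver */
    }

    int main(void)
    {
        /* every hardware index 0..255 lands in a software slot 0..31 */
        for (unsigned i = 0; i < TFD_QUEUE_SIZE_MAX; i += 37)
            printf("hw %3u -> sw %2u\n", i, get_cmd_index(i, TFD_CMD_SLOTS));
        return 0;
    }
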
*/ - reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); - if (reclaim) { - int i; - - for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { - if (trans_pcie->no_reclaim_cmds[i] == - pkt->hdr.cmd) { - reclaim = false; - break; - } - } - } - - sequence = le16_to_cpu(pkt->hdr.sequence); - index = SEQ_TO_INDEX(sequence); - cmd_index = get_cmd_index(&txq->q, index); - - iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); - - if (reclaim) { - kzfree(txq->entries[cmd_index].free_buf); - txq->entries[cmd_index].free_buf = NULL; - } - - /* - * After here, we should always check rxcb._page_stolen, - * if it is true then one of the handlers took the page. - */ - - if (reclaim) { - /* Invoke any callbacks, transfer the buffer to caller, - * and fire off the (possibly) blocking - * iwl_trans_send_cmd() - * as we reclaim the driver command queue */ - if (!rxcb._page_stolen) - iwl_pcie_hcmd_complete(trans, &rxcb); - else - IWL_WARN(trans, "Claim null rxb?\n"); - } - - page_stolen |= rxcb._page_stolen; - offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); - } - - /* page was stolen from us -- free our reference */ - if (page_stolen) { - __free_pages(rxb->page, trans_pcie->rx_page_order); - rxb->page = NULL; - } - - /* Reuse the page if possible. For notification packets and - * SKBs that fail to Rx correctly, add them back into the - * rx_free list for reuse later. */ - if (rxb->page != NULL) { - rxb->page_dma = - dma_map_page(trans->dev, rxb->page, 0, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - if (dma_mapping_error(trans->dev, rxb->page_dma)) { - /* - * free the page(s) as well to not break - * the invariant that the items on the used - * list have no page(s) - */ - __free_pages(rxb->page, trans_pcie->rx_page_order); - rxb->page = NULL; - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); - } else { - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - } - } else - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); -} - -/* - * iwl_pcie_rx_handle - Main entry function for receiving responses from fw - */ -static void iwl_pcie_rx_handle(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i, j, count = 0; - bool emergency = false; - -restart: - spin_lock(&rxq->lock); - /* uCode's read index (stored in shared DRAM) indicates the last Rx - * buffer that the driver may process (last buffer filled by ucode). 
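
The loop that follows walks the software read index up to the closed_rb_num index reported by the device, wrapping with RX_QUEUE_MASK. A self-contained model of that walk, assuming the 256-entry ring noted in the hardware setup above:

    #include <stdio.h>

    #define RX_QUEUE_SIZE 256
    #define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

    static unsigned consume(unsigned read, unsigned hw_closed)
    {
        unsigned r = hw_closed & 0x0FFF; /* index written back by the device */
        unsigned i = read;
        unsigned handled = 0;

        while (i != r) {
            /* handle rxq->queue[i] here */
            i = (i + 1) & RX_QUEUE_MASK;
            handled++;
        }
        printf("handled %u buffers, new read index %u\n", handled, i);
        return i;
    }

    int main(void)
    {
        consume(250, 4); /* wraps past the end of the ring: 10 buffers */
        return 0;
    }
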
*/ - r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; - i = rxq->read; - - /* Rx interrupt, but nothing sent from uCode */ - if (i == r) - IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); - - while (i != r) { - struct iwl_rx_mem_buffer *rxb; - - if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) - emergency = true; - - rxb = rxq->queue[i]; - rxq->queue[i] = NULL; - - IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", - r, i, rxb); - iwl_pcie_rx_handle_rb(trans, rxb, emergency); - - i = (i + 1) & RX_QUEUE_MASK; - - /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - - * try to claim the pre-allocated buffers from the allocator */ - if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { - struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; - - if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && - !emergency) { - /* Add the remaining 6 empty RBDs - * for allocator use - */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, - &rba->rbd_empty); - spin_unlock(&rba->lock); - } - - /* If not ready - continue, will try to reclaim later. - * No need to reschedule work - allocator exits only on - * success */ - if (!iwl_pcie_rx_allocator_get(trans, out)) { - /* If success - then RX_CLAIM_REQ_ALLOC - * buffers were retrieved and should be added - * to free list */ - rxq->used_count -= RX_CLAIM_REQ_ALLOC; - for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { - list_add_tail(&out[j]->list, - &rxq->rx_free); - rxq->free_count++; - } - } - } - if (emergency) { - count++; - if (count == 8) { - count = 0; - if (rxq->used_count < RX_QUEUE_SIZE / 3) - emergency = false; - spin_unlock(&rxq->lock); - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); - spin_lock(&rxq->lock); - } - } - /* handle restock for three cases, can be all of them at once: - * - we just pulled buffers from the allocator - * - we have 8+ unstolen pages accumulated - * - we are in emergency and allocated buffers - */ - if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rxq_restock(trans); - goto restart; - } - } - - /* Backtrack one entry */ - rxq->read = i; - spin_unlock(&rxq->lock); - - /* - * handle a case where in emergency there are some unallocated RBDs. - * those RBDs are in the used list, but are not tracked by the queue's - * used_count which counts allocator owned RBDs. - * unallocated emergency RBDs must be allocated on exit, otherwise - * when called again the function may not be in emergency mode and - * they will be handed to the allocator with no tracking in the RBD - * allocator counters, which will lead to them never being claimed back - * by the queue. - * by allocating them here, they are now in the queue free list, and - * will be restocked by the next call of iwl_pcie_rxq_restock. 
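
The restock decisions taken throughout this path come down to the power-of-two ring arithmetic of iwl_rxq_space(), which keeps one slot empty to distinguish a full ring from an empty one. A quick standalone check of that formula, assuming RX_QUEUE_SIZE is 256:

    #include <assert.h>
    #include <stdio.h>

    #define RX_QUEUE_SIZE 256   /* must stay a power of two */

    static unsigned rxq_space(unsigned read, unsigned write)
    {
        return (read - write - 1) & (RX_QUEUE_SIZE - 1);
    }

    int main(void)
    {
        assert(rxq_space(0, 0) == RX_QUEUE_SIZE - 1); /* nothing queued to HW */
        assert(rxq_space(10, 9) == 0);                /* no slot may be added */
        assert(rxq_space(0, 250) == 5);               /* wrapped indices */
        printf("free-slot arithmetic checks out\n");
        return 0;
    }
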
- */ - if (unlikely(emergency && count)) - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); - - if (trans_pcie->napi.poll) - napi_gro_flush(&trans_pcie->napi, false); -} - -/* - * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card - */ -static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int i; - - /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ - if (trans->cfg->internal_wimax_coex && - !trans->cfg->apmg_not_supported && - (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & - APMS_CLK_VAL_MRB_FUNC_MODE) || - (iwl_read_prph(trans, APMG_PS_CTRL_REG) & - APMG_PS_CTRL_VAL_RESET_REQ))) { - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - iwl_op_mode_wimax_active(trans->op_mode); - wake_up(&trans_pcie->wait_command_queue); - return; - } - - iwl_pcie_dump_csr(trans); - iwl_dump_fh(trans, NULL); - - local_bh_disable(); - /* The STATUS_FW_ERROR bit is set in this function. This must happen - * before we wake up the command caller, to ensure a proper cleanup. */ - iwl_trans_fw_error(trans); - local_bh_enable(); - - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) - del_timer(&trans_pcie->txq[i].stuck_timer); - - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - wake_up(&trans_pcie->wait_command_queue); -} - -static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) -{ - u32 inta; - - lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); - - trace_iwlwifi_dev_irq(trans->dev); - - /* Discover which interrupts are active/pending */ - inta = iwl_read32(trans, CSR_INT); - - /* the thread will service interrupts and re-enable them */ - return inta; -} - -/* a device (PCI-E) page is 4096 bytes long */ -#define ICT_SHIFT 12 -#define ICT_SIZE (1 << ICT_SHIFT) -#define ICT_COUNT (ICT_SIZE / sizeof(u32)) - -/* interrupt handler using ict table, with this interrupt driver will - * stop using INTA register to get device's interrupt, reading this register - * is expensive, device will write interrupts in ICT dram table, increment - * index then will fire interrupt to driver, driver will OR all ICT table - * entries from current index up to table entry with 0 value. the result is - * the interrupt we need to service, driver will set the entries back to 0 and - * set index. - */ -static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 inta; - u32 val = 0; - u32 read; - - trace_iwlwifi_dev_irq(trans->dev); - - /* Ignore interrupt if there's nothing in NIC to service. - * This may be due to IRQ shared with another device, - * or due to sporadic interrupts thrown from our NIC. */ - read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); - trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); - if (!read) - return 0; - - /* - * Collect all entries up to the first 0, starting from ict_index; - * note we already read at ict_index. - */ - do { - val |= read; - IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", - trans_pcie->ict_index, read); - trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; - trans_pcie->ict_index = - ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); - - read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); - trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, - read); - } while (read); - - /* We should not get this value, just ignore it. */ - if (val == 0xffffffff) - val = 0; - - /* - * this is a w/a for a h/w bug. 
the h/w bug may cause the Rx bit - * (bit 15 before shifting it to 31) to clear when using interrupt - * coalescing. fortunately, bits 18 and 19 stay set when this happens - * so we use them to decide on the real state of the Rx bit. - * In order words, bit 15 is set if bit 18 or bit 19 are set. - */ - if (val & 0xC0000) - val |= 0x8000; - - inta = (0xff & val) | ((0xff00 & val) << 16); - return inta; -} - -irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) -{ - struct iwl_trans *trans = dev_id; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct isr_statistics *isr_stats = &trans_pcie->isr_stats; - u32 inta = 0; - u32 handled = 0; - - lock_map_acquire(&trans->sync_cmd_lockdep_map); - - spin_lock(&trans_pcie->irq_lock); - - /* dram interrupt table not set yet, - * use legacy interrupt. - */ - if (likely(trans_pcie->use_ict)) - inta = iwl_pcie_int_cause_ict(trans); - else - inta = iwl_pcie_int_cause_non_ict(trans); - - if (iwl_have_debug_level(IWL_DL_ISR)) { - IWL_DEBUG_ISR(trans, - "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", - inta, trans_pcie->inta_mask, - iwl_read32(trans, CSR_INT_MASK), - iwl_read32(trans, CSR_FH_INT_STATUS)); - if (inta & (~trans_pcie->inta_mask)) - IWL_DEBUG_ISR(trans, - "We got a masked interrupt (0x%08x)\n", - inta & (~trans_pcie->inta_mask)); - } - - inta &= trans_pcie->inta_mask; - - /* - * Ignore interrupt if there's nothing in NIC to service. - * This may be due to IRQ shared with another device, - * or due to sporadic interrupts thrown from our NIC. - */ - if (unlikely(!inta)) { - IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); - /* - * Re-enable interrupts here since we don't - * have anything to service - */ - if (test_bit(STATUS_INT_ENABLED, &trans->status)) - iwl_enable_interrupts(trans); - spin_unlock(&trans_pcie->irq_lock); - lock_map_release(&trans->sync_cmd_lockdep_map); - return IRQ_NONE; - } - - if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { - /* - * Hardware disappeared. It might have - * already raised an interrupt. - */ - IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); - spin_unlock(&trans_pcie->irq_lock); - goto out; - } - - /* Ack/clear/reset pending uCode interrupts. - * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, - */ - /* There is a hardware bug in the interrupt mask function that some - * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if - * they are disabled in the CSR_INT_MASK register. Furthermore the - * ICT interrupt handling mechanism has another bug that might cause - * these unmasked interrupts fail to be detected. We workaround the - * hardware bugs here by ACKing all the possible interrupts so that - * interrupt coalescing can still be achieved. - */ - iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); - - if (iwl_have_debug_level(IWL_DL_ISR)) - IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", - inta, iwl_read32(trans, CSR_INT_MASK)); - - spin_unlock(&trans_pcie->irq_lock); - - /* Now service all interrupt bits discovered above. */ - if (inta & CSR_INT_BIT_HW_ERR) { - IWL_ERR(trans, "Hardware error detected. 
Restarting.\n"); - - /* Tell the device to stop sending interrupts */ - iwl_disable_interrupts(trans); - - isr_stats->hw++; - iwl_pcie_irq_handle_error(trans); - - handled |= CSR_INT_BIT_HW_ERR; - - goto out; - } - - if (iwl_have_debug_level(IWL_DL_ISR)) { - /* NIC fires this, but we don't use it, redundant with WAKEUP */ - if (inta & CSR_INT_BIT_SCD) { - IWL_DEBUG_ISR(trans, - "Scheduler finished to transmit the frame/frames.\n"); - isr_stats->sch++; - } - - /* Alive notification via Rx interrupt will do the real work */ - if (inta & CSR_INT_BIT_ALIVE) { - IWL_DEBUG_ISR(trans, "Alive interrupt\n"); - isr_stats->alive++; - } - } - - /* Safely ignore these bits for debug checks below */ - inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); - - /* HW RF KILL switch toggled */ - if (inta & CSR_INT_BIT_RF_KILL) { - bool hw_rfkill; - - hw_rfkill = iwl_is_rfkill_set(trans); - IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", - hw_rfkill ? "disable radio" : "enable radio"); - - isr_stats->rfkill++; - - mutex_lock(&trans_pcie->mutex); - iwl_trans_pcie_rf_kill(trans, hw_rfkill); - mutex_unlock(&trans_pcie->mutex); - if (hw_rfkill) { - set_bit(STATUS_RFKILL, &trans->status); - if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, - &trans->status)) - IWL_DEBUG_RF_KILL(trans, - "Rfkill while SYNC HCMD in flight\n"); - wake_up(&trans_pcie->wait_command_queue); - } else { - clear_bit(STATUS_RFKILL, &trans->status); - } - - handled |= CSR_INT_BIT_RF_KILL; - } - - /* Chip got too hot and stopped itself */ - if (inta & CSR_INT_BIT_CT_KILL) { - IWL_ERR(trans, "Microcode CT kill error detected.\n"); - isr_stats->ctkill++; - handled |= CSR_INT_BIT_CT_KILL; - } - - /* Error detected by uCode */ - if (inta & CSR_INT_BIT_SW_ERR) { - IWL_ERR(trans, "Microcode SW error detected. " - " Restarting 0x%X.\n", inta); - isr_stats->sw++; - iwl_pcie_irq_handle_error(trans); - handled |= CSR_INT_BIT_SW_ERR; - } - - /* uCode wakes up after power-down sleep */ - if (inta & CSR_INT_BIT_WAKEUP) { - IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); - iwl_pcie_rxq_check_wrptr(trans); - iwl_pcie_txq_check_wrptrs(trans); - - isr_stats->wakeup++; - - handled |= CSR_INT_BIT_WAKEUP; - } - - /* All uCode command responses, including Tx command responses, - * Rx "responses" (frame-received notification), and other - * notifications from uCode come through here*/ - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | - CSR_INT_BIT_RX_PERIODIC)) { - IWL_DEBUG_ISR(trans, "Rx interrupt\n"); - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { - handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); - iwl_write32(trans, CSR_FH_INT_STATUS, - CSR_FH_INT_RX_MASK); - } - if (inta & CSR_INT_BIT_RX_PERIODIC) { - handled |= CSR_INT_BIT_RX_PERIODIC; - iwl_write32(trans, - CSR_INT, CSR_INT_BIT_RX_PERIODIC); - } - /* Sending RX interrupt require many steps to be done in the - * the device: - * 1- write interrupt to current index in ICT table. - * 2- dma RX frame. - * 3- update RX shared data to indicate last write index. - * 4- send interrupt. - * This could lead to RX race, driver could receive RX interrupt - * but the shared data changes does not reflect this; - * periodic interrupt will detect any dangling Rx activity. - */ - - /* Disable periodic interrupt; we use it as just a one-shot. */ - iwl_write8(trans, CSR_INT_PERIODIC_REG, - CSR_INT_PERIODIC_DIS); - - /* - * Enable periodic interrupt in 8 msec only if we received - * real RX interrupt (instead of just periodic int), to catch - * any dangling Rx interrupt. 
If it was just the periodic - * interrupt, there was no dangling Rx activity, and no need - * to extend the periodic interrupt; one-shot is enough. - */ - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) - iwl_write8(trans, CSR_INT_PERIODIC_REG, - CSR_INT_PERIODIC_ENA); - - isr_stats->rx++; - - local_bh_disable(); - iwl_pcie_rx_handle(trans); - local_bh_enable(); - } - - /* This "Tx" DMA channel is used only for loading uCode */ - if (inta & CSR_INT_BIT_FH_TX) { - iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); - IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); - isr_stats->tx++; - handled |= CSR_INT_BIT_FH_TX; - /* Wake up uCode load routine, now that load is complete */ - trans_pcie->ucode_write_complete = true; - wake_up(&trans_pcie->ucode_write_waitq); - } - - if (inta & ~handled) { - IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); - isr_stats->unhandled++; - } - - if (inta & ~(trans_pcie->inta_mask)) { - IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", - inta & ~trans_pcie->inta_mask); - } - - /* Re-enable all interrupts */ - /* only Re-enable if disabled by irq */ - if (test_bit(STATUS_INT_ENABLED, &trans->status)) - iwl_enable_interrupts(trans); - /* Re-enable RF_KILL if it occurred */ - else if (handled & CSR_INT_BIT_RF_KILL) - iwl_enable_rfkill_int(trans); - -out: - lock_map_release(&trans->sync_cmd_lockdep_map); - return IRQ_HANDLED; -} - -/****************************************************************************** - * - * ICT functions - * - ******************************************************************************/ - -/* Free dram table */ -void iwl_pcie_free_ict(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (trans_pcie->ict_tbl) { - dma_free_coherent(trans->dev, ICT_SIZE, - trans_pcie->ict_tbl, - trans_pcie->ict_tbl_dma); - trans_pcie->ict_tbl = NULL; - trans_pcie->ict_tbl_dma = 0; - } -} - -/* - * allocate dram shared table, it is an aligned memory - * block of ICT_SIZE. - * also reset all data related to ICT table interrupt. - */ -int iwl_pcie_alloc_ict(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - trans_pcie->ict_tbl = - dma_zalloc_coherent(trans->dev, ICT_SIZE, - &trans_pcie->ict_tbl_dma, - GFP_KERNEL); - if (!trans_pcie->ict_tbl) - return -ENOMEM; - - /* just an API sanity check ... it is guaranteed to be aligned */ - if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { - iwl_pcie_free_ict(trans); - return -EINVAL; - } - - IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n", - (unsigned long long)trans_pcie->ict_tbl_dma, - trans_pcie->ict_tbl); - - return 0; -} - -/* Device is going up inform it about using ICT interrupt table, - * also we need to tell the driver to start using ICT interrupt. 
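The allocation above relies on the table being ICT_SIZE aligned, so that only the bits above the 4 KiB page offset need to be programmed into CSR_DRAM_INT_TBL_REG by iwl_pcie_reset_ict() below. The sketch that follows models just that packing; the 0x98000000 control value in main() is a made-up stand-in for the real CSR_DRAM_INT_TBL_ENABLE / CSR_DRAM_INIT_TBL_WRAP_CHECK / CSR_DRAM_INIT_TBL_WRITE_POINTER flags from iwl-csr.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ICT_SHIFT 12
#define ICT_SIZE  (1u << ICT_SHIFT)

static uint32_t pack_ict_reg(uint64_t ict_dma, uint32_t ctrl_bits)
{
        /* dma_zalloc_coherent() of ICT_SIZE guarantees this alignment;
         * the driver still WARNs on it as an API sanity check */
        assert((ict_dma & (ICT_SIZE - 1)) == 0);

        /* only the bits above the page offset fit in the register */
        return (uint32_t)(ict_dma >> ICT_SHIFT) | ctrl_bits;
}

int main(void)
{
        /* 0x98000000: placeholder for the real enable/wrap/write-pointer flags */
        printf("CSR_DRAM_INT_TBL_REG = 0x%08x\n",
               (unsigned)pack_ict_reg(0x12345000ULL, 0x98000000u));
        return 0;
}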
- */ -void iwl_pcie_reset_ict(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 val; - - if (!trans_pcie->ict_tbl) - return; - - spin_lock(&trans_pcie->irq_lock); - iwl_disable_interrupts(trans); - - memset(trans_pcie->ict_tbl, 0, ICT_SIZE); - - val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; - - val |= CSR_DRAM_INT_TBL_ENABLE | - CSR_DRAM_INIT_TBL_WRAP_CHECK | - CSR_DRAM_INIT_TBL_WRITE_POINTER; - - IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); - - iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); - trans_pcie->use_ict = true; - trans_pcie->ict_index = 0; - iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); - iwl_enable_interrupts(trans); - spin_unlock(&trans_pcie->irq_lock); -} - -/* Device is going down disable ict interrupt usage */ -void iwl_pcie_disable_ict(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - spin_lock(&trans_pcie->irq_lock); - trans_pcie->use_ict = false; - spin_unlock(&trans_pcie->irq_lock); -} - -irqreturn_t iwl_pcie_isr(int irq, void *data) -{ - struct iwl_trans *trans = data; - - if (!trans) - return IRQ_NONE; - - /* Disable (but don't clear!) interrupts here to avoid - * back-to-back ISRs and sporadic interrupts from our NIC. - * If we have something to service, the tasklet will re-enable ints. - * If we *don't* have something, we'll re-enable before leaving here. - */ - iwl_write32(trans, CSR_INT_MASK, 0x00000000); - - return IRQ_WAKE_THREAD; -} diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c deleted file mode 100644 index 90283453073c..000000000000 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ /dev/null @@ -1,2825 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include - -#include "iwl-drv.h" -#include "iwl-trans.h" -#include "iwl-csr.h" -#include "iwl-prph.h" -#include "iwl-scd.h" -#include "iwl-agn-hw.h" -#include "iwl-fw-error-dump.h" -#include "internal.h" -#include "iwl-fh.h" - -/* extended range in FW SRAM */ -#define IWL_FW_MEM_EXTENDED_START 0x40000 -#define IWL_FW_MEM_EXTENDED_END 0x57FFF - -static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (!trans_pcie->fw_mon_page) - return; - - dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, DMA_FROM_DEVICE); - __free_pages(trans_pcie->fw_mon_page, - get_order(trans_pcie->fw_mon_size)); - trans_pcie->fw_mon_page = NULL; - trans_pcie->fw_mon_phys = 0; - trans_pcie->fw_mon_size = 0; -} - -static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct page *page = NULL; - dma_addr_t phys; - u32 size = 0; - u8 power; - - if (!max_power) { - /* default max_power is maximum */ - max_power = 26; - } else { - max_power += 11; - } - - if (WARN(max_power > 26, - "External buffer size for monitor is too big %d, check the FW TLV\n", - max_power)) - return; - - if (trans_pcie->fw_mon_page) { - dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, - DMA_FROM_DEVICE); - return; - } - - phys = 0; - for (power = max_power; power >= 11; power--) { - int order; - - size = BIT(power); - order = get_order(size); - page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO, - order); - if (!page) - continue; - - phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order, - DMA_FROM_DEVICE); - if (dma_mapping_error(trans->dev, phys)) { - __free_pages(page, order); - page = NULL; - continue; - } - IWL_INFO(trans, - "Allocated 0x%08x bytes (order %d) for firmware monitor.\n", - size, order); - break; - } - - if (WARN_ON_ONCE(!page)) - return; - - if (power != max_power) - IWL_ERR(trans, - "Sorry - debug buffer is only %luK while you requested %luK\n", - (unsigned long)BIT(power - 10), - (unsigned long)BIT(max_power - 10)); 
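The allocation strategy used by iwl_pcie_alloc_fw_monitor() above (ask for the largest power-of-two buffer and halve until something succeeds) can be modelled in user space as below; malloc() stands in for alloc_pages() plus dma_map_page(), and the 26/11 bounds mirror the ones in the driver code.

#include <stdio.h>
#include <stdlib.h>

static void *alloc_monitor(int max_power, int min_power, size_t *size_out)
{
        int power;

        for (power = max_power; power >= min_power; power--) {
                size_t size = (size_t)1 << power;
                void *buf = malloc(size);       /* stand-in for alloc + DMA map */

                if (buf) {
                        *size_out = size;
                        return buf;
                }
        }
        *size_out = 0;
        return NULL;
}

int main(void)
{
        size_t size;
        void *buf = alloc_monitor(26, 11, &size);   /* 64 MiB down to 2 KiB */

        if (buf)
                printf("monitor buffer: %zu bytes\n", size);
        free(buf);
        return 0;
}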
- - trans_pcie->fw_mon_page = page; - trans_pcie->fw_mon_phys = phys; - trans_pcie->fw_mon_size = size; -} - -static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) -{ - iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, - ((reg & 0x0000ffff) | (2 << 28))); - return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG); -} - -static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) -{ - iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); - iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, - ((reg & 0x0000ffff) | (3 << 28))); -} - -static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) -{ - if (trans->cfg->apmg_not_supported) - return; - - if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) - iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VAUX, - ~APMG_PS_CTRL_MSK_PWR_SRC); - else - iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, - ~APMG_PS_CTRL_MSK_PWR_SRC); -} - -/* PCI registers */ -#define PCI_CFG_RETRY_TIMEOUT 0x041 - -static void iwl_pcie_apm_config(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 lctl; - u16 cap; - - /* - * HW bug W/A for instability in PCIe bus L0S->L1 transition. - * Check if BIOS (or OS) enabled L1-ASPM on this device. - * If so (likely), disable L0S, so device moves directly L0->L1; - * costs negligible amount of power savings. - * If not (unlikely), enable L0S, so there is at least some - * power savings, even without L1. - */ - pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) - iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - else - iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); - - pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); - trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; - dev_info(trans->dev, "L1 %sabled - LTR %sabled\n", - (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", - trans->ltr_enabled ? "En" : "Dis"); -} - -/* - * Start up NIC's basic functionality after it has been reset - * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) - * NOTE: This does not load uCode nor start the embedded processor - */ -static int iwl_pcie_apm_init(struct iwl_trans *trans) -{ - int ret = 0; - IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); - - /* - * Use "set_bit" below rather than "write", to preserve any hardware - * bits already set by default after reset. 
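Stripped of the MMIO side effects, iwl_pcie_apm_config() above derives the driver's pm_support and ltr_enabled flags, plus the L1 state that drives the L0S decision, from the PCIe Link Control and Device Control 2 words. A pure-logic sketch, using the register bit values as defined in include/uapi/linux/pci_regs.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCTL_ASPM_L0S  0x0001
#define PCI_EXP_LNKCTL_ASPM_L1   0x0002
#define PCI_EXP_DEVCTL2_LTR_EN   0x0400

struct apm_cfg {
        bool l1_enabled;        /* decides how the CSR_GIO_REG L0S bit is set */
        bool pm_support;        /* trans->pm_support */
        bool ltr_enabled;       /* trans->ltr_enabled */
};

static struct apm_cfg apm_config(uint16_t lctl, uint16_t devctl2)
{
        struct apm_cfg c = {
                .l1_enabled  = lctl & PCI_EXP_LNKCTL_ASPM_L1,
                .pm_support  = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S),
                .ltr_enabled = devctl2 & PCI_EXP_DEVCTL2_LTR_EN,
        };
        return c;
}

int main(void)
{
        struct apm_cfg c = apm_config(PCI_EXP_LNKCTL_ASPM_L1,
                                      PCI_EXP_DEVCTL2_LTR_EN);

        printf("L1 %sabled - LTR %sabled, pm_support=%d\n",
               c.l1_enabled ? "En" : "Dis",
               c.ltr_enabled ? "En" : "Dis", c.pm_support);
        return 0;
}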
- */ - - /* Disable L0S exit timer (platform NMI Work/Around) */ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, - CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); - - /* - * Disable L0s without affecting L1; - * don't wait for ICH L0s (ICH bug W/A) - */ - iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, - CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); - - /* Set FH wait threshold to maximum (HW error during stress W/A) */ - iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); - - /* - * Enable HAP INTA (interrupt from management bus) to - * wake device's PCI Express link L1a -> L0s - */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); - - iwl_pcie_apm_config(trans); - - /* Configure analog phase-lock-loop before activating to D0A */ - if (trans->cfg->base_params->pll_cfg_val) - iwl_set_bit(trans, CSR_ANA_PLL_CFG, - trans->cfg->base_params->pll_cfg_val); - - /* - * Set "initialization complete" bit to move adapter from - * D0U* --> D0A* (powered-up active) state. - */ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - /* - * Wait for clock stabilization; once stabilized, access to - * device-internal resources is supported, e.g. iwl_write_prph() - * and accesses to uCode SRAM. - */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); - if (ret < 0) { - IWL_DEBUG_INFO(trans, "Failed to init the card\n"); - goto out; - } - - if (trans->cfg->host_interrupt_operation_mode) { - /* - * This is a bit of an abuse - This is needed for 7260 / 3160 - * only check host_interrupt_operation_mode even if this is - * not related to host_interrupt_operation_mode. - * - * Enable the oscillator to count wake up time for L1 exit. This - * consumes slightly more power (100uA) - but allows to be sure - * that we wake up from L1 on time. - * - * This looks weird: read twice the same register, discard the - * value, set a bit, and yet again, read that same register - * just to discard the value. But that's the way the hardware - * seems to like it. - */ - iwl_read_prph(trans, OSC_CLK); - iwl_read_prph(trans, OSC_CLK); - iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); - iwl_read_prph(trans, OSC_CLK); - iwl_read_prph(trans, OSC_CLK); - } - - /* - * Enable DMA clock and wait for it to stabilize. - * - * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" - * bits do not disable clocks. This preserves any hardware - * bits already set by default in "CLK_CTRL_REG" after reset. - */ - if (!trans->cfg->apmg_not_supported) { - iwl_write_prph(trans, APMG_CLK_EN_REG, - APMG_CLK_VAL_DMA_CLK_RQT); - udelay(20); - - /* Disable L1-Active */ - iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, - APMG_PCIDEV_STT_VAL_L1_ACT_DIS); - - /* Clear the interrupt in APMG if the NIC is in RFKILL */ - iwl_write_prph(trans, APMG_RTC_INT_STT_REG, - APMG_RTC_INT_STT_RFKILL); - } - - set_bit(STATUS_DEVICE_ENABLED, &trans->status); - -out: - return ret; -} - -/* - * Enable LP XTAL to avoid HW bug where device may consume much power if - * FW is not loaded after device reset. LP XTAL is disabled by default - * after device HW reset. Do it only if XTAL is fed by internal source. - * Configure device's "persistence" mode to avoid resetting XTAL again when - * SHRD_HW_RST occurs in S3. 
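The clock-stabilization wait in iwl_pcie_apm_init() above uses the usual iwl_poll_bit() pattern: re-read a CSR until the masked value matches, giving up after a timeout, with a negative return meaning the bit never came up. A rough user-space model, where read_reg() stands in for an MMIO read that eventually reports the MAC clock ready:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;

static uint32_t read_reg(void)
{
        static int polls;

        if (++polls == 3)               /* pretend the clock settles on poll 3 */
                fake_reg |= 0x1;
        return fake_reg;
}

static int poll_bit(uint32_t bits, uint32_t mask, int timeout_us)
{
        int elapsed = 0;

        do {
                if ((read_reg() & mask) == (bits & mask))
                        return elapsed;         /* >= 0: success */
                /* a real driver sleeps/delays between polls */
                elapsed += 10;
        } while (elapsed < timeout_us);

        return -1;                              /* timed out */
}

int main(void)
{
        printf("poll result: %d\n", poll_bit(0x1, 0x1, 25000));
        return 0;
}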
- */ -static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) -{ - int ret; - u32 apmg_gp1_reg; - u32 apmg_xtal_cfg_reg; - u32 dl_cfg_reg; - - /* Force XTAL ON */ - __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_XTAL_ON); - - /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - udelay(10); - - /* - * Set "initialization complete" bit to move adapter from - * D0U* --> D0A* (powered-up active) state. - */ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - /* - * Wait for clock stabilization; once stabilized, access to - * device-internal resources is possible. - */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - 25000); - if (WARN_ON(ret < 0)) { - IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n"); - /* Release XTAL ON request */ - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_XTAL_ON); - return; - } - - /* - * Clear "disable persistence" to avoid LP XTAL resetting when - * SHRD_HW_RST is applied in S3. - */ - iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, - APMG_PCIDEV_STT_VAL_PERSIST_DIS); - - /* - * Force APMG XTAL to be active to prevent its disabling by HW - * caused by APMG idle state. - */ - apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, - SHR_APMG_XTAL_CFG_REG); - iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, - apmg_xtal_cfg_reg | - SHR_APMG_XTAL_CFG_XTAL_ON_REQ); - - /* - * Reset entire device again - do controller reset (results in - * SHRD_HW_RST). Turn MAC off before proceeding. - */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - udelay(10); - - /* Enable LP XTAL by indirect access through CSR */ - apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); - iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | - SHR_APMG_GP1_WF_XTAL_LP_EN | - SHR_APMG_GP1_CHICKEN_BIT_SELECT); - - /* Clear delay line clock power up */ - dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); - iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & - ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); - - /* - * Enable persistence mode to avoid LP XTAL resetting when - * SHRD_HW_RST is applied in S3. - */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PERSIST_MODE); - - /* - * Clear "initialization complete" bit to move adapter from - * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
- */ - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - /* Activates XTAL resources monitor */ - __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, - CSR_MONITOR_XTAL_RESOURCES); - - /* Release XTAL ON request */ - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_XTAL_ON); - udelay(10); - - /* Release APMG XTAL */ - iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, - apmg_xtal_cfg_reg & - ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); -} - -static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) -{ - int ret = 0; - - /* stop device's busmaster DMA activity */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); - - ret = iwl_poll_bit(trans, CSR_RESET, - CSR_RESET_REG_FLAG_MASTER_DISABLED, - CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); - if (ret < 0) - IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); - - IWL_DEBUG_INFO(trans, "stop master\n"); - - return ret; -} - -static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) -{ - IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); - - if (op_mode_leave) { - if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) - iwl_pcie_apm_init(trans); - - /* inform ME that we are leaving */ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) - iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, - APMG_PCIDEV_STT_VAL_WAKE_ME); - else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE | - CSR_HW_IF_CONFIG_REG_ENABLE_PME); - mdelay(1); - iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - } - mdelay(5); - } - - clear_bit(STATUS_DEVICE_ENABLED, &trans->status); - - /* Stop device's DMA activity */ - iwl_pcie_apm_stop_master(trans); - - if (trans->cfg->lp_xtal_workaround) { - iwl_pcie_apm_lp_xtal_enable(trans); - return; - } - - /* Reset the entire device */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - udelay(10); - - /* - * Clear "initialization complete" bit to move adapter from - * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
- */ - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); -} - -static int iwl_pcie_nic_init(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - /* nic_init */ - spin_lock(&trans_pcie->irq_lock); - iwl_pcie_apm_init(trans); - - spin_unlock(&trans_pcie->irq_lock); - - iwl_pcie_set_pwr(trans, false); - - iwl_op_mode_nic_config(trans->op_mode); - - /* Allocate the RX queue, or reset if it is already allocated */ - iwl_pcie_rx_init(trans); - - /* Allocate or reset and init all Tx and Command queues */ - if (iwl_pcie_tx_init(trans)) - return -ENOMEM; - - if (trans->cfg->base_params->shadow_reg_enable) { - /* enable shadow regs in HW */ - iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); - IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); - } - - return 0; -} - -#define HW_READY_TIMEOUT (50) - -/* Note: returns poll_bit return value, which is >= 0 if success */ -static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) -{ - int ret; - - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); - - /* See if we got it */ - ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, - HW_READY_TIMEOUT); - - if (ret >= 0) - iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); - - IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); - return ret; -} - -/* Note: returns standard 0/-ERROR code */ -static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) -{ - int ret; - int t = 0; - int iter; - - IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); - - ret = iwl_pcie_set_hw_ready(trans); - /* If the card is ready, exit 0 */ - if (ret >= 0) - return 0; - - iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - msleep(1); - - for (iter = 0; iter < 10; iter++) { - /* If HW is not ready, prepare the conditions to check again */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE); - - do { - ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) - return 0; - - usleep_range(200, 1000); - t += 200; - } while (t < 150000); - msleep(25); - } - - IWL_ERR(trans, "Couldn't prepare the card\n"); - - return ret; -} - -/* - * ucode - */ -static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, - dma_addr_t phy_addr, u32 byte_cnt) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - trans_pcie->ucode_write_complete = false; - - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); - - iwl_write_direct32(trans, - FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), - dst_addr); - - iwl_write_direct32(trans, - FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), - phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); - - iwl_write_direct32(trans, - FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), - (iwl_get_dma_hi_addr(phy_addr) - << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); - - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), - 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | - 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | - FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); - - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | - FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); - - ret = wait_event_timeout(trans_pcie->ucode_write_waitq, - 
trans_pcie->ucode_write_complete, 5 * HZ); - if (!ret) { - IWL_ERR(trans, "Failed to load firmware chunk!\n"); - return -ETIMEDOUT; - } - - return 0; -} - -static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, - const struct fw_desc *section) -{ - u8 *v_addr; - dma_addr_t p_addr; - u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); - int ret = 0; - - IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", - section_num); - - v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, - GFP_KERNEL | __GFP_NOWARN); - if (!v_addr) { - IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); - chunk_sz = PAGE_SIZE; - v_addr = dma_alloc_coherent(trans->dev, chunk_sz, - &p_addr, GFP_KERNEL); - if (!v_addr) - return -ENOMEM; - } - - for (offset = 0; offset < section->len; offset += chunk_sz) { - u32 copy_size, dst_addr; - bool extended_addr = false; - - copy_size = min_t(u32, chunk_sz, section->len - offset); - dst_addr = section->offset + offset; - - if (dst_addr >= IWL_FW_MEM_EXTENDED_START && - dst_addr <= IWL_FW_MEM_EXTENDED_END) - extended_addr = true; - - if (extended_addr) - iwl_set_bits_prph(trans, LMPM_CHICK, - LMPM_CHICK_EXTENDED_ADDR_SPACE); - - memcpy(v_addr, (u8 *)section->data + offset, copy_size); - ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, - copy_size); - - if (extended_addr) - iwl_clear_bits_prph(trans, LMPM_CHICK, - LMPM_CHICK_EXTENDED_ADDR_SPACE); - - if (ret) { - IWL_ERR(trans, - "Could not load the [%d] uCode section\n", - section_num); - break; - } - } - - dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); - return ret; -} - -/* - * Driver Takes the ownership on secure machine before FW load - * and prevent race with the BT load. - * W/A for ROM bug. (should be remove in the next Si step) - */ -static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans) -{ - u32 val, loop = 1000; - - /* - * Check the RSA semaphore is accessible. - * If the HW isn't locked and the rsa semaphore isn't accessible, - * we are in trouble. - */ - val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); - if (val & (BIT(1) | BIT(17))) { - IWL_INFO(trans, - "can't access the RSA semaphore it is write protected\n"); - return 0; - } - - /* take ownership on the AUX IF */ - iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK); - iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK); - - do { - iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1); - val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS); - if (val == 0x1) { - iwl_write_prph(trans, RSA_ENABLE, 0); - return 0; - } - - udelay(10); - loop--; - } while (loop > 0); - - IWL_ERR(trans, "Failed to take ownership on secure machine\n"); - return -EIO; -} - -static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, - const struct fw_img *image, - int cpu, - int *first_ucode_section) -{ - int shift_param; - int i, ret = 0, sec_num = 0x1; - u32 val, last_read_idx = 0; - - if (cpu == 1) { - shift_param = 0; - *first_ucode_section = 0; - } else { - shift_param = 16; - (*first_ucode_section)++; - } - - for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { - last_read_idx = i; - - /* - * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between - * CPU1 to CPU2. - * PAGING_SEPARATOR_SECTION delimiter - separate between - * CPU2 non paged to CPU2 paging sec. 
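The chunking arithmetic in iwl_pcie_load_section() above is easy to get wrong, so here is a minimal model of it: a firmware section is streamed through a bounce buffer chunk by chunk, and chunks whose destination falls in the 0x40000-0x57FFF window need the extended-address bit toggled around the DMA. The printf() stands in for the real iwl_pcie_load_firmware_chunk() call, and the sizes in main() are made up.

#include <stdint.h>
#include <stdio.h>

#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END   0x57FFF

static void load_section(uint32_t dst_base, uint32_t section_len,
                         uint32_t chunk_sz)
{
        uint32_t offset;

        for (offset = 0; offset < section_len; offset += chunk_sz) {
                uint32_t copy_size = section_len - offset;
                uint32_t dst_addr = dst_base + offset;
                int extended = dst_addr >= IWL_FW_MEM_EXTENDED_START &&
                               dst_addr <= IWL_FW_MEM_EXTENDED_END;

                if (copy_size > chunk_sz)
                        copy_size = chunk_sz;   /* last, shorter chunk */

                printf("chunk @0x%05x len %u%s\n",
                       (unsigned)dst_addr, (unsigned)copy_size,
                       extended ? " (extended SRAM window)" : "");
        }
}

int main(void)
{
        /* e.g. a 40 KiB section at 0x3e000 crossing into the extended window */
        load_section(0x3e000, 40 * 1024, 16 * 1024);
        return 0;
}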
- */ - if (!image->sec[i].data || - image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || - image->sec[i].offset == PAGING_SEPARATOR_SECTION) { - IWL_DEBUG_FW(trans, - "Break since Data not valid or Empty section, sec = %d\n", - i); - break; - } - - ret = iwl_pcie_load_section(trans, i, &image->sec[i]); - if (ret) - return ret; - - /* Notify the ucode of the loaded section number and status */ - val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); - val = val | (sec_num << shift_param); - iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); - sec_num = (sec_num << 1) | 0x1; - } - - *first_ucode_section = last_read_idx; - - if (cpu == 1) - iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF); - else - iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); - - return 0; -} - -static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, - const struct fw_img *image, - int cpu, - int *first_ucode_section) -{ - int shift_param; - int i, ret = 0; - u32 last_read_idx = 0; - - if (cpu == 1) { - shift_param = 0; - *first_ucode_section = 0; - } else { - shift_param = 16; - (*first_ucode_section)++; - } - - for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { - last_read_idx = i; - - /* - * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between - * CPU1 to CPU2. - * PAGING_SEPARATOR_SECTION delimiter - separate between - * CPU2 non paged to CPU2 paging sec. - */ - if (!image->sec[i].data || - image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || - image->sec[i].offset == PAGING_SEPARATOR_SECTION) { - IWL_DEBUG_FW(trans, - "Break since Data not valid or Empty section, sec = %d\n", - i); - break; - } - - ret = iwl_pcie_load_section(trans, i, &image->sec[i]); - if (ret) - return ret; - } - - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - iwl_set_bits_prph(trans, - CSR_UCODE_LOAD_STATUS_ADDR, - (LMPM_CPU_UCODE_LOADING_COMPLETED | - LMPM_CPU_HDRS_LOADING_COMPLETED | - LMPM_CPU_UCODE_LOADING_STARTED) << - shift_param); - - *first_ucode_section = last_read_idx; - - return 0; -} - -static void iwl_pcie_apply_destination(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; - int i; - - if (dest->version) - IWL_ERR(trans, - "DBG DEST version is %d - expect issues\n", - dest->version); - - IWL_INFO(trans, "Applying debug destination %s\n", - get_fw_dbg_mode_string(dest->monitor_mode)); - - if (dest->monitor_mode == EXTERNAL_MODE) - iwl_pcie_alloc_fw_monitor(trans, dest->size_power); - else - IWL_WARN(trans, "PCI should have external buffer debug\n"); - - for (i = 0; i < trans->dbg_dest_reg_num; i++) { - u32 addr = le32_to_cpu(dest->reg_ops[i].addr); - u32 val = le32_to_cpu(dest->reg_ops[i].val); - - switch (dest->reg_ops[i].op) { - case CSR_ASSIGN: - iwl_write32(trans, addr, val); - break; - case CSR_SETBIT: - iwl_set_bit(trans, addr, BIT(val)); - break; - case CSR_CLEARBIT: - iwl_clear_bit(trans, addr, BIT(val)); - break; - case PRPH_ASSIGN: - iwl_write_prph(trans, addr, val); - break; - case PRPH_SETBIT: - iwl_set_bits_prph(trans, addr, BIT(val)); - break; - case PRPH_CLEARBIT: - iwl_clear_bits_prph(trans, addr, BIT(val)); - break; - case PRPH_BLOCKBIT: - if (iwl_read_prph(trans, addr) & BIT(val)) { - IWL_ERR(trans, - "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", - val, addr); - goto monitor; - } - break; - default: - IWL_ERR(trans, "FW debug - unknown OP %d\n", - dest->reg_ops[i].op); - break; - } - } - -monitor: - if (dest->monitor_mode == 
EXTERNAL_MODE && trans_pcie->fw_mon_size) { - iwl_write_prph(trans, le32_to_cpu(dest->base_reg), - trans_pcie->fw_mon_phys >> dest->base_shift); - iwl_write_prph(trans, le32_to_cpu(dest->end_reg), - (trans_pcie->fw_mon_phys + - trans_pcie->fw_mon_size) >> dest->end_shift); - } -} - -static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, - const struct fw_img *image) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret = 0; - int first_ucode_section; - - IWL_DEBUG_FW(trans, "working with %s CPU\n", - image->is_dual_cpus ? "Dual" : "Single"); - - /* load to FW the binary non secured sections of CPU1 */ - ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); - if (ret) - return ret; - - if (image->is_dual_cpus) { - /* set CPU2 header address */ - iwl_write_prph(trans, - LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, - LMPM_SECURE_CPU2_HDR_MEM_SPACE); - - /* load to FW the binary sections of CPU2 */ - ret = iwl_pcie_load_cpu_sections(trans, image, 2, - &first_ucode_section); - if (ret) - return ret; - } - - /* supported for 7000 only for the moment */ - if (iwlwifi_mod_params.fw_monitor && - trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - iwl_pcie_alloc_fw_monitor(trans, 0); - - if (trans_pcie->fw_mon_size) { - iwl_write_prph(trans, MON_BUFF_BASE_ADDR, - trans_pcie->fw_mon_phys >> 4); - iwl_write_prph(trans, MON_BUFF_END_ADDR, - (trans_pcie->fw_mon_phys + - trans_pcie->fw_mon_size) >> 4); - } - } else if (trans->dbg_dest_tlv) { - iwl_pcie_apply_destination(trans); - } - - /* release CPU reset */ - iwl_write32(trans, CSR_RESET, 0); - - return 0; -} - -static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, - const struct fw_img *image) -{ - int ret = 0; - int first_ucode_section; - - IWL_DEBUG_FW(trans, "working with %s CPU\n", - image->is_dual_cpus ? 
"Dual" : "Single"); - - if (trans->dbg_dest_tlv) - iwl_pcie_apply_destination(trans); - - /* TODO: remove in the next Si step */ - ret = iwl_pcie_rsa_race_bug_wa(trans); - if (ret) - return ret; - - /* configure the ucode to be ready to get the secured image */ - /* release CPU reset */ - iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); - - /* load to FW the binary Secured sections of CPU1 */ - ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1, - &first_ucode_section); - if (ret) - return ret; - - /* load to FW the binary sections of CPU2 */ - return iwl_pcie_load_cpu_sections_8000(trans, image, 2, - &first_ucode_section); -} - -static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, bool run_in_rfkill) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - bool hw_rfkill; - int ret; - - mutex_lock(&trans_pcie->mutex); - - /* Someone called stop_device, don't try to start_fw */ - if (trans_pcie->is_down) { - IWL_WARN(trans, - "Can't start_fw since the HW hasn't been started\n"); - ret = EIO; - goto out; - } - - /* This may fail if AMT took ownership of the device */ - if (iwl_pcie_prepare_card_hw(trans)) { - IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; - } - - iwl_enable_rfkill_int(trans); - - /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = iwl_is_rfkill_set(trans); - if (hw_rfkill) - set_bit(STATUS_RFKILL, &trans->status); - else - clear_bit(STATUS_RFKILL, &trans->status); - iwl_trans_pcie_rf_kill(trans, hw_rfkill); - if (hw_rfkill && !run_in_rfkill) { - ret = -ERFKILL; - goto out; - } - - iwl_write32(trans, CSR_INT, 0xFFFFFFFF); - - ret = iwl_pcie_nic_init(trans); - if (ret) { - IWL_ERR(trans, "Unable to init nic\n"); - goto out; - } - - /* make sure rfkill handshake bits are cleared */ - iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - /* clear (again), then enable host interrupts */ - iwl_write32(trans, CSR_INT, 0xFFFFFFFF); - iwl_enable_interrupts(trans); - - /* really make sure rfkill handshake bits are cleared */ - iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - - /* Load the given image to the HW */ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - ret = iwl_pcie_load_given_ucode_8000(trans, fw); - else - ret = iwl_pcie_load_given_ucode(trans, fw); - -out: - mutex_unlock(&trans_pcie->mutex); - return ret; -} - -static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) -{ - iwl_pcie_reset_ict(trans); - iwl_pcie_tx_start(trans, scd_addr); -} - -static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - bool hw_rfkill, was_hw_rfkill; - - lockdep_assert_held(&trans_pcie->mutex); - - if (trans_pcie->is_down) - return; - - trans_pcie->is_down = true; - - was_hw_rfkill = iwl_is_rfkill_set(trans); - - /* tell the device to stop sending interrupts */ - spin_lock(&trans_pcie->irq_lock); - iwl_disable_interrupts(trans); - spin_unlock(&trans_pcie->irq_lock); - - /* device going down, Stop using ICT table */ - iwl_pcie_disable_ict(trans); - - /* - * If a HW restart happens during firmware loading, - * then the firmware loading might call this function - * and later it might be called again due to the - * restart. 
So don't process again if the device is - * already dead. - */ - if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { - IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); - iwl_pcie_tx_stop(trans); - iwl_pcie_rx_stop(trans); - - /* Power-down device's busmaster DMA clocks */ - if (!trans->cfg->apmg_not_supported) { - iwl_write_prph(trans, APMG_CLK_DIS_REG, - APMG_CLK_VAL_DMA_CLK_RQT); - udelay(5); - } - } - - /* Make sure (redundant) we've released our request to stay awake */ - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - /* Stop the device, and put it in low power state */ - iwl_pcie_apm_stop(trans, false); - - /* stop and reset the on-board processor */ - iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - udelay(20); - - /* - * Upon stop, the APM issues an interrupt if HW RF kill is set. - * This is a bug in certain verions of the hardware. - * Certain devices also keep sending HW RF kill interrupt all - * the time, unless the interrupt is ACKed even if the interrupt - * should be masked. Re-ACK all the interrupts here. - */ - spin_lock(&trans_pcie->irq_lock); - iwl_disable_interrupts(trans); - spin_unlock(&trans_pcie->irq_lock); - - - /* clear all status bits */ - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - clear_bit(STATUS_INT_ENABLED, &trans->status); - clear_bit(STATUS_TPOWER_PMI, &trans->status); - clear_bit(STATUS_RFKILL, &trans->status); - - /* - * Even if we stop the HW, we still want the RF kill - * interrupt - */ - iwl_enable_rfkill_int(trans); - - /* - * Check again since the RF kill state may have changed while - * all the interrupts were disabled, in this case we couldn't - * receive the RF kill interrupt and update the state in the - * op_mode. - * Don't call the op_mode if the rkfill state hasn't changed. - * This allows the op_mode to call stop_device from the rfkill - * notification without endless recursion. Under very rare - * circumstances, we might have a small recursion if the rfkill - * state changed exactly now while we were called from stop_device. - * This is very unlikely but can happen and is supported. 
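The rule spelled out in the comment above (re-read the rfkill switch after interrupts were disabled, but tell the op_mode only when the state actually changed since stop_device started) is what keeps the stop_device / rf_kill recursion bounded. A minimal sketch of just that decision, separate from the code that follows:

#include <stdbool.h>
#include <stdio.h>

static void op_mode_rf_kill(bool state)
{
        printf("op_mode notified: rfkill %s\n", state ? "on" : "off");
        /* in the driver this path may call stop_device again */
}

static void stop_device_tail(bool was_hw_rfkill, bool hw_rfkill_now)
{
        if (hw_rfkill_now != was_hw_rfkill)
                op_mode_rf_kill(hw_rfkill_now);
        else
                printf("rfkill unchanged, no notification\n");
}

int main(void)
{
        stop_device_tail(false, true);  /* switch flipped while irqs were off */
        stop_device_tail(true, true);   /* unchanged: stay silent */
        return 0;
}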
- */ - hw_rfkill = iwl_is_rfkill_set(trans); - if (hw_rfkill) - set_bit(STATUS_RFKILL, &trans->status); - else - clear_bit(STATUS_RFKILL, &trans->status); - if (hw_rfkill != was_hw_rfkill) - iwl_trans_pcie_rf_kill(trans, hw_rfkill); - - /* re-take ownership to prevent other users from stealing the deivce */ - iwl_pcie_prepare_card_hw(trans); -} - -static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - mutex_lock(&trans_pcie->mutex); - _iwl_trans_pcie_stop_device(trans, low_power); - mutex_unlock(&trans_pcie->mutex); -} - -void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) -{ - struct iwl_trans_pcie __maybe_unused *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - lockdep_assert_held(&trans_pcie->mutex); - - if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) - _iwl_trans_pcie_stop_device(trans, true); -} - -static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (trans->wowlan_d0i3) { - /* Enable persistence mode to avoid reset */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PERSIST_MODE); - } - - iwl_disable_interrupts(trans); - - /* - * in testing mode, the host stays awake and the - * hardware won't be reset (not even partially) - */ - if (test) - return; - - iwl_pcie_disable_ict(trans); - - synchronize_irq(trans_pcie->pci_dev->irq); - - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - if (!trans->wowlan_d0i3) { - /* - * reset TX queues -- some of their registers reset during S3 - * so if we don't reset everything here the D3 image would try - * to execute some invalid memory upon resume - */ - iwl_trans_pcie_tx_reset(trans); - } - - iwl_pcie_set_pwr(trans, true); -} - -static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status, - bool test) -{ - u32 val; - int ret; - - if (test) { - iwl_enable_interrupts(trans); - *status = IWL_D3_STATUS_ALIVE; - return 0; - } - - /* - * Also enables interrupts - none will happen as the device doesn't - * know we're waking it up, only when the opmode actually tells it - * after this call. 
- */ - iwl_pcie_reset_ict(trans); - - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - udelay(2); - - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - 25000); - if (ret < 0) { - IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); - return ret; - } - - iwl_pcie_set_pwr(trans, false); - - if (trans->wowlan_d0i3) { - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - } else { - iwl_trans_pcie_tx_reset(trans); - - ret = iwl_pcie_rx_init(trans); - if (ret) { - IWL_ERR(trans, - "Failed to resume the device (RX reset)\n"); - return ret; - } - } - - val = iwl_read32(trans, CSR_RESET); - if (val & CSR_RESET_REG_FLAG_NEVO_RESET) - *status = IWL_D3_STATUS_RESET; - else - *status = IWL_D3_STATUS_ALIVE; - - return 0; -} - -static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - bool hw_rfkill; - int err; - - lockdep_assert_held(&trans_pcie->mutex); - - err = iwl_pcie_prepare_card_hw(trans); - if (err) { - IWL_ERR(trans, "Error while preparing HW: %d\n", err); - return err; - } - - /* Reset the entire device */ - iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - usleep_range(10, 15); - - iwl_pcie_apm_init(trans); - - /* From now on, the op_mode will be kept updated about RF kill state */ - iwl_enable_rfkill_int(trans); - - /* Set is_down to false here so that...*/ - trans_pcie->is_down = false; - - hw_rfkill = iwl_is_rfkill_set(trans); - if (hw_rfkill) - set_bit(STATUS_RFKILL, &trans->status); - else - clear_bit(STATUS_RFKILL, &trans->status); - /* ... 
rfkill can call stop_device and set it false if needed */ - iwl_trans_pcie_rf_kill(trans, hw_rfkill); - - return 0; -} - -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - mutex_lock(&trans_pcie->mutex); - ret = _iwl_trans_pcie_start_hw(trans, low_power); - mutex_unlock(&trans_pcie->mutex); - - return ret; -} - -static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - mutex_lock(&trans_pcie->mutex); - - /* disable interrupts - don't enable HW RF kill interrupt */ - spin_lock(&trans_pcie->irq_lock); - iwl_disable_interrupts(trans); - spin_unlock(&trans_pcie->irq_lock); - - iwl_pcie_apm_stop(trans, true); - - spin_lock(&trans_pcie->irq_lock); - iwl_disable_interrupts(trans); - spin_unlock(&trans_pcie->irq_lock); - - iwl_pcie_disable_ict(trans); - - mutex_unlock(&trans_pcie->mutex); - - synchronize_irq(trans_pcie->pci_dev->irq); -} - -static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) -{ - writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); -} - -static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) -{ - writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); -} - -static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) -{ - return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); -} - -static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) -{ - iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, - ((reg & 0x000FFFFF) | (3 << 24))); - return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); -} - -static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, - u32 val) -{ - iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, - ((addr & 0x000FFFFF) | (3 << 24))); - iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); -} - -static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) -{ - WARN_ON(1); - return 0; -} - -static void iwl_trans_pcie_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - trans_pcie->cmd_queue = trans_cfg->cmd_queue; - trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; - trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout; - if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) - trans_pcie->n_no_reclaim_cmds = 0; - else - trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; - if (trans_pcie->n_no_reclaim_cmds) - memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, - trans_pcie->n_no_reclaim_cmds * sizeof(u8)); - - trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k; - if (trans_pcie->rx_buf_size_8k) - trans_pcie->rx_page_order = get_order(8 * 1024); - else - trans_pcie->rx_page_order = get_order(4 * 1024); - - trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header; - trans_pcie->command_names = trans_cfg->command_names; - trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; - trans_pcie->scd_set_active = trans_cfg->scd_set_active; - - /* init ref_count to 1 (should be cleared when ucode is loaded) */ - trans_pcie->ref_count = 1; - - /* Initialize NAPI here - it should be before registering to mac80211 - * in the opmode but after the HW struct is allocated. - * As this function may be called again in some corner cases don't - * do anything if NAPI was already initialized. 
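The read_prph()/write_prph() helpers earlier in this hunk implement an indirect access scheme: the target address, with the 3 << 24 control bits used by the driver, is latched through HBUS_TARG_PRPH_WADDR or HBUS_TARG_PRPH_RADDR, and the data then moves through the matching data port. The toy below mimics that flow against a plain array; the single shared latch and the array-backed "device" are simplifications for illustration, not how the hardware behaves.

#include <stdint.h>
#include <stdio.h>

#define PRPH_WORDS 1024

static uint32_t prph_mem[PRPH_WORDS];   /* fake periphery address space */
static uint32_t addr_latch;             /* models the HBUS_TARG_PRPH address ports */

static uint32_t *latched_word(void)
{
        /* the low 20 bits select the periphery address, as in the driver */
        return &prph_mem[((addr_latch & 0x000FFFFF) / 4) % PRPH_WORDS];
}

static void write_prph(uint32_t addr, uint32_t val)
{
        addr_latch = (addr & 0x000FFFFF) | (3 << 24);   /* WADDR encoding */
        *latched_word() = val;                          /* models the WDAT port */
}

static uint32_t read_prph(uint32_t addr)
{
        addr_latch = (addr & 0x000FFFFF) | (3 << 24);   /* RADDR encoding */
        return *latched_word();                         /* models the RDAT port */
}

int main(void)
{
        write_prph(0x100, 0xdeadbeef);
        printf("prph[0x100] = 0x%08x\n", (unsigned)read_prph(0x100));
        return 0;
}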
- */ - if (!trans_pcie->napi.poll) { - init_dummy_netdev(&trans_pcie->napi_dev); - netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, - iwl_pcie_dummy_napi_poll, 64); - } -} - -void iwl_trans_pcie_free(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - synchronize_irq(trans_pcie->pci_dev->irq); - - iwl_pcie_tx_free(trans); - iwl_pcie_rx_free(trans); - - free_irq(trans_pcie->pci_dev->irq, trans); - iwl_pcie_free_ict(trans); - - pci_disable_msi(trans_pcie->pci_dev); - iounmap(trans_pcie->hw_base); - pci_release_regions(trans_pcie->pci_dev); - pci_disable_device(trans_pcie->pci_dev); - - if (trans_pcie->napi.poll) - netif_napi_del(&trans_pcie->napi); - - iwl_pcie_free_fw_monitor(trans); - - iwl_trans_free(trans); -} - -static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) -{ - if (state) - set_bit(STATUS_TPOWER_PMI, &trans->status); - else - clear_bit(STATUS_TPOWER_PMI, &trans->status); -} - -static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, - unsigned long *flags) -{ - int ret; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - spin_lock_irqsave(&trans_pcie->reg_lock, *flags); - - if (trans_pcie->cmd_hold_nic_awake) - goto out; - - /* this bit wakes up the NIC */ - __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - udelay(2); - - /* - * These bits say the device is running, and should keep running for - * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), - * but they do not indicate that embedded SRAM is restored yet; - * 3945 and 4965 have volatile SRAM, and must save/restore contents - * to/from host DRAM when sleeping/waking for power-saving. - * Each direction takes approximately 1/4 millisecond; with this - * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a - * series of register accesses are expected (e.g. reading Event Log), - * to keep device from sleeping. - * - * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that - * SRAM is okay/restored. We don't check that here because this call - * is just for hardware register access; but GP1 MAC_SLEEP check is a - * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). - * - * 5000 series and later (including 1000 series) have non-volatile SRAM, - * and do not save/restore SRAM when power cycling. - */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); - if (unlikely(ret < 0)) { - iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); - if (!silent) { - u32 val = iwl_read32(trans, CSR_GP_CNTRL); - WARN_ONCE(1, - "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", - val); - spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); - return false; - } - } - -out: - /* - * Fool sparse by faking we release the lock - sparse will - * track nic_access anyway. - */ - __release(&trans_pcie->reg_lock); - return true; -} - -static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, - unsigned long *flags) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - lockdep_assert_held(&trans_pcie->reg_lock); - - /* - * Fool sparse by faking we acquiring the lock - sparse will - * track nic_access anyway. 
- */ - __acquire(&trans_pcie->reg_lock); - - if (trans_pcie->cmd_hold_nic_awake) - goto out; - - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - /* - * Above we read the CSR_GP_CNTRL register, which will flush - * any previous writes, but we need the write that clears the - * MAC_ACCESS_REQ bit to be performed before any other writes - * scheduled on different CPUs (after we drop reg_lock). - */ - mmiowb(); -out: - spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); -} - -static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) -{ - unsigned long flags; - int offs, ret = 0; - u32 *vals = buf; - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); - for (offs = 0; offs < dwords; offs++) - vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - iwl_trans_release_nic_access(trans, &flags); - } else { - ret = -EBUSY; - } - return ret; -} - -static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords) -{ - unsigned long flags; - int offs, ret = 0; - const u32 *vals = buf; - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); - for (offs = 0; offs < dwords; offs++) - iwl_write32(trans, HBUS_TARG_MEM_WDAT, - vals ? vals[offs] : 0); - iwl_trans_release_nic_access(trans, &flags); - } else { - ret = -EBUSY; - } - return ret; -} - -static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, - unsigned long txqs, - bool freeze) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int queue; - - for_each_set_bit(queue, &txqs, BITS_PER_LONG) { - struct iwl_txq *txq = &trans_pcie->txq[queue]; - unsigned long now; - - spin_lock_bh(&txq->lock); - - now = jiffies; - - if (txq->frozen == freeze) - goto next_queue; - - IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", - freeze ? "Freezing" : "Waking", queue); - - txq->frozen = freeze; - - if (txq->q.read_ptr == txq->q.write_ptr) - goto next_queue; - - if (freeze) { - if (unlikely(time_after(now, - txq->stuck_timer.expires))) { - /* - * The timer should have fired, maybe it is - * spinning right now on the lock. 
- */ - goto next_queue; - } - /* remember how long until the timer fires */ - txq->frozen_expiry_remainder = - txq->stuck_timer.expires - now; - del_timer(&txq->stuck_timer); - goto next_queue; - } - - /* - * Wake a non-empty queue -> arm timer with the - * remainder before it froze - */ - mod_timer(&txq->stuck_timer, - now + txq->frozen_expiry_remainder); - -next_queue: - spin_unlock_bh(&txq->lock); - } -} - -#define IWL_FLUSH_WAIT_MS 2000 - -static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq; - struct iwl_queue *q; - int cnt; - unsigned long now = jiffies; - u32 scd_sram_addr; - u8 buf[16]; - int ret = 0; - - /* waiting for all the tx frames complete might take a while */ - for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { - u8 wr_ptr; - - if (cnt == trans_pcie->cmd_queue) - continue; - if (!test_bit(cnt, trans_pcie->queue_used)) - continue; - if (!(BIT(cnt) & txq_bm)) - continue; - - IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt); - txq = &trans_pcie->txq[cnt]; - q = &txq->q; - wr_ptr = ACCESS_ONCE(q->write_ptr); - - while (q->read_ptr != ACCESS_ONCE(q->write_ptr) && - !time_after(jiffies, - now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { - u8 write_ptr = ACCESS_ONCE(q->write_ptr); - - if (WARN_ONCE(wr_ptr != write_ptr, - "WR pointer moved while flushing %d -> %d\n", - wr_ptr, write_ptr)) - return -ETIMEDOUT; - msleep(1); - } - - if (q->read_ptr != q->write_ptr) { - IWL_ERR(trans, - "fail to flush all tx fifo queues Q %d\n", cnt); - ret = -ETIMEDOUT; - break; - } - IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt); - } - - if (!ret) - return 0; - - IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", - txq->q.read_ptr, txq->q.write_ptr); - - scd_sram_addr = trans_pcie->scd_base_addr + - SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); - iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); - - iwl_print_hex_error(trans, buf, sizeof(buf)); - - for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++) - IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt, - iwl_read_direct32(trans, FH_TX_TRB_REG(cnt))); - - for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { - u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt)); - u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - u32 tbl_dw = - iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(cnt)); - - if (cnt & 0x1) - tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; - else - tbl_dw = tbl_dw & 0x0000FFFF; - - IWL_ERR(trans, - "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", - cnt, active ? 
"" : "in", fifo, tbl_dw, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) & - (TFD_QUEUE_SIZE_MAX - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); - } - - return ret; -} - -static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, - u32 mask, u32 value) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; - - spin_lock_irqsave(&trans_pcie->reg_lock, flags); - __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); -} - -void iwl_trans_pcie_ref(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; - - if (iwlwifi_mod_params.d0i3_disable) - return; - - spin_lock_irqsave(&trans_pcie->ref_lock, flags); - IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); - trans_pcie->ref_count++; - spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); -} - -void iwl_trans_pcie_unref(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; - - if (iwlwifi_mod_params.d0i3_disable) - return; - - spin_lock_irqsave(&trans_pcie->ref_lock, flags); - IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); - if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) { - spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); - return; - } - trans_pcie->ref_count--; - spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); -} - -static const char *get_csr_string(int cmd) -{ -#define IWL_CMD(x) case x: return #x - switch (cmd) { - IWL_CMD(CSR_HW_IF_CONFIG_REG); - IWL_CMD(CSR_INT_COALESCING); - IWL_CMD(CSR_INT); - IWL_CMD(CSR_INT_MASK); - IWL_CMD(CSR_FH_INT_STATUS); - IWL_CMD(CSR_GPIO_IN); - IWL_CMD(CSR_RESET); - IWL_CMD(CSR_GP_CNTRL); - IWL_CMD(CSR_HW_REV); - IWL_CMD(CSR_EEPROM_REG); - IWL_CMD(CSR_EEPROM_GP); - IWL_CMD(CSR_OTP_GP_REG); - IWL_CMD(CSR_GIO_REG); - IWL_CMD(CSR_GP_UCODE_REG); - IWL_CMD(CSR_GP_DRIVER_REG); - IWL_CMD(CSR_UCODE_DRV_GP1); - IWL_CMD(CSR_UCODE_DRV_GP2); - IWL_CMD(CSR_LED_REG); - IWL_CMD(CSR_DRAM_INT_TBL_REG); - IWL_CMD(CSR_GIO_CHICKEN_BITS); - IWL_CMD(CSR_ANA_PLL_CFG); - IWL_CMD(CSR_HW_REV_WA_REG); - IWL_CMD(CSR_MONITOR_STATUS_REG); - IWL_CMD(CSR_DBG_HPET_MEM_REG); - default: - return "UNKNOWN"; - } -#undef IWL_CMD -} - -void iwl_pcie_dump_csr(struct iwl_trans *trans) -{ - int i; - static const u32 csr_tbl[] = { - CSR_HW_IF_CONFIG_REG, - CSR_INT_COALESCING, - CSR_INT, - CSR_INT_MASK, - CSR_FH_INT_STATUS, - CSR_GPIO_IN, - CSR_RESET, - CSR_GP_CNTRL, - CSR_HW_REV, - CSR_EEPROM_REG, - CSR_EEPROM_GP, - CSR_OTP_GP_REG, - CSR_GIO_REG, - CSR_GP_UCODE_REG, - CSR_GP_DRIVER_REG, - CSR_UCODE_DRV_GP1, - CSR_UCODE_DRV_GP2, - CSR_LED_REG, - CSR_DRAM_INT_TBL_REG, - CSR_GIO_CHICKEN_BITS, - CSR_ANA_PLL_CFG, - CSR_MONITOR_STATUS_REG, - CSR_HW_REV_WA_REG, - CSR_DBG_HPET_MEM_REG - }; - IWL_ERR(trans, "CSR values:\n"); - IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " - "CSR_INT_PERIODIC_REG)\n"); - for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { - IWL_ERR(trans, " %25s: 0X%08x\n", - get_csr_string(csr_tbl[i]), - iwl_read32(trans, csr_tbl[i])); - } -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -/* create and remove of files */ -#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, trans, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ -} while (0) - -/* file operation */ -#define DEBUGFS_READ_FILE_OPS(name) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .read = iwl_dbgfs_##name##_read, \ - .open = 
simple_open, \ - .llseek = generic_file_llseek, \ -}; - -#define DEBUGFS_WRITE_FILE_OPS(name) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .write = iwl_dbgfs_##name##_write, \ - .open = simple_open, \ - .llseek = generic_file_llseek, \ -}; - -#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ -static const struct file_operations iwl_dbgfs_##name##_ops = { \ - .write = iwl_dbgfs_##name##_write, \ - .read = iwl_dbgfs_##name##_read, \ - .open = simple_open, \ - .llseek = generic_file_llseek, \ -}; - -static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_trans *trans = file->private_data; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq; - struct iwl_queue *q; - char *buf; - int pos = 0; - int cnt; - int ret; - size_t bufsz; - - bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; - - if (!trans_pcie->txq) - return -EAGAIN; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { - txq = &trans_pcie->txq[cnt]; - q = &txq->q; - pos += scnprintf(buf + pos, bufsz - pos, - "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", - cnt, q->read_ptr, q->write_ptr, - !!test_bit(cnt, trans_pcie->queue_used), - !!test_bit(cnt, trans_pcie->queue_stopped), - txq->need_update, txq->frozen, - (cnt == trans_pcie->cmd_queue ? " HCMD" : "")); - } - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_trans *trans = file->private_data; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", - rxq->read); - pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", - rxq->write); - pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n", - rxq->write_actual); - pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n", - rxq->need_update); - pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", - rxq->free_count); - if (rxq->rb_stts) { - pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", - le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); - } else { - pos += scnprintf(buf + pos, bufsz - pos, - "closed_rb_num: Not Allocated\n"); - } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_interrupt_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_trans *trans = file->private_data; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct isr_statistics *isr_stats = &trans_pcie->isr_stats; - - int pos = 0; - char *buf; - int bufsz = 24 * 64; /* 24 items * 64 char per item */ - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, - "Interrupt Statistics Report:\n"); - - pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", - isr_stats->hw); - pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", - isr_stats->sw); - if (isr_stats->sw || isr_stats->hw) { - pos += scnprintf(buf + pos, bufsz - pos, - "\tLast Restarting Code: 0x%X\n", - isr_stats->err_code); - } -#ifdef CONFIG_IWLWIFI_DEBUG - 
pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", - isr_stats->sch); - pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", - isr_stats->alive); -#endif - pos += scnprintf(buf + pos, bufsz - pos, - "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); - - pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", - isr_stats->ctkill); - - pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", - isr_stats->wakeup); - - pos += scnprintf(buf + pos, bufsz - pos, - "Rx command responses:\t\t %u\n", isr_stats->rx); - - pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", - isr_stats->tx); - - pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", - isr_stats->unhandled); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_interrupt_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_trans *trans = file->private_data; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct isr_statistics *isr_stats = &trans_pcie->isr_stats; - - char buf[8]; - int buf_size; - u32 reset_flag; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%x", &reset_flag) != 1) - return -EFAULT; - if (reset_flag == 0) - memset(isr_stats, 0, sizeof(*isr_stats)); - - return count; -} - -static ssize_t iwl_dbgfs_csr_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_trans *trans = file->private_data; - char buf[8]; - int buf_size; - int csr; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &csr) != 1) - return -EFAULT; - - iwl_pcie_dump_csr(trans); - - return count; -} - -static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_trans *trans = file->private_data; - char *buf = NULL; - ssize_t ret; - - ret = iwl_dump_fh(trans, &buf); - if (ret < 0) - return ret; - if (!buf) - return -EINVAL; - ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); - kfree(buf); - return ret; -} - -DEBUGFS_READ_WRITE_FILE_OPS(interrupt); -DEBUGFS_READ_FILE_OPS(fh_reg); -DEBUGFS_READ_FILE_OPS(rx_queue); -DEBUGFS_READ_FILE_OPS(tx_queue); -DEBUGFS_WRITE_FILE_OPS(csr); - -/* - * Create the debugfs files and directories - * - */ -static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, - struct dentry *dir) -{ - DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); - DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); - DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); - DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); - return 0; - -err: - IWL_ERR(trans, "failed to create the trans debugfs entry\n"); - return -ENOMEM; -} -#else -static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, - struct dentry *dir) -{ - return 0; -} -#endif /*CONFIG_IWLWIFI_DEBUGFS */ - -static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd) -{ - u32 cmdlen = 0; - int i; - - for (i = 0; i < IWL_NUM_OF_TBS; i++) - cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i); - - return cmdlen; -} - -static const struct { - u32 start, end; -} iwl_prph_dump_addr[] = { - { .start = 0x00a00000, .end = 0x00a00000 }, - { .start = 0x00a0000c, .end = 0x00a00024 }, - { .start = 0x00a0002c, 
.end = 0x00a0003c }, - { .start = 0x00a00410, .end = 0x00a00418 }, - { .start = 0x00a00420, .end = 0x00a00420 }, - { .start = 0x00a00428, .end = 0x00a00428 }, - { .start = 0x00a00430, .end = 0x00a0043c }, - { .start = 0x00a00444, .end = 0x00a00444 }, - { .start = 0x00a004c0, .end = 0x00a004cc }, - { .start = 0x00a004d8, .end = 0x00a004d8 }, - { .start = 0x00a004e0, .end = 0x00a004f0 }, - { .start = 0x00a00840, .end = 0x00a00840 }, - { .start = 0x00a00850, .end = 0x00a00858 }, - { .start = 0x00a01004, .end = 0x00a01008 }, - { .start = 0x00a01010, .end = 0x00a01010 }, - { .start = 0x00a01018, .end = 0x00a01018 }, - { .start = 0x00a01024, .end = 0x00a01024 }, - { .start = 0x00a0102c, .end = 0x00a01034 }, - { .start = 0x00a0103c, .end = 0x00a01040 }, - { .start = 0x00a01048, .end = 0x00a01094 }, - { .start = 0x00a01c00, .end = 0x00a01c20 }, - { .start = 0x00a01c58, .end = 0x00a01c58 }, - { .start = 0x00a01c7c, .end = 0x00a01c7c }, - { .start = 0x00a01c28, .end = 0x00a01c54 }, - { .start = 0x00a01c5c, .end = 0x00a01c5c }, - { .start = 0x00a01c60, .end = 0x00a01cdc }, - { .start = 0x00a01ce0, .end = 0x00a01d0c }, - { .start = 0x00a01d18, .end = 0x00a01d20 }, - { .start = 0x00a01d2c, .end = 0x00a01d30 }, - { .start = 0x00a01d40, .end = 0x00a01d5c }, - { .start = 0x00a01d80, .end = 0x00a01d80 }, - { .start = 0x00a01d98, .end = 0x00a01d9c }, - { .start = 0x00a01da8, .end = 0x00a01da8 }, - { .start = 0x00a01db8, .end = 0x00a01df4 }, - { .start = 0x00a01dc0, .end = 0x00a01dfc }, - { .start = 0x00a01e00, .end = 0x00a01e2c }, - { .start = 0x00a01e40, .end = 0x00a01e60 }, - { .start = 0x00a01e68, .end = 0x00a01e6c }, - { .start = 0x00a01e74, .end = 0x00a01e74 }, - { .start = 0x00a01e84, .end = 0x00a01e90 }, - { .start = 0x00a01e9c, .end = 0x00a01ec4 }, - { .start = 0x00a01ed0, .end = 0x00a01ee0 }, - { .start = 0x00a01f00, .end = 0x00a01f1c }, - { .start = 0x00a01f44, .end = 0x00a01ffc }, - { .start = 0x00a02000, .end = 0x00a02048 }, - { .start = 0x00a02068, .end = 0x00a020f0 }, - { .start = 0x00a02100, .end = 0x00a02118 }, - { .start = 0x00a02140, .end = 0x00a0214c }, - { .start = 0x00a02168, .end = 0x00a0218c }, - { .start = 0x00a021c0, .end = 0x00a021c0 }, - { .start = 0x00a02400, .end = 0x00a02410 }, - { .start = 0x00a02418, .end = 0x00a02420 }, - { .start = 0x00a02428, .end = 0x00a0242c }, - { .start = 0x00a02434, .end = 0x00a02434 }, - { .start = 0x00a02440, .end = 0x00a02460 }, - { .start = 0x00a02468, .end = 0x00a024b0 }, - { .start = 0x00a024c8, .end = 0x00a024cc }, - { .start = 0x00a02500, .end = 0x00a02504 }, - { .start = 0x00a0250c, .end = 0x00a02510 }, - { .start = 0x00a02540, .end = 0x00a02554 }, - { .start = 0x00a02580, .end = 0x00a025f4 }, - { .start = 0x00a02600, .end = 0x00a0260c }, - { .start = 0x00a02648, .end = 0x00a02650 }, - { .start = 0x00a02680, .end = 0x00a02680 }, - { .start = 0x00a026c0, .end = 0x00a026d0 }, - { .start = 0x00a02700, .end = 0x00a0270c }, - { .start = 0x00a02804, .end = 0x00a02804 }, - { .start = 0x00a02818, .end = 0x00a0281c }, - { .start = 0x00a02c00, .end = 0x00a02db4 }, - { .start = 0x00a02df4, .end = 0x00a02fb0 }, - { .start = 0x00a03000, .end = 0x00a03014 }, - { .start = 0x00a0301c, .end = 0x00a0302c }, - { .start = 0x00a03034, .end = 0x00a03038 }, - { .start = 0x00a03040, .end = 0x00a03048 }, - { .start = 0x00a03060, .end = 0x00a03068 }, - { .start = 0x00a03070, .end = 0x00a03074 }, - { .start = 0x00a0307c, .end = 0x00a0307c }, - { .start = 0x00a03080, .end = 0x00a03084 }, - { .start = 0x00a0308c, .end = 0x00a03090 }, - { .start = 0x00a03098, .end = 
0x00a03098 }, - { .start = 0x00a030a0, .end = 0x00a030a0 }, - { .start = 0x00a030a8, .end = 0x00a030b4 }, - { .start = 0x00a030bc, .end = 0x00a030bc }, - { .start = 0x00a030c0, .end = 0x00a0312c }, - { .start = 0x00a03c00, .end = 0x00a03c5c }, - { .start = 0x00a04400, .end = 0x00a04454 }, - { .start = 0x00a04460, .end = 0x00a04474 }, - { .start = 0x00a044c0, .end = 0x00a044ec }, - { .start = 0x00a04500, .end = 0x00a04504 }, - { .start = 0x00a04510, .end = 0x00a04538 }, - { .start = 0x00a04540, .end = 0x00a04548 }, - { .start = 0x00a04560, .end = 0x00a0457c }, - { .start = 0x00a04590, .end = 0x00a04598 }, - { .start = 0x00a045c0, .end = 0x00a045f4 }, -}; - -static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data) -{ - struct iwl_fw_error_dump_prph *prph; - unsigned long flags; - u32 prph_len = 0, i; - - if (!iwl_trans_grab_nic_access(trans, false, &flags)) - return 0; - - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; - int reg; - __le32 *val; - - prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; - - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); - (*data)->len = cpu_to_le32(sizeof(*prph) + - num_bytes_in_chunk); - prph = (void *)(*data)->data; - prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); - val = (void *)prph->data; - - for (reg = iwl_prph_dump_addr[i].start; - reg <= iwl_prph_dump_addr[i].end; - reg += 4) - *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, - reg)); - *data = iwl_fw_error_next_data(*data); - } - - iwl_trans_release_nic_access(trans, &flags); - - return prph_len; -} - -static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, - int allocated_rb_nums) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int max_len = PAGE_SIZE << trans_pcie->rx_page_order; - struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 i, r, j, rb_len = 0; - - spin_lock(&rxq->lock); - - r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; - - for (i = rxq->read, j = 0; - i != r && j < allocated_rb_nums; - i = (i + 1) & RX_QUEUE_MASK, j++) { - struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; - struct iwl_fw_error_dump_rb *rb; - - dma_unmap_page(trans->dev, rxb->page_dma, max_len, - DMA_FROM_DEVICE); - - rb_len += sizeof(**data) + sizeof(*rb) + max_len; - - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); - (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); - rb = (void *)(*data)->data; - rb->index = cpu_to_le32(i); - memcpy(rb->data, page_address(rxb->page), max_len); - /* remap the page for the free benefit */ - rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, - max_len, - DMA_FROM_DEVICE); - - *data = iwl_fw_error_next_data(*data); - } - - spin_unlock(&rxq->lock); - - return rb_len; -} -#define IWL_CSR_TO_DUMP (0x250) - -static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data) -{ - u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; - __le32 *val; - int i; - - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); - (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); - val = (void *)(*data)->data; - - for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) - *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); - - *data = iwl_fw_error_next_data(*data); - - return csr_len; -} - -static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data) -{ - 
u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; - unsigned long flags; - __le32 *val; - int i; - - if (!iwl_trans_grab_nic_access(trans, false, &flags)) - return 0; - - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); - (*data)->len = cpu_to_le32(fh_regs_len); - val = (void *)(*data)->data; - - for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) - *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); - - iwl_trans_release_nic_access(trans, &flags); - - *data = iwl_fw_error_next_data(*data); - - return sizeof(**data) + fh_regs_len; -} - -static u32 -iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, - struct iwl_fw_error_dump_fw_mon *fw_mon_data, - u32 monitor_len) -{ - u32 buf_size_in_dwords = (monitor_len >> 2); - u32 *buffer = (u32 *)fw_mon_data->data; - unsigned long flags; - u32 i; - - if (!iwl_trans_grab_nic_access(trans, false, &flags)) - return 0; - - __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1); - for (i = 0; i < buf_size_in_dwords; i++) - buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR); - __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0); - - iwl_trans_release_nic_access(trans, &flags); - - return monitor_len; -} - -static u32 -iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, - u32 monitor_len) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 len = 0; - - if ((trans_pcie->fw_mon_page && - trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || - trans->dbg_dest_tlv) { - struct iwl_fw_error_dump_fw_mon *fw_mon_data; - u32 base, write_ptr, wrap_cnt; - - /* If there was a dest TLV - use the values from there */ - if (trans->dbg_dest_tlv) { - write_ptr = - le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); - wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); - base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); - } else { - base = MON_BUFF_BASE_ADDR; - write_ptr = MON_BUFF_WRPTR; - wrap_cnt = MON_BUFF_CYCLE_CNT; - } - - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); - fw_mon_data = (void *)(*data)->data; - fw_mon_data->fw_mon_wr_ptr = - cpu_to_le32(iwl_read_prph(trans, write_ptr)); - fw_mon_data->fw_mon_cycle_cnt = - cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); - fw_mon_data->fw_mon_base_ptr = - cpu_to_le32(iwl_read_prph(trans, base)); - - len += sizeof(**data) + sizeof(*fw_mon_data); - if (trans_pcie->fw_mon_page) { - /* - * The firmware is now asserted, it won't write anything - * to the buffer. CPU can take ownership to fetch the - * data. The buffer will be handed back to the device - * before the firmware will be restarted. 
- */ - dma_sync_single_for_cpu(trans->dev, - trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, - DMA_FROM_DEVICE); - memcpy(fw_mon_data->data, - page_address(trans_pcie->fw_mon_page), - trans_pcie->fw_mon_size); - - monitor_len = trans_pcie->fw_mon_size; - } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { - /* - * Update pointers to reflect actual values after - * shifting - */ - base = iwl_read_prph(trans, base) << - trans->dbg_dest_tlv->base_shift; - iwl_trans_read_mem(trans, base, fw_mon_data->data, - monitor_len / sizeof(u32)); - } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { - monitor_len = - iwl_trans_pci_dump_marbh_monitor(trans, - fw_mon_data, - monitor_len); - } else { - /* Didn't match anything - output no monitor data */ - monitor_len = 0; - } - - len += monitor_len; - (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); - } - - return len; -} - -static struct iwl_trans_dump_data -*iwl_trans_pcie_dump_data(struct iwl_trans *trans, - struct iwl_fw_dbg_trigger_tlv *trigger) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_fw_error_dump_data *data; - struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; - struct iwl_fw_error_dump_txcmd *txcmd; - struct iwl_trans_dump_data *dump_data; - u32 len, num_rbs; - u32 monitor_len; - int i, ptr; - bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); - - /* transport dump header */ - len = sizeof(*dump_data); - - /* host commands */ - len += sizeof(*data) + - cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); - - /* FW monitor */ - if (trans_pcie->fw_mon_page) { - len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + - trans_pcie->fw_mon_size; - monitor_len = trans_pcie->fw_mon_size; - } else if (trans->dbg_dest_tlv) { - u32 base, end; - - base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); - end = le32_to_cpu(trans->dbg_dest_tlv->end_reg); - - base = iwl_read_prph(trans, base) << - trans->dbg_dest_tlv->base_shift; - end = iwl_read_prph(trans, end) << - trans->dbg_dest_tlv->end_shift; - - /* Make "end" point to the actual end */ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 || - trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) - end += (1 << trans->dbg_dest_tlv->end_shift); - monitor_len = end - base; - len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + - monitor_len; - } else { - monitor_len = 0; - } - - if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { - dump_data = vzalloc(len); - if (!dump_data) - return NULL; - - data = (void *)dump_data->data; - len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); - dump_data->len = len; - - return dump_data; - } - - /* CSR registers */ - len += sizeof(*data) + IWL_CSR_TO_DUMP; - - /* PRPH registers */ - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; - - len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - - /* FH registers */ - len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); - - if (dump_rbs) { - /* RBs */ - num_rbs = le16_to_cpu(ACCESS_ONCE( - trans_pcie->rxq.rb_stts->closed_rb_num)) - & 0x0FFF; - num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; - len += num_rbs * (sizeof(*data) + - sizeof(struct iwl_fw_error_dump_rb) + - (PAGE_SIZE << trans_pcie->rx_page_order)); - } - - dump_data = vzalloc(len); - if (!dump_data) - return NULL; - - 
len = 0; - data = (void *)dump_data->data; - data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); - txcmd = (void *)data->data; - spin_lock_bh(&cmdq->lock); - ptr = cmdq->q.write_ptr; - for (i = 0; i < cmdq->q.n_window; i++) { - u8 idx = get_cmd_index(&cmdq->q, ptr); - u32 caplen, cmdlen; - - cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]); - caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); - - if (cmdlen) { - len += sizeof(*txcmd) + caplen; - txcmd->cmdlen = cpu_to_le32(cmdlen); - txcmd->caplen = cpu_to_le32(caplen); - memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); - txcmd = (void *)((u8 *)txcmd->data + caplen); - } - - ptr = iwl_queue_dec_wrap(ptr); - } - spin_unlock_bh(&cmdq->lock); - - data->len = cpu_to_le32(len); - len += sizeof(*data); - data = iwl_fw_error_next_data(data); - - len += iwl_trans_pcie_dump_prph(trans, &data); - len += iwl_trans_pcie_dump_csr(trans, &data); - len += iwl_trans_pcie_fh_regs_dump(trans, &data); - if (dump_rbs) - len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); - - len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); - - dump_data->len = len; - - return dump_data; -} - -static const struct iwl_trans_ops trans_ops_pcie = { - .start_hw = iwl_trans_pcie_start_hw, - .op_mode_leave = iwl_trans_pcie_op_mode_leave, - .fw_alive = iwl_trans_pcie_fw_alive, - .start_fw = iwl_trans_pcie_start_fw, - .stop_device = iwl_trans_pcie_stop_device, - - .d3_suspend = iwl_trans_pcie_d3_suspend, - .d3_resume = iwl_trans_pcie_d3_resume, - - .send_cmd = iwl_trans_pcie_send_hcmd, - - .tx = iwl_trans_pcie_tx, - .reclaim = iwl_trans_pcie_reclaim, - - .txq_disable = iwl_trans_pcie_txq_disable, - .txq_enable = iwl_trans_pcie_txq_enable, - - .dbgfs_register = iwl_trans_pcie_dbgfs_register, - - .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, - .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, - - .write8 = iwl_trans_pcie_write8, - .write32 = iwl_trans_pcie_write32, - .read32 = iwl_trans_pcie_read32, - .read_prph = iwl_trans_pcie_read_prph, - .write_prph = iwl_trans_pcie_write_prph, - .read_mem = iwl_trans_pcie_read_mem, - .write_mem = iwl_trans_pcie_write_mem, - .configure = iwl_trans_pcie_configure, - .set_pmi = iwl_trans_pcie_set_pmi, - .grab_nic_access = iwl_trans_pcie_grab_nic_access, - .release_nic_access = iwl_trans_pcie_release_nic_access, - .set_bits_mask = iwl_trans_pcie_set_bits_mask, - - .ref = iwl_trans_pcie_ref, - .unref = iwl_trans_pcie_unref, - - .dump_data = iwl_trans_pcie_dump_data, -}; - -struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct iwl_cfg *cfg) -{ - struct iwl_trans_pcie *trans_pcie; - struct iwl_trans *trans; - u16 pci_cmd; - int ret; - - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, cfg, &trans_ops_pcie, 0); - if (!trans) - return ERR_PTR(-ENOMEM); - - trans->max_skb_frags = IWL_PCIE_MAX_FRAGS; - - trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - trans_pcie->trans = trans; - spin_lock_init(&trans_pcie->irq_lock); - spin_lock_init(&trans_pcie->reg_lock); - spin_lock_init(&trans_pcie->ref_lock); - mutex_init(&trans_pcie->mutex); - init_waitqueue_head(&trans_pcie->ucode_write_waitq); - - ret = pci_enable_device(pdev); - if (ret) - goto out_no_pci; - - if (!cfg->base_params->pcie_l1_allowed) { - /* - * W/A - seems to solve weird behavior. We need to remove this - * if we don't want to stay in L1 all the time. This wastes a - * lot of power. 
- */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | - PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - } - - pci_set_master(pdev); - - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!ret) - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); - if (ret) { - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!ret) - ret = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32)); - /* both attempts failed: */ - if (ret) { - dev_err(&pdev->dev, "No suitable DMA available\n"); - goto out_pci_disable_device; - } - } - - ret = pci_request_regions(pdev, DRV_NAME); - if (ret) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); - goto out_pci_disable_device; - } - - trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); - if (!trans_pcie->hw_base) { - dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); - ret = -ENODEV; - goto out_pci_release_regions; - } - - /* We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state */ - pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - - trans->dev = &pdev->dev; - trans_pcie->pci_dev = pdev; - iwl_disable_interrupts(trans); - - ret = pci_enable_msi(pdev); - if (ret) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); - /* enable rfkill interrupt: hw bug w/a */ - pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); - if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { - pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; - pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); - } - } - - trans->hw_rev = iwl_read32(trans, CSR_HW_REV); - /* - * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have - * changed, and now the revision step also includes bit 0-1 (no more - * "dash" value). To keep hw_rev backwards compatible - we'll store it - * in the old format. - */ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - unsigned long flags; - - trans->hw_rev = (trans->hw_rev & 0xfff0) | - (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); - - ret = iwl_pcie_prepare_card_hw(trans); - if (ret) { - IWL_WARN(trans, "Exit HW not ready\n"); - goto out_pci_disable_msi; - } - - /* - * in-order to recognize C step driver should read chip version - * id located at the AUX bus MISC address space. 
- */ - iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - udelay(2); - - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - 25000); - if (ret < 0) { - IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); - goto out_pci_disable_msi; - } - - if (iwl_trans_grab_nic_access(trans, false, &flags)) { - u32 hw_step; - - hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG); - hw_step |= ENABLE_WFPM; - __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step); - hw_step = __iwl_read_prph(trans, AUX_MISC_REG); - hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; - if (hw_step == 0x3) - trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | - (SILICON_C_STEP << 2); - iwl_trans_release_nic_access(trans, &flags); - } - } - - trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; - snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), - "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); - - /* Initialize the wait queue for commands */ - init_waitqueue_head(&trans_pcie->wait_command_queue); - - ret = iwl_pcie_alloc_ict(trans); - if (ret) - goto out_pci_disable_msi; - - ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, - iwl_pcie_irq_handler, - IRQF_SHARED, DRV_NAME, trans); - if (ret) { - IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); - goto out_free_ict; - } - - trans_pcie->inta_mask = CSR_INI_SET_MASK; - trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND; - - return trans; - -out_free_ict: - iwl_pcie_free_ict(trans); -out_pci_disable_msi: - pci_disable_msi(pdev); -out_pci_release_regions: - pci_release_regions(pdev); -out_pci_disable_device: - pci_disable_device(pdev); -out_no_pci: - iwl_trans_free(trans); - return ERR_PTR(ret); -} diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c deleted file mode 100644 index a8c8a4a7420b..000000000000 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ /dev/null @@ -1,1988 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ -#include -#include -#include - -#include "iwl-debug.h" -#include "iwl-csr.h" -#include "iwl-prph.h" -#include "iwl-io.h" -#include "iwl-scd.h" -#include "iwl-op-mode.h" -#include "internal.h" -/* FIXME: need to abstract out TX command (once we know what it looks like) */ -#include "dvm/commands.h" - -#define IWL_TX_CRC_SIZE 4 -#define IWL_TX_DELIMITER_SIZE 4 - -/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** - * DMA services - * - * Theory of operation - * - * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer - * of buffer descriptors, each of which points to one or more data buffers for - * the device to read from or fill. Driver and device exchange status of each - * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty - * entries in each circular buffer, to protect against confusing empty and full - * queue states. - * - * The device reads or writes the data in the queues via the device's several - * DMA/FIFO channels. Each queue is mapped to a single DMA channel. - * - * For Tx queue, there are low mark and high mark limits. If, after queuing - * the packet for Tx, free space become < low mark, Tx queue stopped. When - * reclaiming packets (on 'tx done IRQ), if free space become > high mark, - * Tx queue resumed. - * - ***************************************************/ -static int iwl_queue_space(const struct iwl_queue *q) -{ - unsigned int max; - unsigned int used; - - /* - * To avoid ambiguity between empty and completely full queues, there - * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue. - * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need - * to reserve any queue entries for this purpose. - */ - if (q->n_window < TFD_QUEUE_SIZE_MAX) - max = q->n_window; - else - max = TFD_QUEUE_SIZE_MAX - 1; - - /* - * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to - * modulo by TFD_QUEUE_SIZE_MAX and is well defined. - */ - used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); - - if (WARN_ON(used > max)) - return 0; - - return max - used; -} - -/* - * iwl_queue_init - Initialize queue's high/low-water and read/write indexes - */ -static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id) -{ - q->n_window = slots_num; - q->id = id; - - /* slots_num must be power-of-two size, otherwise - * get_cmd_index is broken. 
*/ - if (WARN_ON(!is_power_of_2(slots_num))) - return -EINVAL; - - q->low_mark = q->n_window / 4; - if (q->low_mark < 4) - q->low_mark = 4; - - q->high_mark = q->n_window / 8; - if (q->high_mark < 2) - q->high_mark = 2; - - q->write_ptr = 0; - q->read_ptr = 0; - - return 0; -} - -static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, - struct iwl_dma_ptr *ptr, size_t size) -{ - if (WARN_ON(ptr->addr)) - return -EINVAL; - - ptr->addr = dma_alloc_coherent(trans->dev, size, - &ptr->dma, GFP_KERNEL); - if (!ptr->addr) - return -ENOMEM; - ptr->size = size; - return 0; -} - -static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, - struct iwl_dma_ptr *ptr) -{ - if (unlikely(!ptr->addr)) - return; - - dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); - memset(ptr, 0, sizeof(*ptr)); -} - -static void iwl_pcie_txq_stuck_timer(unsigned long data) -{ - struct iwl_txq *txq = (void *)data; - struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; - struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); - u32 scd_sram_addr = trans_pcie->scd_base_addr + - SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); - u8 buf[16]; - int i; - - spin_lock(&txq->lock); - /* check if triggered erroneously */ - if (txq->q.read_ptr == txq->q.write_ptr) { - spin_unlock(&txq->lock); - return; - } - spin_unlock(&txq->lock); - - IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, - jiffies_to_msecs(txq->wd_timeout)); - IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", - txq->q.read_ptr, txq->q.write_ptr); - - iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); - - iwl_print_hex_error(trans, buf, sizeof(buf)); - - for (i = 0; i < FH_TCSR_CHNL_NUM; i++) - IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, - iwl_read_direct32(trans, FH_TX_TRB_REG(i))); - - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); - u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - u32 tbl_dw = - iwl_trans_read_mem32(trans, - trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(i)); - - if (i & 0x1) - tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; - else - tbl_dw = tbl_dw & 0x0000FFFF; - - IWL_ERR(trans, - "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", - i, active ? 
"" : "in", fifo, tbl_dw, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) & - (TFD_QUEUE_SIZE_MAX - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); - } - - iwl_force_nmi(trans); -} - -/* - * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array - */ -static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int write_ptr = txq->q.write_ptr; - int txq_id = txq->q.id; - u8 sec_ctl = 0; - u8 sta_id = 0; - u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; - __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *) txq->entries[txq->q.write_ptr].cmd->payload; - - scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - - sta_id = tx_cmd->sta_id; - sec_ctl = tx_cmd->sec_ctl; - - switch (sec_ctl & TX_CMD_SEC_MSK) { - case TX_CMD_SEC_CCM: - len += IEEE80211_CCMP_MIC_LEN; - break; - case TX_CMD_SEC_TKIP: - len += IEEE80211_TKIP_ICV_LEN; - break; - case TX_CMD_SEC_WEP: - len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; - break; - } - - if (trans_pcie->bc_table_dword) - len = DIV_ROUND_UP(len, 4); - - if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) - return; - - bc_ent = cpu_to_le16(len | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; - - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. - tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; -} - -static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - int txq_id = txq->q.id; - int read_ptr = txq->q.read_ptr; - u8 sta_id = 0; - __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[txq->q.read_ptr].cmd->payload; - - WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); - - if (txq_id != trans_pcie->cmd_queue) - sta_id = tx_cmd->sta_id; - - bc_ent = cpu_to_le16(1 | (sta_id << 12)); - scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; - - if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. - tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; -} - -/* - * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware - */ -static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 reg = 0; - int txq_id = txq->q.id; - - lockdep_assert_held(&txq->lock); - - /* - * explicitly wake up the NIC if: - * 1. shadow registers aren't enabled - * 2. NIC is woken up for CMD regardless of shadow outside this function - * 3. there is a chance that the NIC is asleep - */ - if (!trans->cfg->base_params->shadow_reg_enable && - txq_id != trans_pcie->cmd_queue && - test_bit(STATUS_TPOWER_PMI, &trans->status)) { - /* - * wake up nic if it's powered down ... - * uCode will wake up, and interrupt us again, so next - * time we'll skip this part. - */ - reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); - - if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { - IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", - txq_id, reg); - iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - txq->need_update = true; - return; - } - } - - /* - * if not in power-save mode, uCode will never sleep when we're - * trying to tx (during RFKILL, we're not trying to tx). 
- */ - IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); - iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); -} - -void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int i; - - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = &trans_pcie->txq[i]; - - spin_lock_bh(&txq->lock); - if (trans_pcie->txq[i].need_update) { - iwl_pcie_txq_inc_wr_ptr(trans, txq); - trans_pcie->txq[i].need_update = false; - } - spin_unlock_bh(&txq->lock); - } -} - -static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - - dma_addr_t addr = get_unaligned_le32(&tb->lo); - if (sizeof(dma_addr_t) > sizeof(u32)) - addr |= - ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; - - return addr; -} - -static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, - dma_addr_t addr, u16 len) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - if (sizeof(dma_addr_t) > sizeof(u32)) - hi_n_len |= ((addr >> 16) >> 16) & 0xF; - - tb->hi_n_len = cpu_to_le16(hi_n_len); - - tfd->num_tbs = idx + 1; -} - -static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) -{ - return tfd->num_tbs & 0x1f; -} - -static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_tfd *tfd) -{ - int i; - int num_tbs; - - /* Sanity check on number of chunks */ - num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); - - if (num_tbs >= IWL_NUM_OF_TBS) { - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* first TB is never freed - it's the scratchbuf data */ - - for (i = 1; i < num_tbs; i++) { - if (meta->flags & BIT(i + CMD_TB_BITMAP_POS)) - dma_unmap_page(trans->dev, - iwl_pcie_tfd_tb_get_addr(tfd, i), - iwl_pcie_tfd_tb_get_len(tfd, i), - DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, - iwl_pcie_tfd_tb_get_addr(tfd, i), - iwl_pcie_tfd_tb_get_len(tfd, i), - DMA_TO_DEVICE); - } - tfd->num_tbs = 0; -} - -/* - * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] - * @trans - transport private data - * @txq - tx queue - * @dma_dir - the direction of the DMA mapping - * - * Does NOT advance any TFD circular buffer read/write indexes - * Does NOT free the TFD itself (which is within circular buffer) - */ -static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) -{ - struct iwl_tfd *tfd_tmp = txq->tfds; - - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and - * idx is bounded by n_window - */ - int rd_ptr = txq->q.read_ptr; - int idx = get_cmd_index(&txq->q, rd_ptr); - - lockdep_assert_held(&txq->lock); - - /* We have only q->n_window txq->entries, but we use - * TFD_QUEUE_SIZE_MAX tfds - */ - iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); - - /* free SKB */ - if (txq->entries) { - struct sk_buff *skb; - - skb = txq->entries[idx].skb; - - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } - } -} - -static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, - dma_addr_t addr, u16 len, bool reset) -{ - struct iwl_queue *q; - struct iwl_tfd *tfd, *tfd_tmp; - u32 
num_tbs; - - q = &txq->q; - tfd_tmp = txq->tfds; - tfd = &tfd_tmp[q->write_ptr]; - - if (reset) - memset(tfd, 0, sizeof(*tfd)); - - num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); - - /* Each TFD can point to a maximum 20 Tx buffers */ - if (num_tbs >= IWL_NUM_OF_TBS) { - IWL_ERR(trans, "Error can not send more than %d chunks\n", - IWL_NUM_OF_TBS); - return -EINVAL; - } - - if (WARN(addr & ~IWL_TX_DMA_MASK, - "Unaligned address = %llx\n", (unsigned long long)addr)) - return -EINVAL; - - iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); - - return num_tbs; -} - -static int iwl_pcie_txq_alloc(struct iwl_trans *trans, - struct iwl_txq *txq, int slots_num, - u32 txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; - size_t scratchbuf_sz; - int i; - - if (WARN_ON(txq->entries || txq->tfds)) - return -EINVAL; - - setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, - (unsigned long)txq); - txq->trans_pcie = trans_pcie; - - txq->q.n_window = slots_num; - - txq->entries = kcalloc(slots_num, - sizeof(struct iwl_pcie_txq_entry), - GFP_KERNEL); - - if (!txq->entries) - goto error; - - if (txq_id == trans_pcie->cmd_queue) - for (i = 0; i < slots_num; i++) { - txq->entries[i].cmd = - kmalloc(sizeof(struct iwl_device_cmd), - GFP_KERNEL); - if (!txq->entries[i].cmd) - goto error; - } - - /* Circular buffer of transmit frame descriptors (TFDs), - * shared with device */ - txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, - &txq->q.dma_addr, GFP_KERNEL); - if (!txq->tfds) - goto error; - - BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); - BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != - sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch)); - - scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num; - - txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz, - &txq->scratchbufs_dma, - GFP_KERNEL); - if (!txq->scratchbufs) - goto err_free_tfds; - - txq->q.id = txq_id; - - return 0; -err_free_tfds: - dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr); -error: - if (txq->entries && txq_id == trans_pcie->cmd_queue) - for (i = 0; i < slots_num; i++) - kfree(txq->entries[i].cmd); - kfree(txq->entries); - txq->entries = NULL; - - return -ENOMEM; - -} - -static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, - int slots_num, u32 txq_id) -{ - int ret; - - txq->need_update = false; - - /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise - * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ - BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); - - /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(&txq->q, slots_num, txq_id); - if (ret) - return ret; - - spin_lock_init(&txq->lock); - - /* - * Tell nic where to find circular buffer of Tx Frame Descriptors for - * given Tx queue, and enable the DMA channel used for that queue. 
- * Circular buffer (TFD queue in DRAM) physical base address */ - iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), - txq->q.dma_addr >> 8); - - return 0; -} - -/* - * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's - */ -static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - struct iwl_queue *q = &txq->q; - - spin_lock_bh(&txq->lock); - while (q->write_ptr != q->read_ptr) { - IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", - txq_id, q->read_ptr); - iwl_pcie_txq_free_tfd(trans, txq); - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); - } - txq->active = false; - spin_unlock_bh(&txq->lock); - - /* just in case - this queue may have been stopped */ - iwl_wake_queue(trans, txq); -} - -/* - * iwl_pcie_txq_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. - */ -static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - struct device *dev = trans->dev; - int i; - - if (WARN_ON(!txq)) - return; - - iwl_pcie_txq_unmap(trans, txq_id); - - /* De-alloc array of command/tx buffers */ - if (txq_id == trans_pcie->cmd_queue) - for (i = 0; i < txq->q.n_window; i++) { - kzfree(txq->entries[i].cmd); - kzfree(txq->entries[i].free_buf); - } - - /* De-alloc circular buffer of TFDs */ - if (txq->tfds) { - dma_free_coherent(dev, - sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX, - txq->tfds, txq->q.dma_addr); - txq->q.dma_addr = 0; - txq->tfds = NULL; - - dma_free_coherent(dev, - sizeof(*txq->scratchbufs) * txq->q.n_window, - txq->scratchbufs, txq->scratchbufs_dma); - } - - kfree(txq->entries); - txq->entries = NULL; - - del_timer_sync(&txq->stuck_timer); - - /* 0-fill queue descriptor structure */ - memset(txq, 0, sizeof(*txq)); -} - -void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int nq = trans->cfg->base_params->num_of_queues; - int chan; - u32 reg_val; - int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - - SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); - - /* make sure all queue are not stopped/used */ - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); - - trans_pcie->scd_base_addr = - iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); - - WARN_ON(scd_base_addr != 0 && - scd_base_addr != trans_pcie->scd_base_addr); - - /* reset context data, TX status and translation data */ - iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + - SCD_CONTEXT_MEM_LOWER_BOUND, - NULL, clear_dwords); - - iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, - trans_pcie->scd_bc_tbls.dma >> 10); - - /* The chain extension of the SCD doesn't work well. This feature is - * enabled by default by the HW, so we need to disable it manually. 
- */ - if (trans->cfg->base_params->scd_chain_ext_wa) - iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); - - iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, - trans_pcie->cmd_fifo, - trans_pcie->cmd_q_wdg_timeout); - - /* Activate all Tx DMA/FIFO channels */ - iwl_scd_activate_fifos(trans); - - /* Enable DMA channel */ - for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) - iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); - - /* Update FH chicken bits */ - reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); - iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, - reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); - - /* Enable L1-Active */ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, - APMG_PCIDEV_STT_VAL_L1_ACT_DIS); -} - -void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int txq_id; - - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; - txq_id++) { - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - - iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), - txq->q.dma_addr >> 8); - iwl_pcie_txq_unmap(trans, txq_id); - txq->q.read_ptr = 0; - txq->q.write_ptr = 0; - } - - /* Tell NIC where to find the "keep warm" buffer */ - iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, - trans_pcie->kw.dma >> 4); - - /* - * Send 0 as the scd_base_addr since the device may have be reset - * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will - * contain garbage. - */ - iwl_pcie_tx_start(trans, 0); -} - -static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; - int ch, ret; - u32 mask = 0; - - spin_lock(&trans_pcie->irq_lock); - - if (!iwl_trans_grab_nic_access(trans, false, &flags)) - goto out; - - /* Stop each Tx DMA channel */ - for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { - iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); - mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); - } - - /* Wait for DMA channels to be idle */ - ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); - if (ret < 0) - IWL_ERR(trans, - "Failing on timeout while stopping DMA channel %d [0x%08x]\n", - ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); - - iwl_trans_release_nic_access(trans, &flags); - -out: - spin_unlock(&trans_pcie->irq_lock); -} - -/* - * iwl_pcie_tx_stop - Stop all Tx DMA channels - */ -int iwl_pcie_tx_stop(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int txq_id; - - /* Turn off all Tx DMA fifos */ - iwl_scd_deactivate_fifos(trans); - - /* Turn off all Tx DMA channels */ - iwl_pcie_tx_stop_fh(trans); - - /* - * This function can be called before the op_mode disabled the - * queues. This happens when we have an rfkill interrupt. - * Since we stop Tx altogether - mark the queues as stopped. 
- */ - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); - - /* This can happen: start_hw, stop_device */ - if (!trans_pcie->txq) - return 0; - - /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; - txq_id++) - iwl_pcie_txq_unmap(trans, txq_id); - - return 0; -} - -/* - * iwl_trans_tx_free - Free TXQ Context - * - * Destroy all TX DMA queues and structures - */ -void iwl_pcie_tx_free(struct iwl_trans *trans) -{ - int txq_id; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - /* Tx queues */ - if (trans_pcie->txq) { - for (txq_id = 0; - txq_id < trans->cfg->base_params->num_of_queues; txq_id++) - iwl_pcie_txq_free(trans, txq_id); - } - - kfree(trans_pcie->txq); - trans_pcie->txq = NULL; - - iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); - - iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); -} - -/* - * iwl_pcie_tx_alloc - allocate TX context - * Allocate all Tx DMA structures and initialize them - */ -static int iwl_pcie_tx_alloc(struct iwl_trans *trans) -{ - int ret; - int txq_id, slots_num; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * - sizeof(struct iwlagn_scd_bc_tbl); - - /*It is not allowed to alloc twice, so warn when this happens. - * We cannot rely on the previous allocation, so free and fail */ - if (WARN_ON(trans_pcie->txq)) { - ret = -EINVAL; - goto error; - } - - ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, - scd_bc_tbls_size); - if (ret) { - IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); - goto error; - } - - /* Alloc keep-warm buffer */ - ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); - if (ret) { - IWL_ERR(trans, "Keep Warm allocation failed\n"); - goto error; - } - - trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, - sizeof(struct iwl_txq), GFP_KERNEL); - if (!trans_pcie->txq) { - IWL_ERR(trans, "Not enough memory for txq\n"); - ret = -ENOMEM; - goto error; - } - - /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; - txq_id++) { - slots_num = (txq_id == trans_pcie->cmd_queue) ? - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], - slots_num, txq_id); - if (ret) { - IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); - goto error; - } - } - - return 0; - -error: - iwl_pcie_tx_free(trans); - - return ret; -} -int iwl_pcie_tx_init(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - int txq_id, slots_num; - bool alloc = false; - - if (!trans_pcie->txq) { - ret = iwl_pcie_tx_alloc(trans); - if (ret) - goto error; - alloc = true; - } - - spin_lock(&trans_pcie->irq_lock); - - /* Turn off all Tx DMA fifos */ - iwl_scd_deactivate_fifos(trans); - - /* Tell NIC where to find the "keep warm" buffer */ - iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, - trans_pcie->kw.dma >> 4); - - spin_unlock(&trans_pcie->irq_lock); - - /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; - txq_id++) { - slots_num = (txq_id == trans_pcie->cmd_queue) ? 
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], - slots_num, txq_id); - if (ret) { - IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); - goto error; - } - } - - iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); - if (trans->cfg->base_params->num_of_queues > 20) - iwl_set_bits_prph(trans, SCD_GP_CTRL, - SCD_GP_CTRL_ENABLE_31_QUEUES); - - return 0; -error: - /*Upon error, free only if we allocated something */ - if (alloc) - iwl_pcie_tx_free(trans); - return ret; -} - -static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) -{ - lockdep_assert_held(&txq->lock); - - if (!txq->wd_timeout) - return; - - /* - * station is asleep and we send data - that must - * be uAPSD or PS-Poll. Don't rearm the timer. - */ - if (txq->frozen) - return; - - /* - * if empty delete timer, otherwise move timer forward - * since we're making progress on this queue - */ - if (txq->q.read_ptr == txq->q.write_ptr) - del_timer(&txq->stuck_timer); - else - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); -} - -/* Frees buffers until index _not_ inclusive */ -void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); - struct iwl_queue *q = &txq->q; - int last_to_free; - - /* This function is not meant to release cmd queue*/ - if (WARN_ON(txq_id == trans_pcie->cmd_queue)) - return; - - spin_lock_bh(&txq->lock); - - if (!txq->active) { - IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", - txq_id, ssn); - goto out; - } - - if (txq->q.read_ptr == tfd_num) - goto out; - - IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", - txq_id, txq->q.read_ptr, tfd_num, ssn); - - /*Since we free until index _not_ inclusive, the one before index is - * the last we will free. 
This one must be used */ - last_to_free = iwl_queue_dec_wrap(tfd_num); - - if (!iwl_queue_used(q, last_to_free)) { - IWL_ERR(trans, - "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, - q->write_ptr, q->read_ptr); - goto out; - } - - if (WARN_ON(!skb_queue_empty(skbs))) - goto out; - - for (; - q->read_ptr != tfd_num; - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { - - if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) - continue; - - __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); - - txq->entries[txq->q.read_ptr].skb = NULL; - - iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); - - iwl_pcie_txq_free_tfd(trans, txq); - } - - iwl_pcie_txq_progress(txq); - - if (iwl_queue_space(&txq->q) > txq->q.low_mark) - iwl_wake_queue(trans, txq); - - if (q->read_ptr == q->write_ptr) { - IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); - iwl_trans_pcie_unref(trans); - } - -out: - spin_unlock_bh(&txq->lock); -} - -static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, - const struct iwl_host_cmd *cmd) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - lockdep_assert_held(&trans_pcie->reg_lock); - - if (!(cmd->flags & CMD_SEND_IN_IDLE) && - !trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = true; - IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); - iwl_trans_pcie_ref(trans); - } - - /* - * wake up the NIC to make sure that the firmware will see the host - * command - we will let the NIC sleep once all the host commands - * returned. This needs to be done only on NICs that have - * apmg_wake_up_wa set. - */ - if (trans->cfg->base_params->apmg_wake_up_wa && - !trans_pcie->cmd_hold_nic_awake) { - __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), - 15000); - if (ret < 0) { - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); - return -EIO; - } - trans_pcie->cmd_hold_nic_awake = true; - } - - return 0; -} - -static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - lockdep_assert_held(&trans_pcie->reg_lock); - - if (trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = false; - IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); - iwl_trans_pcie_unref(trans); - } - - if (trans->cfg->base_params->apmg_wake_up_wa) { - if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) - return 0; - - trans_pcie->cmd_hold_nic_awake = false; - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - } - return 0; -} - -/* - * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd - * - * When FW advances 'R' index, all entries between old and new 'R' index - * need to be reclaimed. As result, some free space forms. If there is - * enough free space (> low mark), wake the stack that feeds us. 
- */ -static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - struct iwl_queue *q = &txq->q; - unsigned long flags; - int nfreed = 0; - - lockdep_assert_held(&txq->lock); - - if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) { - IWL_ERR(trans, - "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, - q->write_ptr, q->read_ptr); - return; - } - - for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx; - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { - - if (nfreed++ > 0) { - IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", - idx, q->write_ptr, q->read_ptr); - iwl_force_nmi(trans); - } - } - - if (q->read_ptr == q->write_ptr) { - spin_lock_irqsave(&trans_pcie->reg_lock, flags); - iwl_pcie_clear_cmd_in_flight(trans); - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); - } - - iwl_pcie_txq_progress(txq); -} - -static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, - u16 txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 tbl_dw_addr; - u32 tbl_dw; - u16 scd_q2ratid; - - scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; - - tbl_dw_addr = trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); - - tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); - - if (txq_id & 0x1) - tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); - else - tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); - - iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); - - return 0; -} - -/* Receiver address (actually, Rx station's index into station table), - * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ -#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) - -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, - const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - int fifo = -1; - - if (test_and_set_bit(txq_id, trans_pcie->queue_used)) - WARN_ONCE(1, "queue %d already used - expect issues", txq_id); - - txq->wd_timeout = msecs_to_jiffies(wdg_timeout); - - if (cfg) { - fifo = cfg->fifo; - - /* Disable the scheduler prior configuring the cmd queue */ - if (txq_id == trans_pcie->cmd_queue && - trans_pcie->scd_set_active) - iwl_scd_enable_set_active(trans, 0); - - /* Stop this Tx queue before configuring it */ - iwl_scd_txq_set_inactive(trans, txq_id); - - /* Set this queue as a chain-building queue unless it is CMD */ - if (txq_id != trans_pcie->cmd_queue) - iwl_scd_txq_set_chain(trans, txq_id); - - if (cfg->aggregate) { - u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); - - /* Map receiver-address / traffic-ID to this queue */ - iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); - - /* enable aggregations for the queue */ - iwl_scd_txq_enable_agg(trans, txq_id); - txq->ampdu = true; - } else { - /* - * disable aggregations for the queue, this will also - * make the ra_tid mapping configuration irrelevant - * since it is now a non-AGG queue. - */ - iwl_scd_txq_disable_agg(trans, txq_id); - - ssn = txq->q.read_ptr; - } - } - - /* Place first TFD at index corresponding to start sequence number. 
- * Assumes that ssn_idx is valid (!= 0xFFF) */ - txq->q.read_ptr = (ssn & 0xff); - txq->q.write_ptr = (ssn & 0xff); - iwl_write_direct32(trans, HBUS_TARG_WRPTR, - (ssn & 0xff) | (txq_id << 8)); - - if (cfg) { - u8 frame_limit = cfg->frame_limit; - - iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); - - /* Set up Tx window size and frame limit for this queue */ - iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + - SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); - iwl_trans_write_mem32(trans, - trans_pcie->scd_base_addr + - SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), - ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & - SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | - ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); - - /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ - iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), - (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | - (1 << SCD_QUEUE_STTS_REG_POS_WSL) | - SCD_QUEUE_STTS_REG_MSK); - - /* enable the scheduler for this queue (only) */ - if (txq_id == trans_pcie->cmd_queue && - trans_pcie->scd_set_active) - iwl_scd_enable_set_active(trans, BIT(txq_id)); - - IWL_DEBUG_TX_QUEUES(trans, - "Activate queue %d on FIFO %d WrPtr: %d\n", - txq_id, fifo, ssn & 0xff); - } else { - IWL_DEBUG_TX_QUEUES(trans, - "Activate queue %d WrPtr: %d\n", - txq_id, ssn & 0xff); - } - - txq->active = true; -} - -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, - bool configure_scd) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 stts_addr = trans_pcie->scd_base_addr + - SCD_TX_STTS_QUEUE_OFFSET(txq_id); - static const u32 zero_val[4] = {}; - - trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; - trans_pcie->txq[txq_id].frozen = false; - - /* - * Upon HW Rfkill - we stop the device, and then stop the queues - * in the op_mode. Just for the sake of the simplicity of the op_mode, - * allow the op_mode to call txq_disable after it already called - * stop_device. - */ - if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { - WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), - "queue %d not used", txq_id); - return; - } - - if (configure_scd) { - iwl_scd_txq_set_inactive(trans, txq_id); - - iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, - ARRAY_SIZE(zero_val)); - } - - iwl_pcie_txq_unmap(trans, txq_id); - trans_pcie->txq[txq_id].ampdu = false; - - IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); -} - -/*************** HOST COMMAND QUEUE FUNCTIONS *****/ - -/* - * iwl_pcie_enqueue_hcmd - enqueue a uCode command - * @priv: device private data point - * @cmd: a pointer to the ucode command structure - * - * The function returns < 0 values to indicate the operation - * failed. On success, it returns the index (>= 0) of command in the - * command queue. 
- */ -static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; - struct iwl_queue *q = &txq->q; - struct iwl_device_cmd *out_cmd; - struct iwl_cmd_meta *out_meta; - unsigned long flags; - void *dup_buf = NULL; - dma_addr_t phys_addr; - int idx; - u16 copy_size, cmd_size, scratch_size; - bool had_nocopy = false; - u8 group_id = iwl_cmd_groupid(cmd->id); - int i, ret; - u32 cmd_pos; - const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; - u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - - if (WARN(!trans_pcie->wide_cmd_header && - group_id > IWL_ALWAYS_LONG_GROUP, - "unsupported wide command %#x\n", cmd->id)) - return -EINVAL; - - if (group_id != 0) { - copy_size = sizeof(struct iwl_cmd_header_wide); - cmd_size = sizeof(struct iwl_cmd_header_wide); - } else { - copy_size = sizeof(struct iwl_cmd_header); - cmd_size = sizeof(struct iwl_cmd_header); - } - - /* need one for the header if the first is NOCOPY */ - BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); - - for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { - cmddata[i] = cmd->data[i]; - cmdlen[i] = cmd->len[i]; - - if (!cmd->len[i]) - continue; - - /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ - if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { - int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; - - if (copy > cmdlen[i]) - copy = cmdlen[i]; - cmdlen[i] -= copy; - cmddata[i] += copy; - copy_size += copy; - } - - if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { - had_nocopy = true; - if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { - idx = -EINVAL; - goto free_dup_buf; - } - } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { - /* - * This is also a chunk that isn't copied - * to the static buffer so set had_nocopy. - */ - had_nocopy = true; - - /* only allowed once */ - if (WARN_ON(dup_buf)) { - idx = -EINVAL; - goto free_dup_buf; - } - - dup_buf = kmemdup(cmddata[i], cmdlen[i], - GFP_ATOMIC); - if (!dup_buf) - return -ENOMEM; - } else { - /* NOCOPY must not be followed by normal! */ - if (WARN_ON(had_nocopy)) { - idx = -EINVAL; - goto free_dup_buf; - } - copy_size += cmdlen[i]; - } - cmd_size += cmd->len[i]; - } - - /* - * If any of the command structures end up being larger than - * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically - * allocated into separate TFDs, then we will need to - * increase the size of the buffers. - */ - if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, - "Command %s (%#x) is too large (%d bytes)\n", - get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) { - idx = -EINVAL; - goto free_dup_buf; - } - - spin_lock_bh(&txq->lock); - - if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { - spin_unlock_bh(&txq->lock); - - IWL_ERR(trans, "No space in command queue\n"); - iwl_op_mode_cmd_queue_full(trans->op_mode); - idx = -ENOSPC; - goto free_dup_buf; - } - - idx = get_cmd_index(q, q->write_ptr); - out_cmd = txq->entries[idx].cmd; - out_meta = &txq->entries[idx].meta; - - memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ - if (cmd->flags & CMD_WANT_SKB) - out_meta->source = cmd; - - /* set up the header */ - if (group_id != 0) { - out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); - out_cmd->hdr_wide.group_id = group_id; - out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); - out_cmd->hdr_wide.length = - cpu_to_le16(cmd_size - - sizeof(struct iwl_cmd_header_wide)); - out_cmd->hdr_wide.reserved = 0; - out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | - INDEX_TO_SEQ(q->write_ptr)); - - cmd_pos = sizeof(struct iwl_cmd_header_wide); - copy_size = sizeof(struct iwl_cmd_header_wide); - } else { - out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); - out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | - INDEX_TO_SEQ(q->write_ptr)); - out_cmd->hdr.group_id = 0; - - cmd_pos = sizeof(struct iwl_cmd_header); - copy_size = sizeof(struct iwl_cmd_header); - } - - /* and copy the data that needs to be copied */ - for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { - int copy; - - if (!cmd->len[i]) - continue; - - /* copy everything if not nocopy/dup */ - if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | - IWL_HCMD_DFL_DUP))) { - copy = cmd->len[i]; - - memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); - cmd_pos += copy; - copy_size += copy; - continue; - } - - /* - * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied - * in total (for the scratchbuf handling), but copy up to what - * we can fit into the payload for debug dump purposes. 
- */ - copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); - - memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); - cmd_pos += copy; - - /* However, treat copy_size the proper way, we need it below */ - if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { - copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; - - if (copy > cmd->len[i]) - copy = cmd->len[i]; - copy_size += copy; - } - } - - IWL_DEBUG_HC(trans, - "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", - get_cmd_string(trans_pcie, out_cmd->hdr.cmd), - group_id, out_cmd->hdr.cmd, - le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); - - /* start the TFD with the scratchbuf */ - scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); - memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); - iwl_pcie_txq_build_tfd(trans, txq, - iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), - scratch_size, true); - - /* map first command fragment, if any remains */ - if (copy_size > scratch_size) { - phys_addr = dma_map_single(trans->dev, - ((u8 *)&out_cmd->hdr) + scratch_size, - copy_size - scratch_size, - DMA_TO_DEVICE); - if (dma_mapping_error(trans->dev, phys_addr)) { - iwl_pcie_tfd_unmap(trans, out_meta, - &txq->tfds[q->write_ptr]); - idx = -ENOMEM; - goto out; - } - - iwl_pcie_txq_build_tfd(trans, txq, phys_addr, - copy_size - scratch_size, false); - } - - /* map the remaining (adjusted) nocopy/dup fragments */ - for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { - const void *data = cmddata[i]; - - if (!cmdlen[i]) - continue; - if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | - IWL_HCMD_DFL_DUP))) - continue; - if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) - data = dup_buf; - phys_addr = dma_map_single(trans->dev, (void *)data, - cmdlen[i], DMA_TO_DEVICE); - if (dma_mapping_error(trans->dev, phys_addr)) { - iwl_pcie_tfd_unmap(trans, out_meta, - &txq->tfds[q->write_ptr]); - idx = -ENOMEM; - goto out; - } - - iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); - } - - BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS > - sizeof(out_meta->flags) * BITS_PER_BYTE); - out_meta->flags = cmd->flags; - if (WARN_ON_ONCE(txq->entries[idx].free_buf)) - kzfree(txq->entries[idx].free_buf); - txq->entries[idx].free_buf = dup_buf; - - trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); - - /* start timer if queue currently empty */ - if (q->read_ptr == q->write_ptr && txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); - - spin_lock_irqsave(&trans_pcie->reg_lock, flags); - ret = iwl_pcie_set_cmd_in_flight(trans, cmd); - if (ret < 0) { - idx = ret; - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); - goto out; - } - - /* Increment and update queue's write index */ - q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); - iwl_pcie_txq_inc_wr_ptr(trans, txq); - - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); - - out: - spin_unlock_bh(&txq->lock); - free_dup_buf: - if (idx < 0) - kfree(dup_buf); - return idx; -} - -/* - * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them - * @rxb: Rx buffer to reclaim - * - * If an Rx buffer has an async callback associated with it the callback - * will be executed. 
The attached skb (if present) will only be freed - * if the callback returns 1 - */ -void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u16 sequence = le16_to_cpu(pkt->hdr.sequence); - int txq_id = SEQ_TO_QUEUE(sequence); - int index = SEQ_TO_INDEX(sequence); - int cmd_index; - struct iwl_device_cmd *cmd; - struct iwl_cmd_meta *meta; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; - - /* If a Tx command is being handled and it isn't in the actual - * command queue then there a command routing bug has been introduced - * in the queue management code. */ - if (WARN(txq_id != trans_pcie->cmd_queue, - "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, trans_pcie->cmd_queue, sequence, - trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr, - trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) { - iwl_print_hex_error(trans, pkt, 32); - return; - } - - spin_lock_bh(&txq->lock); - - cmd_index = get_cmd_index(&txq->q, index); - cmd = txq->entries[cmd_index].cmd; - meta = &txq->entries[cmd_index].meta; - - iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]); - - /* Input error checking is done when commands are added to queue. */ - if (meta->flags & CMD_WANT_SKB) { - struct page *p = rxb_steal_page(rxb); - - meta->source->resp_pkt = pkt; - meta->source->_rx_page_addr = (unsigned long)page_address(p); - meta->source->_rx_page_order = trans_pcie->rx_page_order; - } - - iwl_pcie_cmdq_reclaim(trans, txq_id, index); - - if (!(meta->flags & CMD_ASYNC)) { - if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { - IWL_WARN(trans, - "HCMD_ACTIVE already clear for command %s\n", - get_cmd_string(trans_pcie, cmd->hdr.cmd)); - } - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", - get_cmd_string(trans_pcie, cmd->hdr.cmd)); - wake_up(&trans_pcie->wait_command_queue); - } - - meta->flags = 0; - - spin_unlock_bh(&txq->lock); -} - -#define HOST_COMPLETE_TIMEOUT (2 * HZ) - -static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - /* An asynchronous command can not expect an SKB to be set. 
*/ - if (WARN_ON(cmd->flags & CMD_WANT_SKB)) - return -EINVAL; - - ret = iwl_pcie_enqueue_hcmd(trans, cmd); - if (ret < 0) { - IWL_ERR(trans, - "Error sending %s: enqueue_hcmd failed: %d\n", - get_cmd_string(trans_pcie, cmd->id), ret); - return ret; - } - return 0; -} - -static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int cmd_idx; - int ret; - - IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", - get_cmd_string(trans_pcie, cmd->id)); - - if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, - &trans->status), - "Command %s: a command is already active!\n", - get_cmd_string(trans_pcie, cmd->id))) - return -EIO; - - IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", - get_cmd_string(trans_pcie, cmd->id)); - - cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); - if (cmd_idx < 0) { - ret = cmd_idx; - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - IWL_ERR(trans, - "Error sending %s: enqueue_hcmd failed: %d\n", - get_cmd_string(trans_pcie, cmd->id), ret); - return ret; - } - - ret = wait_event_timeout(trans_pcie->wait_command_queue, - !test_bit(STATUS_SYNC_HCMD_ACTIVE, - &trans->status), - HOST_COMPLETE_TIMEOUT); - if (!ret) { - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; - struct iwl_queue *q = &txq->q; - - IWL_ERR(trans, "Error sending %s: time out after %dms.\n", - get_cmd_string(trans_pcie, cmd->id), - jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - - IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", - q->read_ptr, q->write_ptr); - - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", - get_cmd_string(trans_pcie, cmd->id)); - ret = -ETIMEDOUT; - - iwl_force_nmi(trans); - iwl_trans_fw_error(trans); - - goto cancel; - } - - if (test_bit(STATUS_FW_ERROR, &trans->status)) { - IWL_ERR(trans, "FW error in SYNC CMD %s\n", - get_cmd_string(trans_pcie, cmd->id)); - dump_stack(); - ret = -EIO; - goto cancel; - } - - if (!(cmd->flags & CMD_SEND_IN_RFKILL) && - test_bit(STATUS_RFKILL, &trans->status)) { - IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); - ret = -ERFKILL; - goto cancel; - } - - if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { - IWL_ERR(trans, "Error: Response NULL in '%s'\n", - get_cmd_string(trans_pcie, cmd->id)); - ret = -EIO; - goto cancel; - } - - return 0; - -cancel: - if (cmd->flags & CMD_WANT_SKB) { - /* - * Cancel the CMD_WANT_SKB flag for the cmd in the - * TX cmd queue. Otherwise in case the cmd comes - * in later, it will possibly set an invalid - * address (cmd->meta.source). - */ - trans_pcie->txq[trans_pcie->cmd_queue]. 
- entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; - } - - if (cmd->resp_pkt) { - iwl_free_resp(cmd); - cmd->resp_pkt = NULL; - } - - return ret; -} - -int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) -{ - if (!(cmd->flags & CMD_SEND_IN_RFKILL) && - test_bit(STATUS_RFKILL, &trans->status)) { - IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", - cmd->id); - return -ERFKILL; - } - - if (cmd->flags & CMD_ASYNC) - return iwl_pcie_send_hcmd_async(trans, cmd); - - /* We still can fail on RFKILL that can be asserted while we wait */ - return iwl_pcie_send_hcmd_sync(trans, cmd); -} - -int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct ieee80211_hdr *hdr; - struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - struct iwl_cmd_meta *out_meta; - struct iwl_txq *txq; - struct iwl_queue *q; - dma_addr_t tb0_phys, tb1_phys, scratch_phys; - void *tb1_addr; - u16 len, tb1_len, tb2_len; - bool wait_write_ptr; - __le16 fc; - u8 hdr_len; - u16 wifi_seq; - int i; - - txq = &trans_pcie->txq[txq_id]; - q = &txq->q; - - if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), - "TX on unused queue %d\n", txq_id)) - return -EINVAL; - - if (skb_is_nonlinear(skb) && - skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS && - __skb_linearize(skb)) - return -ENOMEM; - - /* mac80211 always puts the full header into the SKB's head, - * so there's no need to check if it's readable there - */ - hdr = (struct ieee80211_hdr *)skb->data; - fc = hdr->frame_control; - hdr_len = ieee80211_hdrlen(fc); - - spin_lock(&txq->lock); - - /* In AGG mode, the index in the ring must correspond to the WiFi - * sequence number. This is a HW requirements to help the SCD to parse - * the BA. - * Check here that the packets are in the right place on the ring. 
- */ - wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE(txq->ampdu && - (wifi_seq & 0xff) != q->write_ptr, - "Q: %d WiFi Seq %d tfdNum %d", - txq_id, wifi_seq, q->write_ptr); - - /* Set up driver data for this TFD */ - txq->entries[q->write_ptr].skb = skb; - txq->entries[q->write_ptr].cmd = dev_cmd; - - dev_cmd->hdr.sequence = - cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(q->write_ptr))); - - tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr); - scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch); - - tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); - tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); - - /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_meta = &txq->entries[q->write_ptr].meta; - out_meta->flags = 0; - - /* - * The second TB (tb1) points to the remainder of the TX command - * and the 802.11 header - dword aligned size - * (This calculation modifies the TX command, so do it before the - * setup of the first TB) - */ - len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + - hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; - tb1_len = ALIGN(len, 4); - - /* Tell NIC about any 2-byte padding after MAC header */ - if (tb1_len != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; - - /* The first TB points to the scratchbuf data - min_copy bytes */ - memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, - IWL_HCMD_SCRATCHBUF_SIZE); - iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, - IWL_HCMD_SCRATCHBUF_SIZE, true); - - /* there must be data left over for TB1 or this code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); - - /* map the data for TB1 */ - tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE; - tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) - goto out_err; - iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); - - /* - * Set up TFD's third entry to point directly to remainder - * of skb's head, if any - */ - tb2_len = skb_headlen(skb) - hdr_len; - if (tb2_len > 0) { - dma_addr_t tb2_phys = dma_map_single(trans->dev, - skb->data + hdr_len, - tb2_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { - iwl_pcie_tfd_unmap(trans, out_meta, - &txq->tfds[q->write_ptr]); - goto out_err; - } - iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); - } - - /* set up the remaining entries to point to the data */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - dma_addr_t tb_phys; - int tb_idx; - - if (!skb_frag_size(frag)) - continue; - - tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - iwl_pcie_tfd_unmap(trans, out_meta, - &txq->tfds[q->write_ptr]); - goto out_err; - } - tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, - skb_frag_size(frag), false); - - out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS); - } - - /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); - - trace_iwlwifi_dev_tx(trans->dev, skb, - &txq->tfds[txq->q.write_ptr], - sizeof(struct iwl_tfd), - &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, - skb->data + hdr_len, tb2_len); - trace_iwlwifi_dev_tx_data(trans->dev, skb, - hdr_len, skb->len - hdr_len); - - wait_write_ptr = 
ieee80211_has_morefrags(fc); - - /* start timer if queue currently empty */ - if (q->read_ptr == q->write_ptr) { - if (txq->wd_timeout) { - /* - * If the TXQ is active, then set the timer, if not, - * set the timer in remainder so that the timer will - * be armed with the right value when the station will - * wake up. - */ - if (!txq->frozen) - mod_timer(&txq->stuck_timer, - jiffies + txq->wd_timeout); - else - txq->frozen_expiry_remainder = txq->wd_timeout; - } - IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); - iwl_trans_pcie_ref(trans); - } - - /* Tell device the write index *just past* this latest filled TFD */ - q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); - if (!wait_write_ptr) - iwl_pcie_txq_inc_wr_ptr(trans, txq); - - /* - * At this point the frame is "transmitted" successfully - * and we will get a TX status notification eventually. - */ - if (iwl_queue_space(q) < q->high_mark) { - if (wait_write_ptr) - iwl_pcie_txq_inc_wr_ptr(trans, txq); - else - iwl_stop_queue(trans, txq); - } - spin_unlock(&txq->lock); - return 0; -out_err: - spin_unlock(&txq->lock); - return -1; -}
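
The Tx path above leans on simple power-of-two ring arithmetic: iwl_queue_inc_wrap()/iwl_queue_dec_wrap() step indices with a mask, and iwl_trans_pcie_reclaim() frees entries up to, but not including, the reported ssn. Below is a minimal standalone sketch of that index handling; the queue depth of 256 mirrors TFD_QUEUE_SIZE_MAX and the helper names echo the driver's, but treat Q_SIZE and the helpers here as illustrative assumptions rather than driver code.

/*
 * Illustrative sketch only -- not part of the patch. It mimics the
 * mask-based wrap used by iwl_queue_inc_wrap()/iwl_queue_dec_wrap()
 * and the "free until index, not inclusive" rule in
 * iwl_trans_pcie_reclaim().
 */
#include <stdio.h>

#define Q_SIZE 256			/* assumed TFD_QUEUE_SIZE_MAX */

static int q_inc_wrap(int idx)
{
	return ++idx & (Q_SIZE - 1);	/* power-of-two ring: wrap via mask */
}

static int q_dec_wrap(int idx)
{
	return --idx & (Q_SIZE - 1);
}

int main(void)
{
	int read_ptr = 250;			/* pretend 9 TFDs are in flight */
	int ssn = 3;				/* firmware-reported start seq */
	int tfd_num = ssn & (Q_SIZE - 1);	/* same masking as the driver */
	int last_to_free = q_dec_wrap(tfd_num);
	int freed = 0;

	/*
	 * Reclaim walks read_ptr forward until it reaches tfd_num;
	 * tfd_num itself is NOT freed, hence "not inclusive".
	 */
	for (; read_ptr != tfd_num; read_ptr = q_inc_wrap(read_ptr))
		freed++;

	printf("freed %d entries, last freed index %d\n", freed, last_to_free);
	return 0;
}

Because the ring size is a power of two, wrapping is a single AND rather than a modulo, which is why the reclaim and command-queue code above can mask sequence numbers with (TFD_QUEUE_SIZE_MAX - 1) when turning an ssn into a ring index.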