--- /dev/null
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "iwl-trans.h"
+#include "dev.h"
+#include "agn.h"
+
+const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+{
+ lockdep_assert_held(&priv->sta_lock);
+
+ if (sta_id >= IWLAGN_STATION_COUNT) {
+ IWL_ERR(priv, "invalid sta_id %u", sta_id);
+ return -EINVAL;
+ }
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+ IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u "
+ "addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present in uCode "
+ "(according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
+ return 0;
+}
+
+static int iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
+ u8 sta_id = addsta->sta.sta_id;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ return ret;
+ }
+
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock(&priv->sta_lock);
+
+ switch (add_sta_resp->status) {
+ case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
+ ret = iwl_sta_ucode_activate(priv, sta_id);
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv, "Adding station %d failed, no block ack "
+ "resource.\n", sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ add_sta_resp->status);
+ break;
+ }
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+ spin_unlock(&priv->sta_lock);
+
+ return ret;
+}
+
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_addsta_cmd *addsta =
+ (struct iwl_addsta_cmd *) cmd->payload;
+
+ return iwl_process_add_sta_resp(priv, addsta, pkt);
+}
+
+int iwl_send_add_sta(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *sta, u8 flags)
+{
+ int ret = 0;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_ADD_STA,
+ .flags = flags,
+ .data = { sta, },
+ .len = { sizeof(*sta), },
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
+
+ if (!(flags & CMD_ASYNC)) {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ ret = iwl_dvm_send_cmd(priv, &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+ /* else the command was successfully sent in SYNC mode, need to free
+ * the reply page */
+
+ iwl_free_resp(&cmd);
+
+ if (cmd.handler_status)
+ IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
+ cmd.handler_status);
+
+ return cmd.handler_status;
+}
+
+bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+ return false;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (priv->disable_ht40)
+ return false;
+#endif
+
+ /*
+ * Remainder of this function checks ht_cap, but if it's
+ * NULL then we can do HT40 (special case for RXON)
+ */
+ if (!ht_cap)
+ return true;
+
+ if (!ht_cap->ht_supported)
+ return false;
+
+ if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ return false;
+
+ return true;
+}
+
+static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx,
+ __le32 *flags, __le32 *mask)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ u8 mimo_ps_mode;
+
+ *mask = STA_FLG_RTS_MIMO_PROT_MSK |
+ STA_FLG_MIMO_DIS_MSK |
+ STA_FLG_HT40_EN_MSK |
+ STA_FLG_MAX_AGG_SIZE_MSK |
+ STA_FLG_AGG_MPDU_DENSITY_MSK;
+ *flags = 0;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ return;
+
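+ /* the SM PS field occupies bits 2..3 of the HT capability info,
+ * hence the shift by 2 */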
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+
+ IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
+ sta->addr,
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+ "dynamic" : "disabled");
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ *flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ *flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ *flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ *flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ *flags |= STA_FLG_HT40_EN_MSK;
+}
+
+int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct ieee80211_sta *sta)
+{
+ u8 sta_id = iwl_sta_id(sta);
+ __le32 flags, mask;
+ struct iwl_addsta_cmd cmd;
+
+ if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
+
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].sta.station_flags &= ~mask;
+ priv->stations[sta_id].sta.station_flags |= flags;
+ spin_unlock_bh(&priv->sta_lock);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mode = STA_CONTROL_MODIFY_MSK;
+ cmd.station_flags_msk = mask;
+ cmd.station_flags = flags;
+ cmd.sta.sta_id = sta_id;
+
+ return iwl_send_add_sta(priv, &cmd, CMD_SYNC);
+}
+
+static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx)
+{
+ __le32 flags, mask;
+
+ iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
+
+ lockdep_assert_held(&priv->sta_lock);
+ priv->stations[index].sta.station_flags &= ~mask;
+ priv->stations[index].sta.station_flags |= flags;
+}
+
+/**
+ * iwl_prep_station - Prepare station information for addition
+ *
+ * Should be called with sta_lock held.
+ */
+u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct iwl_station_entry *station;
+ int i;
+ u8 sta_id = IWL_INVALID_STATION;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) {
+ if (ether_addr_equal(priv->stations[i].sta.sta.addr,
+ addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!priv->stations[i].used &&
+ sta_id == IWL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IWL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being "
+ "added.\n", sta_id);
+ return sta_id;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
+ ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
+ "adding again.\n", sta_id, addr);
+ return sta_id;
+ }
+
+ station = &priv->stations[sta_id];
+ station->used = IWL_STA_DRIVER_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+ sta_id, addr);
+ priv->num_stations++;
+
+ /* Set up the REPLY_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct iwl_station_priv *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ iwl_set_ht_add_station(priv, sta_id, sta, ctx);
+
+ return sta_id;
+
+}
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_add_station_common - add a station to both the driver and the device
+ */
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+ int ret = 0;
+ u8 sta_id;
+ struct iwl_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_bh(&priv->sta_lock);
+ sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ addr);
+ spin_unlock_bh(&priv->sta_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being "
+ "added.\n", sta_id);
+ spin_unlock_bh(&priv->sta_lock);
+ return -EEXIST;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
+ "adding again.\n", sta_id, addr);
+ spin_unlock_bh(&priv->sta_lock);
+ return -EEXIST;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_addsta_cmd));
+ spin_unlock_bh(&priv->sta_lock);
+
+ /* Add station to device's station table */
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_bh(&priv->sta_lock);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[sta_id].sta.sta.addr);
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_bh(&priv->sta_lock);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+
+/**
+ * iwl_sta_ucode_deactivate - deactivate ucode status for a station
+ */
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+{
+ lockdep_assert_held(&priv->sta_lock);
+
+ /* uCode must be active and driver must be non-active */
+ if ((priv->stations[sta_id].used &
+ (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
+ IWL_STA_UCODE_ACTIVE)
+ IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+
+ memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+ IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+}
+
+static int iwl_send_remove_station(struct iwl_priv *priv,
+ const u8 *addr, int sta_id,
+ bool temporary)
+{
+ struct iwl_rx_packet *pkt;
+ int ret;
+ struct iwl_rem_sta_cmd rm_sta_cmd;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_REMOVE_STA,
+ .len = { sizeof(struct iwl_rem_sta_cmd), },
+ .flags = CMD_SYNC,
+ .data = { &rm_sta_cmd, },
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = iwl_dvm_send_cmd(priv, &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
+ switch (rem_sta_resp->status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_bh(&priv->sta_lock);
+ iwl_sta_ucode_deactivate(priv, sta_id);
+ spin_unlock_bh(&priv->sta_lock);
+ }
+ IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+ break;
+ }
+ }
+ iwl_free_resp(&cmd);
+
+ return ret;
+}
+
+/**
+ * iwl_remove_station - Remove driver's knowledge of station.
+ */
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
+{
+ u8 tid;
+
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ sta_id, addr);
+
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_bh(&priv->sta_lock);
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+ kfree(priv->stations[sta_id].lq);
+ priv->stations[sta_id].lq = NULL;
+ }
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ memset(&priv->tid_data[sta_id][tid], 0,
+ sizeof(priv->tid_data[sta_id][tid]));
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+ priv->num_stations--;
+
+ if (WARN_ON(priv->num_stations < 0))
+ priv->num_stations = 0;
+
+ spin_unlock_bh(&priv->sta_lock);
+
+ return iwl_send_remove_station(priv, addr, sta_id, false);
+out_err:
+ spin_unlock_bh(&priv->sta_lock);
+ return -EINVAL;
+}
+
+void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
+{
+ u8 tid;
+
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ return;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Deactivating STA: %pM (%d)\n", addr, sta_id);
+
+ if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+ return;
+
+ spin_lock_bh(&priv->sta_lock);
+
+ WARN_ON_ONCE(!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE));
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ memset(&priv->tid_data[sta_id][tid], 0,
+ sizeof(priv->tid_data[sta_id][tid]));
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+ priv->num_stations--;
+
+ if (WARN_ON_ONCE(priv->num_stations < 0))
+ priv->num_stations = 0;
+
+ spin_unlock_bh(&priv->sta_lock);
+}
+
+static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
+{
+ int i, r;
+ u32 rate_flags = 0;
+ __le32 rate_n_flags;
+
+ lockdep_assert_held(&priv->mutex);
+
+ memset(link_cmd, 0, sizeof(*link_cmd));
+
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ r = IWL_RATE_6M_INDEX;
+ else if (ctx && ctx->vif && ctx->vif->p2p)
+ r = IWL_RATE_6M_INDEX;
+ else
+ r = IWL_RATE_1M_INDEX;
+
+ if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |= first_antenna(priv->eeprom_data->valid_tx_ant) <<
+ RATE_MCS_ANT_POS;
+ rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+ link_cmd->general_params.single_stream_ant_msk =
+ first_antenna(priv->eeprom_data->valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->eeprom_data->valid_tx_ant &
+ ~first_antenna(priv->eeprom_data->valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->eeprom_data->valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th =
+ LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+}
+
+/**
+ * iwl_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ int i;
+ bool cleared = false;
+
+ IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+
+ spin_lock_bh(&priv->sta_lock);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+
+ if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_INFO(priv,
+ "Clearing ucode active for station %d\n", i);
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_bh(&priv->sta_lock);
+
+ if (!cleared)
+ IWL_DEBUG_INFO(priv,
+ "No active stations found to be cleared\n");
+}
+
+/**
+ * iwl_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ struct iwl_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+ spin_lock_bh(&priv->sta_lock);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+ if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+ !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].sta.mode = 0;
+ priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &priv->stations[i].sta,
+ sizeof(struct iwl_addsta_cmd));
+ send_lq = false;
+ if (priv->stations[i].lq) {
+ if (priv->wowlan)
+ iwl_sta_fill_lq(priv, ctx, i, &lq);
+ else
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_bh(&priv->sta_lock);
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_bh(&priv->sta_lock);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].used &=
+ ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[i].used &=
+ ~IWL_STA_UCODE_INPROGRESS;
+ continue;
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ iwl_send_lq_cmd(priv, ctx, &lq,
+ CMD_SYNC, true);
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_bh(&priv->sta_lock);
+ if (!found)
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
+ "no stations to be restored.\n");
+ else
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
+ "complete.\n");
+}
+
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
+{
+ int i;
+
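+ /* test_and_set_bit() finds and claims a free slot in one atomic step */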
+ for (i = 0; i < priv->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &priv->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
+
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
+{
+ int i;
+
+ spin_lock_bh(&priv->sta_lock);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (!(priv->stations[i].used & IWL_STA_BCAST))
+ continue;
+
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ priv->num_stations--;
+ if (WARN_ON(priv->num_stations < 0))
+ priv->num_stations = 0;
+ kfree(priv->stations[i].lq);
+ priv->stations[i].lq = NULL;
+ }
+ spin_unlock_bh(&priv->sta_lock);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+static void iwl_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+ IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
+ IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+ lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+ i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 first tells us that the
+ * current channel is no longer HT before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are no longer
+ * communicating HT, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending an LQ command in the
+ * window between the RXON flags being updated and the LQ command being
+ * updated.
+ */
+static bool is_lq_table_valid(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
+ RATE_MCS_HT_MSK) {
+ IWL_DEBUG_INFO(priv,
+ "index %d of LQ expects HT channel\n",
+ i);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * iwl_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * In the special case where init is set, we also clear the flag indicating
+ * that station creation is in progress once the command completes.
+ */
+int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_TX_LINK_QUALITY_CMD,
+ .len = { sizeof(struct iwl_link_quality_cmd), },
+ .flags = flags,
+ .data = { lq, },
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_bh(&priv->sta_lock);
+ if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_bh(&priv->sta_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&priv->sta_lock);
+
+ iwl_dump_lq_cmd(priv, lq);
+ if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
+ return -EINVAL;
+
+ if (is_lq_table_valid(priv, ctx, lq))
+ ret = iwl_dvm_send_cmd(priv, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ IWL_DEBUG_INFO(priv, "init LQ command complete, "
+ "clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_bh(&priv->sta_lock);
+ }
+ return ret;
+}
+
+static struct iwl_link_quality_cmd *
+iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ u8 sta_id)
+{
+ struct iwl_link_quality_cmd *link_cmd;
+
+ link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+
+ iwl_sta_fill_lq(priv, ctx, sta_id, link_cmd);
+
+ return link_cmd;
+}
+
+/*
+ * iwlagn_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int iwlagn_add_bssid_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r)
+{
+ int ret;
+ u8 sta_id;
+ struct iwl_link_quality_cmd *link_cmd;
+
+ if (sta_id_r)
+ *sta_id_r = IWL_INVALID_STATION;
+
+ ret = iwl_add_station_common(priv, ctx, addr, false, NULL, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].used |= IWL_STA_LOCAL;
+ spin_unlock_bh(&priv->sta_lock);
+
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
+
+ ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
+
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_bh(&priv->sta_lock);
+
+ return 0;
+}
+
+/*
+ * static WEP keys
+ *
+ * For each context, the device has a table of 4 static WEP keys
+ * (one for each key index) that is updated with the following
+ * commands.
+ */
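+
+/*
+ * The command built by iwl_send_static_wepkey_cmd() below is a
+ * struct iwl_wep_cmd header followed by WEP_KEYS_MAX key entries;
+ * unused slots are flagged with WEP_INVALID_OFFSET.
+ */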
+
+static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ bool send_if_empty)
+{
+ int i, not_empty = 0;
+ u8 buff[sizeof(struct iwl_wep_cmd) +
+ sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
+ struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
+ size_t cmd_size = sizeof(struct iwl_wep_cmd);
+ struct iwl_host_cmd cmd = {
+ .id = ctx->wep_key_cmd,
+ .data = { wep_cmd, },
+ .flags = CMD_SYNC,
+ };
+
+ might_sleep();
+
+ memset(wep_cmd, 0, cmd_size +
+ (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
+
+ for (i = 0; i < WEP_KEYS_MAX ; i++) {
+ wep_cmd->key[i].key_index = i;
+ if (ctx->wep_keys[i].key_size) {
+ wep_cmd->key[i].key_offset = i;
+ not_empty = 1;
+ } else {
+ wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+ }
+
+ wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+ memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+ ctx->wep_keys[i].key_size);
+ }
+
+ wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+ wep_cmd->num_keys = WEP_KEYS_MAX;
+
+ cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
+
+ cmd.len[0] = cmd_size;
+
+ if (not_empty || send_if_empty)
+ return iwl_dvm_send_cmd(priv, &cmd);
+ else
+ return 0;
+}
+
+int iwl_restore_default_wep_keys(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ lockdep_assert_held(&priv->mutex);
+
+ return iwl_send_static_wepkey_cmd(priv, ctx, false);
+}
+
+int iwl_remove_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
+ keyconf->keyidx);
+
+ memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+ if (iwl_is_rfkill(priv)) {
+ IWL_DEBUG_WEP(priv,
+ "Not sending REPLY_WEPKEY command due to RFKILL.\n");
+ /* but keys in device are clear anyway so return success */
+ return 0;
+ }
+ ret = iwl_send_static_wepkey_cmd(priv, ctx, true);
+ IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
+ keyconf->keyidx, ret);
+
+ return ret;
+}
+
+int iwl_set_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (keyconf->keylen != WEP_KEY_LEN_128 &&
+ keyconf->keylen != WEP_KEY_LEN_64) {
+ IWL_DEBUG_WEP(priv,
+ "Bad WEP key length %d\n", keyconf->keylen);
+ return -EINVAL;
+ }
+
+ keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT;
+
+ ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+ memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+ keyconf->keylen);
+
+ ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
+ IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
+ keyconf->keylen, keyconf->keyidx, ret);
+
+ return ret;
+}
+
+/*
+ * dynamic (per-station) keys
+ *
+ * The dynamic keys are a little more complicated. The device has
+ * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys.
+ * These are linked to stations by a table that contains an index
+ * into the key table for each station/key index/{mcast,unicast},
+ * i.e. it's basically an array of pointers like this:
+ * key_offset_t key_mapping[NUM_STATIONS][4][2];
+ * (it really works differently, but you can think of it as such)
+ *
+ * The key uploading and linking happens in the same command, the
+ * add station command with STA_MODIFY_KEY_MASK.
+ */
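+
+/*
+ * Key cache offsets are tracked in the priv->ucode_key_table bitmap:
+ * iwl_get_free_ucode_key_offset() above hands out the first free slot,
+ * and iwl_remove_dynamic_key() releases it again.
+ */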
+
+static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (sta)
+ return iwl_sta_id(sta);
+
+ /*
+ * The device expects GTKs for station interfaces to be
+ * installed as GTKs for the AP station. If we have no
+ * station ID, then use the ap_sta_id in that case.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION && vif_priv->ctx)
+ return vif_priv->ctx->ap_sta_id;
+
+ return IWL_INVALID_STATION;
+}
+
+static int iwlagn_send_sta_key(struct iwl_priv *priv,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
+ u32 cmd_flags)
+{
+ __le16 key_flags;
+ struct iwl_addsta_cmd sta_cmd;
+ int i;
+
+ spin_lock_bh(&priv->sta_lock);
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
+ spin_unlock_bh(&priv->sta_lock);
+
+ key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_flags |= STA_KEY_FLG_CCMP;
+ memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_flags |= STA_KEY_FLG_TKIP;
+ sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+ memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_flags |= STA_KEY_FLG_WEP;
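+ /* WEP key material starts at byte 3 of the key array, matching
+ * the static WEP command layout above */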
+ memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ /* key pointer (offset) */
+ sta_cmd.key.key_offset = keyconf->hw_key_idx;
+
+ sta_cmd.key.key_flags = key_flags;
+ sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
+ sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
+
+ return iwl_send_add_sta(priv, &sta_cmd, cmd_flags);
+}
+
+void iwl_update_tkip_key(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
+{
+ u8 sta_id = iwlagn_key_sta_id(priv, vif, sta);
+
+ if (sta_id == IWL_INVALID_STATION)
+ return;
+
+ if (iwl_scan_cancel(priv)) {
+ /* cancel scan failed, just live w/ bad key and rely
+ briefly on SW decryption */
+ return;
+ }
+
+ iwlagn_send_sta_key(priv, keyconf, sta_id,
+ iv32, phase1key, CMD_ASYNC);
+}
+
+int iwl_remove_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_addsta_cmd sta_cmd;
+ u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+ __le16 key_flags;
+
+ /* if station isn't there, neither is the key */
+ if (sta_id == IWL_INVALID_STATION)
+ return -ENOENT;
+
+ spin_lock_bh(&priv->sta_lock);
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
+ if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
+ sta_id = IWL_INVALID_STATION;
+ spin_unlock_bh(&priv->sta_lock);
+
+ if (sta_id == IWL_INVALID_STATION)
+ return 0;
+
+ lockdep_assert_held(&priv->mutex);
+
+ ctx->key_mapping_keys--;
+
+ IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
+ keyconf->keyidx, sta_id);
+
+ if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table))
+ IWL_ERR(priv, "offset %d not used in uCode key table.\n",
+ keyconf->hw_key_idx);
+
+ key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC |
+ STA_KEY_FLG_INVALID;
+
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ sta_cmd.key.key_flags = key_flags;
+ sta_cmd.key.key_offset = keyconf->hw_key_idx;
+ sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
+
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl_set_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_key_seq seq;
+ u16 p1k[5];
+ int ret;
+ u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+ const u8 *addr;
+
+ if (sta_id == IWL_INVALID_STATION)
+ return -EINVAL;
+
+ lockdep_assert_held(&priv->mutex);
+
+ keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
+ if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
+ return -ENOSPC;
+
+ ctx->key_mapping_keys++;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta)
+ addr = sta->addr;
+ else /* station mode case only */
+ addr = ctx->active.bssid_addr;
+
+ /* pre-fill phase 1 key into device cache */
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+ ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
+ seq.tkip.iv32, p1k, CMD_SYNC);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
+ 0, NULL, CMD_SYNC);
+ break;
+ default:
+ IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ ctx->key_mapping_keys--;
+ clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table);
+ }
+
+ IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+ sta ? sta->addr : NULL, ret);
+
+ return ret;
+}
+
+/**
+ * iwlagn_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_link_quality_cmd *link_cmd;
+ u8 sta_id;
+
+ spin_lock_bh(&priv->sta_lock);
+ sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare broadcast station\n");
+ spin_unlock_bh(&priv->sta_lock);
+
+ return -EINVAL;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used |= IWL_STA_BCAST;
+ spin_unlock_bh(&priv->sta_lock);
+
+ link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_bh(&priv->sta_lock);
+
+ return 0;
+}
+
+/**
+ * iwl_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwlagn. Placed here to have all bcast station management
+ * code together.
+ */
+int iwl_update_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_link_quality_cmd *link_cmd;
+ u8 sta_id = ctx->bcast_sta_id;
+
+ link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&priv->sta_lock);
+ if (priv->stations[sta_id].lq)
+ kfree(priv->stations[sta_id].lq);
+ else
+ IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_bh(&priv->sta_lock);
+
+ return 0;
+}
+
+int iwl_update_bcast_stations(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+ int ret = 0;
+
+ for_each_context(priv, ctx) {
+ ret = iwl_update_bcast_station(priv, ctx);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
+{
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* Remove "disable" flag, to enable Tx for this TID */
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+ spin_unlock_bh(&priv->sta_lock);
+
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn)
+{
+ int sta_id;
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ sta_id = iwl_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return -ENXIO;
+
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].sta.station_flags_msk = 0;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+ priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
+ priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+ spin_unlock_bh(&priv->sta_lock);
+
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid)
+{
+ int sta_id;
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ sta_id = iwl_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].sta.station_flags_msk = 0;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+ priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+ spin_unlock_bh(&priv->sta_lock);
+
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
+{
+ struct iwl_addsta_cmd cmd = {
+ .mode = STA_CONTROL_MODIFY_MSK,
+ .station_flags = STA_FLG_PWR_SAVE_MSK,
+ .station_flags_msk = STA_FLG_PWR_SAVE_MSK,
+ .sta.sta_id = sta_id,
+ .sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK,
+ .sleep_tx_count = cpu_to_le16(cnt),
+ };
+
+ iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "cfg.h"
+#include "dvm/commands.h" /* needed for BT for now */
+
+/* Highest firmware API version supported */
+#define IWL6000_UCODE_API_MAX 6
+#define IWL6050_UCODE_API_MAX 5
+#define IWL6000G2_UCODE_API_MAX 6
+#define IWL6035_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
+#define IWL6000G2_UCODE_API_OK 5
+#define IWL6050_UCODE_API_OK 5
+#define IWL6000G2B_UCODE_API_OK 6
+#define IWL6035_UCODE_API_OK 6
+
+/* Lowest firmware API version supported */
+#define IWL6000_UCODE_API_MIN 4
+#define IWL6050_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 5
+#define IWL6035_UCODE_API_MIN 6
+
+/* EEPROM versions */
+#define EEPROM_6000_TX_POWER_VERSION (4)
+#define EEPROM_6000_EEPROM_VERSION (0x423)
+#define EEPROM_6050_TX_POWER_VERSION (4)
+#define EEPROM_6050_EEPROM_VERSION (0x532)
+#define EEPROM_6150_TX_POWER_VERSION (6)
+#define EEPROM_6150_EEPROM_VERSION (0x553)
+#define EEPROM_6005_TX_POWER_VERSION (6)
+#define EEPROM_6005_EEPROM_VERSION (0x709)
+#define EEPROM_6030_TX_POWER_VERSION (6)
+#define EEPROM_6030_EEPROM_VERSION (0x709)
+#define EEPROM_6035_TX_POWER_VERSION (6)
+#define EEPROM_6035_EEPROM_VERSION (0x753)
+
+#define IWL6000_FW_PRE "iwlwifi-6000-"
+#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6050_FW_PRE "iwlwifi-6050-"
+#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
+#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
+#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
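+/* e.g. IWL6030_MODULE_FIRMWARE(6) expands to "iwlwifi-6000g2b-6.ucode" */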
+
+static const struct iwl_base_params iwl6000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_base_params iwl6050_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
+ .max_event_log_size = 1024,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_base_params iwl6000_g2_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_ht_params iwl6000_ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+static const struct iwl_bt_params iwl6000_bt_params = {
+ /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+ .advanced_bt_coexist = true,
+ .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+ .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+ .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+ .bt_sco_disable = true,
+};
+
+static const struct iwl_eeprom_params iwl6000_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
+ },
+ .enhanced_txpower = true,
+};
+
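+/*
+ * Each IWL_DEVICE_* macro below collects the fields shared by one
+ * device family; the per-SKU cfg structs then add the marketing name
+ * and any SKU-specific fields such as ht_params.
+ */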
+#define IWL_DEVICE_6005 \
+ .fw_name_pre = IWL6005_FW_PRE, \
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_6005, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
+ .base_params = &iwl6000_g2_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE
+
+const struct iwl_cfg iwl6005_2agn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2abg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
+ IWL_DEVICE_6005,
+};
+
+const struct iwl_cfg iwl6005_2bg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
+ IWL_DEVICE_6005,
+};
+
+const struct iwl_cfg iwl6005_2agn_sff_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_d_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
+#define IWL_DEVICE_6030 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_6030, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .base_params = &iwl6000_g2_base_params, \
+ .bt_params = &iwl6000_bt_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true \
+
+const struct iwl_cfg iwl6030_2agn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
+ IWL_DEVICE_6030,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6030_2abg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
+ IWL_DEVICE_6030,
+};
+
+const struct iwl_cfg iwl6030_2bgn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
+ IWL_DEVICE_6030,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6030_2bg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
+ IWL_DEVICE_6030,
+};
+
+#define IWL_DEVICE_6035 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6035_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6035_UCODE_API_OK, \
+ .ucode_api_min = IWL6035_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_6030, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .base_params = &iwl6000_g2_base_params, \
+ .bt_params = &iwl6000_bt_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true
+
+const struct iwl_cfg iwl6035_2agn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
+ IWL_DEVICE_6035,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl1030_bgn_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
+ IWL_DEVICE_6030,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl1030_bg_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
+ IWL_DEVICE_6030,
+};
+
+const struct iwl_cfg iwl130_bgn_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
+ IWL_DEVICE_6030,
+ .ht_params = &iwl6000_ht_params,
+ .rx_with_siso_diversity = true,
+};
+
+const struct iwl_cfg iwl130_bg_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
+ IWL_DEVICE_6030,
+ .rx_with_siso_diversity = true,
+};
+
+/*
+ * "i": Internal configuration, use internal Power Amplifier
+ */
+#define IWL_DEVICE_6000i \
+ .fw_name_pre = IWL6000_FW_PRE, \
+ .ucode_api_max = IWL6000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000_UCODE_API_OK, \
+ .ucode_api_min = IWL6000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_6000i, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
+ .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
+ .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
+ .base_params = &iwl6000_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
+ .led_mode = IWL_LED_BLINK
+
+const struct iwl_cfg iwl6000i_2agn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
+ IWL_DEVICE_6000i,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6000i_2abg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
+ IWL_DEVICE_6000i,
+};
+
+const struct iwl_cfg iwl6000i_2bg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
+ IWL_DEVICE_6000i,
+};
+
+#define IWL_DEVICE_6050 \
+ .fw_name_pre = IWL6050_FW_PRE, \
+ .ucode_api_max = IWL6050_UCODE_API_MAX, \
+ .ucode_api_min = IWL6050_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_6050, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
+ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
+ .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
+ .base_params = &iwl6050_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
+ .led_mode = IWL_LED_BLINK, \
+ .internal_wimax_coex = true
+
+const struct iwl_cfg iwl6050_2agn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
+ IWL_DEVICE_6050,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6050_2abg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
+ IWL_DEVICE_6050,
+};
+
+#define IWL_DEVICE_6150 \
+ .fw_name_pre = IWL6050_FW_PRE, \
+ .ucode_api_max = IWL6050_UCODE_API_MAX, \
+ .ucode_api_min = IWL6050_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_6150, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
+ .base_params = &iwl6050_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
+ .led_mode = IWL_LED_BLINK, \
+ .internal_wimax_coex = true
+
+const struct iwl_cfg iwl6150_bgn_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
+ IWL_DEVICE_6150,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6150_bg_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+ IWL_DEVICE_6150,
+};
+
+const struct iwl_cfg iwl6000_3agn_cfg = {
+ .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
+ .fw_name_pre = IWL6000_FW_PRE,
+ .ucode_api_max = IWL6000_UCODE_API_MAX,
+ .ucode_api_ok = IWL6000_UCODE_API_OK,
+ .ucode_api_min = IWL6000_UCODE_API_MIN,
+ .device_family = IWL_DEVICE_FAMILY_6000,
+ .max_inst_size = IWL60_RTC_INST_SIZE,
+ .max_data_size = IWL60_RTC_DATA_SIZE,
+ .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .base_params = &iwl6000_base_params,
+ .eeprom_params = &iwl6000_eeprom_params,
+ .ht_params = &iwl6000_ht_params,
+ .led_mode = IWL_LED_BLINK,
+};
+
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
--- /dev/null
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include "iwl-drv.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-agn-hw.h"
+#include "internal.h"
+/* FIXME: need to abstract out TX command (once we know what it looks like) */
+#include "dvm/commands.h"
+
+#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
+ (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
+ (~(1<<(trans_pcie)->cmd_queue)))
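+/*
+ * e.g. with 20 queues and command queue 4 this evaluates to
+ * 0xFFFEF: every queue selected for chaining except the command
+ * queue itself.
+ */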
+
+static int iwl_trans_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct device *dev = trans->dev;
+
+ memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+ spin_lock_init(&rxq->lock);
+
+ if (WARN_ON(rxq->bd || rxq->rb_stts))
+ return -EINVAL;
+
+ /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ /* Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb_stts;
+
+ return 0;
+
+err_rb_stts:
+ dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+err_bd:
+ return -ENOMEM;
+}
+
+static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ int i;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rxq->pool[i].page,
+ trans_pcie->rx_page_order);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+}
+
+static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
+ struct iwl_rx_queue *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+ if (trans_pcie->rx_buf_size_8k)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
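+ /* (the register takes the base address in 256-byte units,
+ * hence the shift by 8) */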
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ rb_size |
+ (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
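+ /* (the coalescing timer counts in units of 32 usecs) */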
+}
+
+static int iwl_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+
+ int i, err;
+ unsigned long flags;
+
+ if (!rxq->bd) {
+ err = iwl_trans_rx_alloc(trans);
+ if (err)
+ return err;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ iwl_trans_rxq_free_rx_bufs(trans);
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ iwlagn_rx_replenish(trans);
+
+ iwl_trans_rx_hw_init(trans, rxq);
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ rxq->need_update = 1;
+ iwl_rx_queue_update_write_ptr(trans, rxq);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ return 0;
+}
+
+static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ unsigned long flags;
+
+ /* if rxq->bd is NULL, nothing has been allocated yet,
+ * so exit now */
+ if (!rxq->bd) {
+ IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ iwl_trans_rxq_free_rx_bufs(trans);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ else
+ IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+ memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts = NULL;
+}
+
+static int iwl_trans_rx_stop(struct iwl_trans *trans)
+{
+ /* stop Rx DMA */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ if (WARN_ON(ptr->addr))
+ return -EINVAL;
+
+ ptr->addr = dma_alloc_coherent(trans->dev, size,
+ &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
+{
+ struct iwl_tx_queue *txq = (void *)data;
+ struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+ struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->q.read_ptr == txq->q.write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ jiffies_to_msecs(trans_pcie->wd_timeout));
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
+ IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
+ & (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));
+
+ iwl_op_mode_nic_error(trans->op_mode);
+}
+
+static int iwl_trans_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ int i;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
+ (unsigned long)txq);
+ txq->trans_pcie = trans_pcie;
+
+ txq->q.n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_tx_queue_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+ IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = txq_id;
+
+ return 0;
+error:
+ if (txq->entries && txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
+}
+
+static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int ret;
+
+ txq->need_update = 0;
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
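+ /* (with a power-of-two size the wrap reduces to a mask:
+ * next = (index + 1) & (TFD_QUEUE_SIZE_MAX - 1)) */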
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+ txq_id);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+
+ /*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ * Circular buffer (TFD queue in DRAM) physical base address */
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/**
+ * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ enum dma_data_direction dma_dir;
+
+ if (!q->n_bd)
+ return;
+
+ /* In the command queue, all the TBs are mapped as BIDI
+ * so unmap them as such.
+ */
+ if (txq_id == trans_pcie->cmd_queue)
+ dma_dir = DMA_BIDIRECTIONAL;
+ else
+ dma_dir = DMA_TO_DEVICE;
+
+ spin_lock_bh(&txq->lock);
+ while (q->write_ptr != q->read_ptr) {
+ iwl_txq_free_tfd(trans, txq, dma_dir);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+ spin_unlock_bh(&txq->lock);
+}
+
+/**
+ * iwl_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct device *dev = trans->dev;
+ int i;
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_tx_queue_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < txq->q.n_window; i++)
+ kfree(txq->entries[i].cmd);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd) {
+ dma_free_coherent(dev, sizeof(struct iwl_tfd) *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ }
+
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ del_timer_sync(&txq->stuck_timer);
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+
+/**
+ * iwl_trans_pcie_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
+{
+ int txq_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* Tx queues */
+ if (trans_pcie->txq) {
+ for (txq_id = 0;
+ txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+ iwl_tx_queue_free(trans, txq_id);
+ }
+
+ kfree(trans_pcie->txq);
+ trans_pcie->txq = NULL;
+
+ iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
+
+ iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+}
+
+/**
+ * iwl_trans_tx_alloc - allocate TX context
+ *
+ * Allocate all Tx DMA structures and initialize them.
+ * Returns 0 on success, or a negative error code on failure.
+ */
+static int iwl_trans_tx_alloc(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+
+ /* It is not allowed to alloc twice, so warn when this happens.
+ * We cannot rely on the previous allocation, so free and fail */
+ if (WARN_ON(trans_pcie->txq)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+ scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ goto error;
+ }
+
+ /* Alloc keep-warm buffer */
+ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(trans, "Keep Warm allocation failed\n");
+ goto error;
+ }
+
+ trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
+ sizeof(struct iwl_tx_queue), GFP_KERNEL);
+ if (!trans_pcie->txq) {
+ IWL_ERR(trans, "Not enough memory for txq\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ iwl_trans_pcie_tx_free(trans);
+
+ return ret;
+}
+
+static int iwl_tx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+ bool alloc = false;
+
+ if (!trans_pcie->txq) {
+ ret = iwl_trans_tx_alloc(trans);
+ if (ret)
+ goto error;
+ alloc = true;
+ }
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl_write_prph(trans, SCD_TXFACT, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ /* Upon error, free only if we allocated something */
+ if (alloc)
+ iwl_trans_pcie_tx_free(trans);
+ return ret;
+}
+
+static void iwl_set_pwr_vmain(struct iwl_trans *trans)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int pos;
+ u16 pci_lnk_ctl;
+
+ struct pci_dev *pci_dev = trans_pcie->pci_dev;
+
+ pos = pci_pcie_cap(pci_dev);
+ pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+static void iwl_apm_config(struct iwl_trans *trans)
+{
+ /*
+ * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ u16 lctl = iwl_pciexp_link_ctrl(trans);
+
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+ dev_printk(KERN_INFO, trans->dev,
+ "L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+ dev_printk(KERN_INFO, trans->dev,
+ "L1 Disabled; Enabling L0S\n");
+ }
+ trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int iwl_apm_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = 0;
+ IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ iwl_apm_config(trans);
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (trans->cfg->base_params->pll_cfg_val)
+ iwl_set_bit(trans, CSR_ANA_PLL_CFG,
+ trans->cfg->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA clock and wait for it to stabilize.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+
+out:
+ return ret;
+}
+
+static int iwl_apm_stop_master(struct iwl_trans *trans)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
+
+ IWL_DEBUG_INFO(trans, "stop master\n");
+
+ return ret;
+}
+
+static void iwl_apm_stop(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+ clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+
+ /* Stop device's DMA activity */
+ iwl_apm_stop_master(trans);
+
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+static int iwl_nic_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ /* nic_init */
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ iwl_apm_init(trans);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ iwl_set_pwr_vmain(trans);
+
+ iwl_op_mode_nic_config(trans->op_mode);
+
+#ifndef CONFIG_IWLWIFI_IDI
+ /* Allocate the RX queue, or reset if it is already allocated */
+ iwl_rx_init(trans);
+#endif
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_tx_init(trans))
+ return -ENOMEM;
+
+ if (trans->cfg->base_params->shadow_reg_enable) {
+ /* enable shadow regs in HW */
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+ }
+
+ return 0;
+}
+
+#define HW_READY_TIMEOUT (50)
+
+/* Note: returns poll_bit return value, which is >= 0 if success */
+static int iwl_set_hw_ready(struct iwl_trans *trans)
+{
+ int ret;
+
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
+
+ IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+ return ret;
+}
+
+/* Note: returns standard 0/-ERROR code */
+static int iwl_prepare_card_hw(struct iwl_trans *trans)
+{
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+
+ ret = iwl_set_hw_ready(trans);
+ /* If the card is ready, exit 0 */
+ if (ret >= 0)
+ return 0;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
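+ /* (i.e. wait up to 150 ms for the PREPARE_DONE bit to clear) */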
+
+ if (ret < 0)
+ return ret;
+
+ /* HW should be ready by now, check again. */
+ ret = iwl_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
+ return ret;
+}
+
+/*
+ * ucode
+ */
+static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+ const struct fw_desc *section)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ dma_addr_t phy_addr = section->p_addr;
+ u32 byte_cnt = section->len;
+ u32 dst_addr = section->offset;
+ int ret;
+
+ trans_pcie->ucode_write_complete = false;
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+ iwl_write_direct32(trans,
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+ dst_addr);
+
+ iwl_write_direct32(trans,
+ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+ iwl_write_direct32(trans,
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+ section_num);
+ ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+ trans_pcie->ucode_write_complete, 5 * HZ);
+ if (!ret) {
+ IWL_ERR(trans, "Could not load the [%d] uCode section\n",
+ section_num);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+ if (!image->sec[i].p_addr)
+ break;
+
+ ret = iwl_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(trans, CSR_RESET, 0);
+
+ return 0;
+}
+
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw)
+{
+ int ret;
+ bool hw_rfkill;
+
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ iwl_enable_rfkill_int(trans);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ if (hw_rfkill)
+ return -ERFKILL;
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+ iwl_enable_interrupts(trans);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Load the given image to the HW */
+ return iwl_load_given_ucode(trans, fw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
+ * Must be called under the irq lock and with MAC access.
+ */
+static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
+{
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->irq_lock);
+
+ iwl_write_prph(trans, SCD_TXFACT, mask);
+}
+
+static void iwl_tx_start(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+ /* make sure all queues are not stopped/used */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ trans_pcie->scd_base_addr =
+ iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+ a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+ /* reset context data memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+ /* reset tx status memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+ for (; a < trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(
+ trans->cfg->base_params->num_of_queues);
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+
+ iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
+ trans_pcie->scd_bc_tbls.dma >> 10);
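+ /* (the scheduler takes the byte-count table base in 1 KB units,
+ * hence the shift by 10) */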
+
+ /* The chain extension of the SCD doesn't work well. This feature is
+ * enabled by default by the HW, so we need to disable it manually.
+ */
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
+ for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
+ int fifo = trans_pcie->setup_q_to_fifo[i];
+
+ __iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
+ IWL_TID_NON_QOS,
+ SCD_FRAME_LIMIT, 0);
+ }
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
+ iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ /* Enable L1-Active */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
+{
+ iwl_reset_ict(trans);
+ iwl_tx_start(trans);
+}
+
+/**
+ * iwl_trans_tx_stop - Stop all Tx DMA channels
+ */
+static int iwl_trans_tx_stop(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ch, txq_id, ret;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+ iwl_trans_txq_set_sched(trans, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
+ if (ret < 0)
+ IWL_ERR(trans,
+ "Failing on timeout while stopping DMA channel %d [0x%08x]",
+ ch,
+ iwl_read_direct32(trans,
+ FH_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ if (!trans_pcie->txq) {
+ IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
+ return 0;
+ }
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++)
+ iwl_tx_queue_unmap(trans, txq_id);
+
+ return 0;
+}
+
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ iwl_disable_interrupts(trans);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ /* device going down, Stop using ICT table */
+ iwl_disable_ict(trans);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
+ iwl_trans_tx_stop(trans);
+#ifndef CONFIG_IWLWIFI_IDI
+ iwl_trans_rx_stop(trans);
+#endif
+ /* Power-down device's busmaster DMA clocks */
+ iwl_write_prph(trans, APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+ }
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_apm_stop(trans);
+
+ /* Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * Clean again the interrupt here
+ */
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ iwl_disable_interrupts(trans);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ iwl_enable_rfkill_int(trans);
+
+ /* wait to make sure we flush the pending tasklet */
+ synchronize_irq(trans_pcie->irq);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+
+ cancel_work_sync(&trans_pcie->rx_replenish);
+
+ /* stop and reset the on-board processor */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* clear all status bits */
+ clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+}
+
+static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+{
+ /* let the ucode operate on its own */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ iwl_disable_interrupts(trans);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
+static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ dma_addr_t phys_addr = 0;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u8 wait_write_ptr = 0;
+ __le16 fc = hdr->frame_control;
+ u8 hdr_len = ieee80211_hdrlen(fc);
+ u16 __maybe_unused wifi_seq;
+
+ txq = &trans_pcie->txq[txq_id];
+ q = &txq->q;
+
+ if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ spin_lock(&txq->lock);
+
+ /* Set up driver data for this TFD */
+ txq->entries[q->write_ptr].skb = skb;
+ txq->entries[q->write_ptr].cmd = dev_cmd;
+
+ dev_cmd->hdr.cmd = REPLY_TX;
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[q->write_ptr].meta;
+
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
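+ /* (rounds len up to the next dword boundary, e.g. 45 -> 48) */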
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = dma_map_single(trans->dev,
+ &dev_cmd->hdr, firstlen,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
+ goto out_err;
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+
+ if (!ieee80211_has_morefrags(fc)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
+ secondlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+ dma_unmap_single(trans->dev,
+ dma_unmap_addr(out_meta, mapping),
+ dma_unmap_len(out_meta, len),
+ DMA_BIDIRECTIONAL);
+ goto out_err;
+ }
+ }
+
+ /* Attach buffers to TFD */
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
+ if (secondlen > 0)
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
+ secondlen, 0);
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
+ le16_to_cpu(dev_cmd->hdr.sequence));
+ IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+
+ dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(trans->dev,
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &dev_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* start timer if queue currently empty */
+ if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_txq_update_write_ptr(trans, txq);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually. If
+ * the queue is getting full, stop it (or, for frames with
+ * more fragments pending, just defer the write-pointer
+ * update) until the op mode reclaims some space.
+ */
+ if (iwl_queue_space(q) < q->high_mark) {
+ if (wait_write_ptr) {
+ txq->need_update = 1;
+ iwl_txq_update_write_ptr(trans, txq);
+ } else {
+ iwl_stop_queue(trans, txq);
+ }
+ }
+ spin_unlock(&txq->lock);
+ return 0;
+ out_err:
+ spin_unlock(&txq->lock);
+ return -1;
+}
+
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int err;
+ bool hw_rfkill;
+
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
+ if (!trans_pcie->irq_requested) {
+ tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
+ iwl_irq_tasklet, (unsigned long)trans);
+
+ iwl_alloc_isr_ict(trans);
+
+ err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
+ DRV_NAME, trans);
+ if (err) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n",
+ trans_pcie->irq);
+ goto error;
+ }
+
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
+ trans_pcie->irq_requested = true;
+ }
+
+ err = iwl_prepare_card_hw(trans);
+ if (err) {
+ IWL_ERR(trans, "Error while preparing HW: %d", err);
+ goto err_free_irq;
+ }
+
+ iwl_apm_init(trans);
+
+ /* From now on, the op_mode will be kept updated about RF kill state */
+ iwl_enable_rfkill_int(trans);
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+ return err;
+
+err_free_irq:
+ free_irq(trans_pcie->irq, trans);
+error:
+ iwl_free_isr_ict(trans);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+ return err;
+}
+
+static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
+ bool op_mode_leaving)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
+ unsigned long flags;
+
+ iwl_apm_stop(trans);
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ iwl_disable_interrupts(trans);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ if (!op_mode_leaving) {
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ }
+}
+
+static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ /* n_bd is usually 256 => n_bd - 1 = 0xff */
+ int tfd_num = ssn & (txq->q.n_bd - 1);
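+ /* (e.g. ssn 0x212 & 0xff -> tfd_num 0x12) */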
+ int freed = 0;
+
+ spin_lock(&txq->lock);
+
+ if (txq->q.read_ptr != tfd_num) {
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+ txq_id, txq->q.read_ptr, tfd_num, ssn);
+ freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ iwl_wake_queue(trans, txq);
+ }
+
+ spin_unlock(&txq->lock);
+}
+
+static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+ writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+{
+ return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+ if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+ trans_pcie->n_no_reclaim_cmds = 0;
+ else
+ trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+ if (trans_pcie->n_no_reclaim_cmds)
+ memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+ trans_pcie->n_no_reclaim_cmds * sizeof(u8));
+
+ trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
+
+ if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
+ trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
+
+ /* at least the command queue must be mapped */
+ WARN_ON(!trans_pcie->n_q_to_fifo);
+
+ memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
+ trans_pcie->n_q_to_fifo * sizeof(u8));
+
+ trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
+ if (trans_pcie->rx_buf_size_8k)
+ trans_pcie->rx_page_order = get_order(8 * 1024);
+ else
+ trans_pcie->rx_page_order = get_order(4 * 1024);
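+ /* (with 4 KiB pages: order 1 for 8 KiB buffers, order 0 for 4 KiB) */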
+
+ trans_pcie->wd_timeout =
+ msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
+
+ trans_pcie->command_names = trans_cfg->command_names;
+}
+
+void iwl_trans_pcie_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_trans_pcie_tx_free(trans);
+#ifndef CONFIG_IWLWIFI_IDI
+ iwl_trans_pcie_rx_free(trans);
+#endif
+ if (trans_pcie->irq_requested == true) {
+ free_irq(trans_pcie->irq, trans);
+ iwl_free_isr_ict(trans);
+ }
+
+ pci_disable_msi(trans_pcie->pci_dev);
+ iounmap(trans_pcie->hw_base);
+ pci_release_regions(trans_pcie->pci_dev);
+ pci_disable_device(trans_pcie->pci_dev);
+ kmem_cache_destroy(trans->dev_cmd_pool);
+
+ kfree(trans);
+}
+
+static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (state)
+ set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ else
+ clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+ return 0;
+}
+
+static int iwl_trans_pcie_resume(struct iwl_trans *trans)
+{
+ bool hw_rfkill;
+
+ iwl_enable_rfkill_int(trans);
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+ if (!hw_rfkill)
+ iwl_enable_interrupts(trans);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#define IWL_FLUSH_WAIT_MS 2000
+
+static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ int cnt;
+ unsigned long now = jiffies;
+ int ret = 0;
+
+ /* waiting for all the tx frames complete might take a while */
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ if (cnt == trans_pcie->cmd_queue)
+ continue;
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ while (q->read_ptr != q->write_ptr && !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ msleep(1);
+
+ if (q->read_ptr != q->write_ptr) {
+ IWL_ERR(trans, "fail to flush all tx fifo queues\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+ return ret;
+}
+
+static const char *get_fh_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+#undef IWL_CMD
+}
+
+int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(trans, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(trans, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(trans, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(trans, fh_tbl[i]));
+ }
+ return 0;
+}
+
+static const char *get_csr_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+ }
+#undef IWL_CMD
+}
+
+void iwl_dump_csr(struct iwl_trans *trans)
+{
+ int i;
+ static const u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(trans, "CSR values:\n");
+ IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(trans, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(trans, csr_tbl[i]));
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, trans, \
+ &iwl_dbgfs_##name##_ops)) \
+ return -ENOMEM; \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ size_t bufsz;
+
+ bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
+
+ if (!trans_pcie->txq)
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
+ cnt, q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, trans_pcie->queue_used),
+ !!test_bit(cnt, trans_pcie->queue_stopped));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+ rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+ rxq->write);
+ pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ int pos = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Interrupt Statistics Report:\n");
+
+ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ isr_stats->hw);
+ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ isr_stats->sw);
+ if (isr_stats->sw || isr_stats->hw) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ isr_stats->err_code);
+ }
+#ifdef CONFIG_IWLWIFI_DEBUG
+ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ isr_stats->sch);
+ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ isr_stats->alive);
+#endif
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ isr_stats->ctkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ isr_stats->wakeup);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx command responses:\t\t %u\n", isr_stats->rx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ isr_stats->tx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ isr_stats->unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ memset(isr_stats, 0, sizeof(*isr_stats));
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char buf[8];
+ int buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &csr) != 1)
+ return -EFAULT;
+
+ iwl_dump_csr(trans);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char *buf = NULL; /* iwl_dump_fh() may not allocate a buffer */
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ ret = pos = iwl_dump_fh(trans, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+
+ if (!trans->op_mode)
+ return -EAGAIN;
+
+ iwl_op_mode_nic_error(trans->op_mode);
+
+ return count;
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_WRITE_FILE_OPS(fw_restart);
+
+/*
+ * Create the debugfs files and directories
+ */
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{
+ DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
+ return 0;
+}
+#else
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{
+ return 0;
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+static const struct iwl_trans_ops trans_ops_pcie = {
+ .start_hw = iwl_trans_pcie_start_hw,
+ .stop_hw = iwl_trans_pcie_stop_hw,
+ .fw_alive = iwl_trans_pcie_fw_alive,
+ .start_fw = iwl_trans_pcie_start_fw,
+ .stop_device = iwl_trans_pcie_stop_device,
+
+ .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
+
+ .send_cmd = iwl_trans_pcie_send_cmd,
+
+ .tx = iwl_trans_pcie_tx,
+ .reclaim = iwl_trans_pcie_reclaim,
+
+ .txq_disable = iwl_trans_pcie_txq_disable,
+ .txq_enable = iwl_trans_pcie_txq_enable,
+
+ .dbgfs_register = iwl_trans_pcie_dbgfs_register,
+
+ .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwl_trans_pcie_suspend,
+ .resume = iwl_trans_pcie_resume,
+#endif
+ .write8 = iwl_trans_pcie_write8,
+ .write32 = iwl_trans_pcie_write32,
+ .read32 = iwl_trans_pcie_read32,
+ .configure = iwl_trans_pcie_configure,
+ .set_pmi = iwl_trans_pcie_set_pmi,
+};
+
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ const struct iwl_cfg *cfg)
+{
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ char cmd_pool_name[100];
+ u16 pci_cmd;
+ int err;
+
+ trans = kzalloc(sizeof(struct iwl_trans) +
+ sizeof(struct iwl_trans_pcie), GFP_KERNEL);
+
+ if (WARN_ON(!trans))
+ return NULL;
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans->ops = &trans_ops_pcie;
+ trans->cfg = cfg;
+ trans_pcie->trans = trans;
+ spin_lock_init(&trans_pcie->irq_lock);
+ init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+ /* W/A - seems to solve weird behavior. We need to remove this if we
+ * don't want to stay in L1 all the time. This wastes a lot of power */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_no_pci;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
+ goto out_pci_disable_device;
+ }
+
+ trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+ if (!trans_pcie->hw_base) {
+ dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pci_resource_len = 0x%08llx\n",
+ (unsigned long long) pci_resource_len(pdev, 0));
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pci_resource_base = %p\n", trans_pcie->hw_base);
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "HW Revision ID = 0x%X\n", pdev->revision);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ err = pci_enable_msi(pdev);
+ if (err)
+ dev_printk(KERN_ERR, &pdev->dev,
+ "pci_enable_msi failed(0X%x)", err);
+
+ trans->dev = &pdev->dev;
+ trans_pcie->irq = pdev->irq;
+ trans_pcie->pci_dev = pdev;
+ trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+ trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
+ snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
+ "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+
+ /* TODO: Move this away, not needed if not MSI */
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+
+ /* Initialize the wait queue for commands */
+ init_waitqueue_head(&trans->wait_command_queue);
+ spin_lock_init(&trans->reg_lock);
+
+ snprintf(cmd_pool_name, sizeof(cmd_pool_name), "iwl_cmd_pool:%s",
+ dev_name(trans->dev));
+
+ trans->dev_cmd_headroom = 0;
+ trans->dev_cmd_pool =
+ kmem_cache_create(cmd_pool_name,
+ sizeof(struct iwl_device_cmd)
+ + trans->dev_cmd_headroom,
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!trans->dev_cmd_pool)
+ goto out_pci_disable_msi;
+
+ return trans;
+
+out_pci_disable_msi:
+ pci_disable_msi(pdev);
+out_pci_release_regions:
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_no_pci:
+ kfree(trans);
+ return NULL;
+}