1 /******************************************************************************
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/sched.h>
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_arp.h>
40 #include <net/mac80211.h>
42 #include <asm/div64.h>
44 #include "iwl-eeprom.h"
48 #include "iwl-agn-calib.h"
50 #include "iwl-shared.h"
51 #include "iwl-trans.h"
52 #include "iwl-op-mode.h"
54 /******************************************************************************
58 ******************************************************************************/
61 * module name, copyright, version, etc.
63 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
65 #ifdef CONFIG_IWLWIFI_DEBUG
71 #define DRV_VERSION IWLWIFI_VERSION VD
74 MODULE_DESCRIPTION(DRV_DESCRIPTION
);
75 MODULE_VERSION(DRV_VERSION
);
76 MODULE_AUTHOR(DRV_COPYRIGHT
" " DRV_AUTHOR
);
77 MODULE_LICENSE("GPL");
78 MODULE_ALIAS("iwlagn");
80 void iwl_update_chain_flags(struct iwl_priv
*priv
)
82 struct iwl_rxon_context
*ctx
;
84 for_each_context(priv
, ctx
) {
85 iwlagn_set_rxon_chain(priv
, ctx
);
86 if (ctx
->active
.rx_chain
!= ctx
->staging
.rx_chain
)
87 iwlagn_commit_rxon(priv
, ctx
);
91 /* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
92 static void iwl_set_beacon_tim(struct iwl_priv
*priv
,
93 struct iwl_tx_beacon_cmd
*tx_beacon_cmd
,
94 u8
*beacon
, u32 frame_size
)
97 struct ieee80211_mgmt
*mgmt
= (struct ieee80211_mgmt
*)beacon
;
100 * The index is relative to frame start but we start looking at the
101 * variable-length part of the beacon.
103 tim_idx
= mgmt
->u
.beacon
.variable
- beacon
;
105 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
106 while ((tim_idx
< (frame_size
- 2)) &&
107 (beacon
[tim_idx
] != WLAN_EID_TIM
))
108 tim_idx
+= beacon
[tim_idx
+1] + 2;
110 /* If TIM field was found, set variables */
111 if ((tim_idx
< (frame_size
- 1)) && (beacon
[tim_idx
] == WLAN_EID_TIM
)) {
112 tx_beacon_cmd
->tim_idx
= cpu_to_le16(tim_idx
);
113 tx_beacon_cmd
->tim_size
= beacon
[tim_idx
+1];
115 IWL_WARN(priv
, "Unable to find TIM Element in beacon\n");
118 int iwlagn_send_beacon_cmd(struct iwl_priv
*priv
)
120 struct iwl_tx_beacon_cmd
*tx_beacon_cmd
;
121 struct iwl_host_cmd cmd
= {
122 .id
= REPLY_TX_BEACON
,
125 struct ieee80211_tx_info
*info
;
131 * We have to set up the TX command, the TX Beacon command, and the
135 lockdep_assert_held(&priv
->mutex
);
137 if (!priv
->beacon_ctx
) {
138 IWL_ERR(priv
, "trying to build beacon w/o beacon context!\n");
142 if (WARN_ON(!priv
->beacon_skb
))
145 /* Allocate beacon command */
146 if (!priv
->beacon_cmd
)
147 priv
->beacon_cmd
= kzalloc(sizeof(*tx_beacon_cmd
), GFP_KERNEL
);
148 tx_beacon_cmd
= priv
->beacon_cmd
;
152 frame_size
= priv
->beacon_skb
->len
;
154 /* Set up TX command fields */
155 tx_beacon_cmd
->tx
.len
= cpu_to_le16((u16
)frame_size
);
156 tx_beacon_cmd
->tx
.sta_id
= priv
->beacon_ctx
->bcast_sta_id
;
157 tx_beacon_cmd
->tx
.stop_time
.life_time
= TX_CMD_LIFE_TIME_INFINITE
;
158 tx_beacon_cmd
->tx
.tx_flags
= TX_CMD_FLG_SEQ_CTL_MSK
|
159 TX_CMD_FLG_TSF_MSK
| TX_CMD_FLG_STA_RATE_MSK
;
161 /* Set up TX beacon command fields */
162 iwl_set_beacon_tim(priv
, tx_beacon_cmd
, priv
->beacon_skb
->data
,
165 /* Set up packet rate and flags */
166 info
= IEEE80211_SKB_CB(priv
->beacon_skb
);
169 * Let's set up the rate at least somewhat correctly;
170 * it will currently not actually be used by the uCode,
171 * it uses the broadcast station's rate instead.
173 if (info
->control
.rates
[0].idx
< 0 ||
174 info
->control
.rates
[0].flags
& IEEE80211_TX_RC_MCS
)
177 rate
= info
->control
.rates
[0].idx
;
179 priv
->mgmt_tx_ant
= iwl_toggle_tx_ant(priv
, priv
->mgmt_tx_ant
,
180 hw_params(priv
).valid_tx_ant
);
181 rate_flags
= iwl_ant_idx_to_flags(priv
->mgmt_tx_ant
);
183 /* In mac80211, rates for 5 GHz start at 0 */
184 if (info
->band
== IEEE80211_BAND_5GHZ
)
185 rate
+= IWL_FIRST_OFDM_RATE
;
186 else if (rate
>= IWL_FIRST_CCK_RATE
&& rate
<= IWL_LAST_CCK_RATE
)
187 rate_flags
|= RATE_MCS_CCK_MSK
;
189 tx_beacon_cmd
->tx
.rate_n_flags
=
190 iwl_hw_set_rate_n_flags(rate
, rate_flags
);
193 cmd
.len
[0] = sizeof(*tx_beacon_cmd
);
194 cmd
.data
[0] = tx_beacon_cmd
;
195 cmd
.dataflags
[0] = IWL_HCMD_DFL_NOCOPY
;
196 cmd
.len
[1] = frame_size
;
197 cmd
.data
[1] = priv
->beacon_skb
->data
;
198 cmd
.dataflags
[1] = IWL_HCMD_DFL_NOCOPY
;
200 return iwl_dvm_send_cmd(priv
, &cmd
);
203 static void iwl_bg_beacon_update(struct work_struct
*work
)
205 struct iwl_priv
*priv
=
206 container_of(work
, struct iwl_priv
, beacon_update
);
207 struct sk_buff
*beacon
;
209 mutex_lock(&priv
->mutex
);
210 if (!priv
->beacon_ctx
) {
211 IWL_ERR(priv
, "updating beacon w/o beacon context!\n");
215 if (priv
->beacon_ctx
->vif
->type
!= NL80211_IFTYPE_AP
) {
217 * The ucode will send beacon notifications even in
218 * IBSS mode, but we don't want to process them. But
219 * we need to defer the type check to here due to
220 * requiring locking around the beacon_ctx access.
225 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
226 beacon
= ieee80211_beacon_get(priv
->hw
, priv
->beacon_ctx
->vif
);
228 IWL_ERR(priv
, "update beacon failed -- keeping old\n");
232 /* new beacon skb is allocated every time; dispose previous.*/
233 dev_kfree_skb(priv
->beacon_skb
);
235 priv
->beacon_skb
= beacon
;
237 iwlagn_send_beacon_cmd(priv
);
239 mutex_unlock(&priv
->mutex
);
242 static void iwl_bg_bt_runtime_config(struct work_struct
*work
)
244 struct iwl_priv
*priv
=
245 container_of(work
, struct iwl_priv
, bt_runtime_config
);
247 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
250 /* dont send host command if rf-kill is on */
251 if (!iwl_is_ready_rf(priv
))
253 iwlagn_send_advance_bt_config(priv
);
256 static void iwl_bg_bt_full_concurrency(struct work_struct
*work
)
258 struct iwl_priv
*priv
=
259 container_of(work
, struct iwl_priv
, bt_full_concurrency
);
260 struct iwl_rxon_context
*ctx
;
262 mutex_lock(&priv
->mutex
);
264 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
267 /* dont send host command if rf-kill is on */
268 if (!iwl_is_ready_rf(priv
))
271 IWL_DEBUG_INFO(priv
, "BT coex in %s mode\n",
272 priv
->bt_full_concurrent
?
273 "full concurrency" : "3-wire");
276 * LQ & RXON updated cmds must be sent before BT Config cmd
277 * to avoid 3-wire collisions
279 for_each_context(priv
, ctx
) {
280 iwlagn_set_rxon_chain(priv
, ctx
);
281 iwlagn_commit_rxon(priv
, ctx
);
284 iwlagn_send_advance_bt_config(priv
);
286 mutex_unlock(&priv
->mutex
);
290 * iwl_bg_statistics_periodic - Timer callback to queue statistics
292 * This callback is provided in order to send a statistics request.
294 * This timer function is continually reset to execute within
295 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
296 * was received. We need to ensure we receive the statistics in order
297 * to update the temperature used for calibrating the TXPOWER.
299 static void iwl_bg_statistics_periodic(unsigned long data
)
301 struct iwl_priv
*priv
= (struct iwl_priv
*)data
;
303 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
306 /* dont send host command if rf-kill is on */
307 if (!iwl_is_ready_rf(priv
))
310 iwl_send_statistics_request(priv
, CMD_ASYNC
, false);
314 static void iwl_print_cont_event_trace(struct iwl_priv
*priv
, u32 base
,
315 u32 start_idx
, u32 num_events
,
316 u32 capacity
, u32 mode
)
319 u32 ptr
; /* SRAM byte address of log data */
320 u32 ev
, time
, data
; /* event log data */
321 unsigned long reg_flags
;
324 ptr
= base
+ (4 * sizeof(u32
)) + (start_idx
* 2 * sizeof(u32
));
326 ptr
= base
+ (4 * sizeof(u32
)) + (start_idx
* 3 * sizeof(u32
));
328 /* Make sure device is powered up for SRAM reads */
329 spin_lock_irqsave(&trans(priv
)->reg_lock
, reg_flags
);
330 if (unlikely(!iwl_grab_nic_access(trans(priv
)))) {
331 spin_unlock_irqrestore(&trans(priv
)->reg_lock
, reg_flags
);
335 /* Set starting address; reads will auto-increment */
336 iwl_write32(trans(priv
), HBUS_TARG_MEM_RADDR
, ptr
);
339 * Refuse to read more than would have fit into the log from
340 * the current start_idx. This used to happen due to the race
341 * described below, but now WARN because the code below should
342 * prevent it from happening here.
344 if (WARN_ON(num_events
> capacity
- start_idx
))
345 num_events
= capacity
- start_idx
;
348 * "time" is actually "data" for mode 0 (no timestamp).
349 * place event id # at far right for easier visual parsing.
351 for (i
= 0; i
< num_events
; i
++) {
352 ev
= iwl_read32(trans(priv
), HBUS_TARG_MEM_RDAT
);
353 time
= iwl_read32(trans(priv
), HBUS_TARG_MEM_RDAT
);
355 trace_iwlwifi_dev_ucode_cont_event(
356 trans(priv
)->dev
, 0, time
, ev
);
358 data
= iwl_read32(trans(priv
), HBUS_TARG_MEM_RDAT
);
359 trace_iwlwifi_dev_ucode_cont_event(
360 trans(priv
)->dev
, time
, data
, ev
);
363 /* Allow device to power down */
364 iwl_release_nic_access(trans(priv
));
365 spin_unlock_irqrestore(&trans(priv
)->reg_lock
, reg_flags
);
368 static void iwl_continuous_event_trace(struct iwl_priv
*priv
)
370 u32 capacity
; /* event log capacity in # entries */
377 u32 base
; /* SRAM byte address of event log header */
378 u32 mode
; /* 0 - no timestamp, 1 - timestamp recorded */
379 u32 num_wraps
; /* # times uCode wrapped to top of log */
380 u32 next_entry
; /* index of next entry to be written by uCode */
382 base
= priv
->device_pointers
.log_event_table
;
383 if (iwlagn_hw_valid_rtc_data_addr(base
)) {
384 iwl_read_targ_mem_words(trans(priv
), base
, &read
, sizeof(read
));
386 capacity
= read
.capacity
;
388 num_wraps
= read
.wrap_counter
;
389 next_entry
= read
.write_counter
;
394 * Unfortunately, the uCode doesn't use temporary variables.
395 * Therefore, it can happen that we read next_entry == capacity,
396 * which really means next_entry == 0.
398 if (unlikely(next_entry
== capacity
))
401 * Additionally, the uCode increases the write pointer before
402 * the wraps counter, so if the write pointer is smaller than
403 * the old write pointer (wrap occurred) but we read that no
404 * wrap occurred, we actually read between the next_entry and
405 * num_wraps update (this does happen in practice!!) -- take
406 * that into account by increasing num_wraps.
408 if (unlikely(next_entry
< priv
->event_log
.next_entry
&&
409 num_wraps
== priv
->event_log
.num_wraps
))
412 if (num_wraps
== priv
->event_log
.num_wraps
) {
413 iwl_print_cont_event_trace(
414 priv
, base
, priv
->event_log
.next_entry
,
415 next_entry
- priv
->event_log
.next_entry
,
418 priv
->event_log
.non_wraps_count
++;
420 if (num_wraps
- priv
->event_log
.num_wraps
> 1)
421 priv
->event_log
.wraps_more_count
++;
423 priv
->event_log
.wraps_once_count
++;
425 trace_iwlwifi_dev_ucode_wrap_event(trans(priv
)->dev
,
426 num_wraps
- priv
->event_log
.num_wraps
,
427 next_entry
, priv
->event_log
.next_entry
);
429 if (next_entry
< priv
->event_log
.next_entry
) {
430 iwl_print_cont_event_trace(
431 priv
, base
, priv
->event_log
.next_entry
,
432 capacity
- priv
->event_log
.next_entry
,
435 iwl_print_cont_event_trace(
436 priv
, base
, 0, next_entry
, capacity
, mode
);
438 iwl_print_cont_event_trace(
439 priv
, base
, next_entry
,
440 capacity
- next_entry
,
443 iwl_print_cont_event_trace(
444 priv
, base
, 0, next_entry
, capacity
, mode
);
448 priv
->event_log
.num_wraps
= num_wraps
;
449 priv
->event_log
.next_entry
= next_entry
;
453 * iwl_bg_ucode_trace - Timer callback to log ucode event
455 * The timer is continually set to execute every
456 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
457 * this function is to perform continuous uCode event logging operation
460 static void iwl_bg_ucode_trace(unsigned long data
)
462 struct iwl_priv
*priv
= (struct iwl_priv
*)data
;
464 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
467 if (priv
->event_log
.ucode_trace
) {
468 iwl_continuous_event_trace(priv
);
469 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
470 mod_timer(&priv
->ucode_trace
,
471 jiffies
+ msecs_to_jiffies(UCODE_TRACE_PERIOD
));
475 static void iwl_bg_tx_flush(struct work_struct
*work
)
477 struct iwl_priv
*priv
=
478 container_of(work
, struct iwl_priv
, tx_flush
);
480 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
483 /* do nothing if rf-kill is on */
484 if (!iwl_is_ready_rf(priv
))
487 IWL_DEBUG_INFO(priv
, "device request: flush all tx frames\n");
488 iwlagn_dev_txfifo_flush(priv
, IWL_DROP_ALL
);
492 * queue/FIFO/AC mapping definitions
495 #define IWL_TX_FIFO_BK 0 /* shared */
496 #define IWL_TX_FIFO_BE 1
497 #define IWL_TX_FIFO_VI 2 /* shared */
498 #define IWL_TX_FIFO_VO 3
499 #define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
500 #define IWL_TX_FIFO_BE_IPAN 4
501 #define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
502 #define IWL_TX_FIFO_VO_IPAN 5
503 /* re-uses the VO FIFO, uCode will properly flush/schedule */
504 #define IWL_TX_FIFO_AUX 5
505 #define IWL_TX_FIFO_UNUSED -1
507 #define IWLAGN_CMD_FIFO_NUM 7
510 * This queue number is required for proper operation
511 * because the ucode will stop/start the scheduler as
514 #define IWL_IPAN_MCAST_QUEUE 8
516 static const u8 iwlagn_default_queue_to_tx_fifo
[] = {
524 static const u8 iwlagn_ipan_queue_to_tx_fifo
[] = {
538 static const u8 iwlagn_bss_ac_to_fifo
[] = {
545 static const u8 iwlagn_bss_ac_to_queue
[] = {
549 static const u8 iwlagn_pan_ac_to_fifo
[] = {
556 static const u8 iwlagn_pan_ac_to_queue
[] = {
560 static const u8 iwlagn_bss_queue_to_ac
[] = {
567 static const u8 iwlagn_pan_queue_to_ac
[] = {
578 static void iwl_init_context(struct iwl_priv
*priv
, u32 ucode_flags
)
583 * The default context is always valid,
584 * the PAN context depends on uCode.
586 priv
->valid_contexts
= BIT(IWL_RXON_CTX_BSS
);
587 if (ucode_flags
& IWL_UCODE_TLV_FLAGS_PAN
)
588 priv
->valid_contexts
|= BIT(IWL_RXON_CTX_PAN
);
590 for (i
= 0; i
< NUM_IWL_RXON_CTX
; i
++)
591 priv
->contexts
[i
].ctxid
= i
;
593 priv
->contexts
[IWL_RXON_CTX_BSS
].always_active
= true;
594 priv
->contexts
[IWL_RXON_CTX_BSS
].is_active
= true;
595 priv
->contexts
[IWL_RXON_CTX_BSS
].rxon_cmd
= REPLY_RXON
;
596 priv
->contexts
[IWL_RXON_CTX_BSS
].rxon_timing_cmd
= REPLY_RXON_TIMING
;
597 priv
->contexts
[IWL_RXON_CTX_BSS
].rxon_assoc_cmd
= REPLY_RXON_ASSOC
;
598 priv
->contexts
[IWL_RXON_CTX_BSS
].qos_cmd
= REPLY_QOS_PARAM
;
599 priv
->contexts
[IWL_RXON_CTX_BSS
].ap_sta_id
= IWL_AP_ID
;
600 priv
->contexts
[IWL_RXON_CTX_BSS
].wep_key_cmd
= REPLY_WEPKEY
;
601 priv
->contexts
[IWL_RXON_CTX_BSS
].bcast_sta_id
= IWLAGN_BROADCAST_ID
;
602 priv
->contexts
[IWL_RXON_CTX_BSS
].exclusive_interface_modes
=
603 BIT(NL80211_IFTYPE_ADHOC
);
604 priv
->contexts
[IWL_RXON_CTX_BSS
].interface_modes
=
605 BIT(NL80211_IFTYPE_STATION
);
606 priv
->contexts
[IWL_RXON_CTX_BSS
].ap_devtype
= RXON_DEV_TYPE_AP
;
607 priv
->contexts
[IWL_RXON_CTX_BSS
].ibss_devtype
= RXON_DEV_TYPE_IBSS
;
608 priv
->contexts
[IWL_RXON_CTX_BSS
].station_devtype
= RXON_DEV_TYPE_ESS
;
609 priv
->contexts
[IWL_RXON_CTX_BSS
].unused_devtype
= RXON_DEV_TYPE_ESS
;
610 memcpy(priv
->contexts
[IWL_RXON_CTX_BSS
].ac_to_queue
,
611 iwlagn_bss_ac_to_queue
, sizeof(iwlagn_bss_ac_to_queue
));
612 memcpy(priv
->contexts
[IWL_RXON_CTX_BSS
].ac_to_fifo
,
613 iwlagn_bss_ac_to_fifo
, sizeof(iwlagn_bss_ac_to_fifo
));
615 priv
->contexts
[IWL_RXON_CTX_PAN
].rxon_cmd
= REPLY_WIPAN_RXON
;
616 priv
->contexts
[IWL_RXON_CTX_PAN
].rxon_timing_cmd
=
617 REPLY_WIPAN_RXON_TIMING
;
618 priv
->contexts
[IWL_RXON_CTX_PAN
].rxon_assoc_cmd
=
619 REPLY_WIPAN_RXON_ASSOC
;
620 priv
->contexts
[IWL_RXON_CTX_PAN
].qos_cmd
= REPLY_WIPAN_QOS_PARAM
;
621 priv
->contexts
[IWL_RXON_CTX_PAN
].ap_sta_id
= IWL_AP_ID_PAN
;
622 priv
->contexts
[IWL_RXON_CTX_PAN
].wep_key_cmd
= REPLY_WIPAN_WEPKEY
;
623 priv
->contexts
[IWL_RXON_CTX_PAN
].bcast_sta_id
= IWLAGN_PAN_BCAST_ID
;
624 priv
->contexts
[IWL_RXON_CTX_PAN
].station_flags
= STA_FLG_PAN_STATION
;
625 priv
->contexts
[IWL_RXON_CTX_PAN
].interface_modes
=
626 BIT(NL80211_IFTYPE_STATION
) | BIT(NL80211_IFTYPE_AP
);
628 if (ucode_flags
& IWL_UCODE_TLV_FLAGS_P2P
)
629 priv
->contexts
[IWL_RXON_CTX_PAN
].interface_modes
|=
630 BIT(NL80211_IFTYPE_P2P_CLIENT
) |
631 BIT(NL80211_IFTYPE_P2P_GO
);
633 priv
->contexts
[IWL_RXON_CTX_PAN
].ap_devtype
= RXON_DEV_TYPE_CP
;
634 priv
->contexts
[IWL_RXON_CTX_PAN
].station_devtype
= RXON_DEV_TYPE_2STA
;
635 priv
->contexts
[IWL_RXON_CTX_PAN
].unused_devtype
= RXON_DEV_TYPE_P2P
;
636 memcpy(priv
->contexts
[IWL_RXON_CTX_PAN
].ac_to_queue
,
637 iwlagn_pan_ac_to_queue
, sizeof(iwlagn_pan_ac_to_queue
));
638 memcpy(priv
->contexts
[IWL_RXON_CTX_PAN
].ac_to_fifo
,
639 iwlagn_pan_ac_to_fifo
, sizeof(iwlagn_pan_ac_to_fifo
));
640 priv
->contexts
[IWL_RXON_CTX_PAN
].mcast_queue
= IWL_IPAN_MCAST_QUEUE
;
642 BUILD_BUG_ON(NUM_IWL_RXON_CTX
!= 2);
645 static void iwl_rf_kill_ct_config(struct iwl_priv
*priv
)
647 struct iwl_ct_kill_config cmd
;
648 struct iwl_ct_kill_throttling_config adv_cmd
;
651 iwl_write32(trans(priv
), CSR_UCODE_DRV_GP1_CLR
,
652 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT
);
654 priv
->thermal_throttle
.ct_kill_toggle
= false;
656 if (cfg(priv
)->base_params
->support_ct_kill_exit
) {
657 adv_cmd
.critical_temperature_enter
=
658 cpu_to_le32(hw_params(priv
).ct_kill_threshold
);
659 adv_cmd
.critical_temperature_exit
=
660 cpu_to_le32(hw_params(priv
).ct_kill_exit_threshold
);
662 ret
= iwl_dvm_send_cmd_pdu(priv
,
663 REPLY_CT_KILL_CONFIG_CMD
,
664 CMD_SYNC
, sizeof(adv_cmd
), &adv_cmd
);
666 IWL_ERR(priv
, "REPLY_CT_KILL_CONFIG_CMD failed\n");
668 IWL_DEBUG_INFO(priv
, "REPLY_CT_KILL_CONFIG_CMD "
669 "succeeded, critical temperature enter is %d,"
671 hw_params(priv
).ct_kill_threshold
,
672 hw_params(priv
).ct_kill_exit_threshold
);
674 cmd
.critical_temperature_R
=
675 cpu_to_le32(hw_params(priv
).ct_kill_threshold
);
677 ret
= iwl_dvm_send_cmd_pdu(priv
,
678 REPLY_CT_KILL_CONFIG_CMD
,
679 CMD_SYNC
, sizeof(cmd
), &cmd
);
681 IWL_ERR(priv
, "REPLY_CT_KILL_CONFIG_CMD failed\n");
683 IWL_DEBUG_INFO(priv
, "REPLY_CT_KILL_CONFIG_CMD "
685 "critical temperature is %d\n",
686 hw_params(priv
).ct_kill_threshold
);
690 static int iwlagn_send_calib_cfg_rt(struct iwl_priv
*priv
, u32 cfg
)
692 struct iwl_calib_cfg_cmd calib_cfg_cmd
;
693 struct iwl_host_cmd cmd
= {
694 .id
= CALIBRATION_CFG_CMD
,
695 .len
= { sizeof(struct iwl_calib_cfg_cmd
), },
696 .data
= { &calib_cfg_cmd
, },
699 memset(&calib_cfg_cmd
, 0, sizeof(calib_cfg_cmd
));
700 calib_cfg_cmd
.ucd_calib_cfg
.once
.is_enable
= IWL_CALIB_RT_CFG_ALL
;
701 calib_cfg_cmd
.ucd_calib_cfg
.once
.start
= cpu_to_le32(cfg
);
703 return iwl_dvm_send_cmd(priv
, &cmd
);
707 static int iwlagn_send_tx_ant_config(struct iwl_priv
*priv
, u8 valid_tx_ant
)
709 struct iwl_tx_ant_config_cmd tx_ant_cmd
= {
710 .valid
= cpu_to_le32(valid_tx_ant
),
713 if (IWL_UCODE_API(priv
->fw
->ucode_ver
) > 1) {
714 IWL_DEBUG_HC(priv
, "select valid tx ant: %u\n", valid_tx_ant
);
715 return iwl_dvm_send_cmd_pdu(priv
,
716 TX_ANT_CONFIGURATION_CMD
,
718 sizeof(struct iwl_tx_ant_config_cmd
),
721 IWL_DEBUG_HC(priv
, "TX_ANT_CONFIGURATION_CMD not supported\n");
727 * iwl_alive_start - called after REPLY_ALIVE notification received
728 * from protocol/runtime uCode (initialization uCode's
729 * Alive gets handled by iwl_init_alive_start()).
731 int iwl_alive_start(struct iwl_priv
*priv
)
734 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_BSS
];
736 IWL_DEBUG_INFO(priv
, "Runtime Alive received.\n");
738 /* After the ALIVE response, we can send host commands to the uCode */
739 set_bit(STATUS_ALIVE
, &priv
->status
);
741 /* Enable watchdog to monitor the driver tx queues */
742 iwl_setup_watchdog(priv
);
744 if (iwl_is_rfkill(priv
))
747 if (priv
->event_log
.ucode_trace
) {
748 /* start collecting data now */
749 mod_timer(&priv
->ucode_trace
, jiffies
);
752 /* download priority table before any calibration request */
753 if (cfg(priv
)->bt_params
&&
754 cfg(priv
)->bt_params
->advanced_bt_coexist
) {
755 /* Configure Bluetooth device coexistence support */
756 if (cfg(priv
)->bt_params
->bt_sco_disable
)
757 priv
->bt_enable_pspoll
= false;
759 priv
->bt_enable_pspoll
= true;
761 priv
->bt_valid
= IWLAGN_BT_ALL_VALID_MSK
;
762 priv
->kill_ack_mask
= IWLAGN_BT_KILL_ACK_MASK_DEFAULT
;
763 priv
->kill_cts_mask
= IWLAGN_BT_KILL_CTS_MASK_DEFAULT
;
764 iwlagn_send_advance_bt_config(priv
);
765 priv
->bt_valid
= IWLAGN_BT_VALID_ENABLE_FLAGS
;
766 priv
->cur_rssi_ctx
= NULL
;
768 iwl_send_prio_tbl(priv
);
770 /* FIXME: w/a to force change uCode BT state machine */
771 ret
= iwl_send_bt_env(priv
, IWL_BT_COEX_ENV_OPEN
,
772 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2
);
775 ret
= iwl_send_bt_env(priv
, IWL_BT_COEX_ENV_CLOSE
,
776 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2
);
781 * default is 2-wire BT coexexistence support
783 iwl_send_bt_config(priv
);
787 * Perform runtime calibrations, including DC calibration.
789 iwlagn_send_calib_cfg_rt(priv
, IWL_CALIB_CFG_DC_IDX
);
791 ieee80211_wake_queues(priv
->hw
);
793 priv
->active_rate
= IWL_RATES_MASK
;
795 /* Configure Tx antenna selection based on H/W config */
796 iwlagn_send_tx_ant_config(priv
, hw_params(priv
).valid_tx_ant
);
798 if (iwl_is_associated_ctx(ctx
) && !priv
->wowlan
) {
799 struct iwl_rxon_cmd
*active_rxon
=
800 (struct iwl_rxon_cmd
*)&ctx
->active
;
801 /* apply any changes in staging */
802 ctx
->staging
.filter_flags
|= RXON_FILTER_ASSOC_MSK
;
803 active_rxon
->filter_flags
&= ~RXON_FILTER_ASSOC_MSK
;
805 struct iwl_rxon_context
*tmp
;
806 /* Initialize our rx_config data */
807 for_each_context(priv
, tmp
)
808 iwl_connection_init_rx_config(priv
, tmp
);
810 iwlagn_set_rxon_chain(priv
, ctx
);
814 /* WoWLAN ucode will not reply in the same way, skip it */
815 iwl_reset_run_time_calib(priv
);
818 set_bit(STATUS_READY
, &priv
->status
);
820 /* Configure the adapter for unassociated operation */
821 ret
= iwlagn_commit_rxon(priv
, ctx
);
825 /* At this point, the NIC is initialized and operational */
826 iwl_rf_kill_ct_config(priv
);
828 IWL_DEBUG_INFO(priv
, "ALIVE processing complete.\n");
830 return iwl_power_update_mode(priv
, true);
834 * iwl_clear_driver_stations - clear knowledge of all stations from driver
835 * @priv: iwl priv struct
837 * This is called during iwl_down() to make sure that in the case
838 * we're coming there from a hardware restart mac80211 will be
839 * able to reconfigure stations -- if we're getting there in the
840 * normal down flow then the stations will already be cleared.
842 static void iwl_clear_driver_stations(struct iwl_priv
*priv
)
844 struct iwl_rxon_context
*ctx
;
846 spin_lock_bh(&priv
->sta_lock
);
847 memset(priv
->stations
, 0, sizeof(priv
->stations
));
848 priv
->num_stations
= 0;
850 priv
->ucode_key_table
= 0;
852 for_each_context(priv
, ctx
) {
854 * Remove all key information that is not stored as part
855 * of station information since mac80211 may not have had
856 * a chance to remove all the keys. When device is
857 * reconfigured by mac80211 after an error all keys will
860 memset(ctx
->wep_keys
, 0, sizeof(ctx
->wep_keys
));
861 ctx
->key_mapping_keys
= 0;
864 spin_unlock_bh(&priv
->sta_lock
);
867 void iwl_down(struct iwl_priv
*priv
)
871 IWL_DEBUG_INFO(priv
, DRV_NAME
" is going down\n");
873 lockdep_assert_held(&priv
->mutex
);
875 iwl_scan_cancel_timeout(priv
, 200);
878 * If active, scanning won't cancel it, so say it expired.
879 * No race since we hold the mutex here and a new one
880 * can't come in at this time.
882 ieee80211_remain_on_channel_expired(priv
->hw
);
885 test_and_set_bit(STATUS_EXIT_PENDING
, &priv
->status
);
887 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
888 * to prevent rearm timer */
889 del_timer_sync(&priv
->watchdog
);
891 iwl_clear_ucode_stations(priv
, NULL
);
892 iwl_dealloc_bcast_stations(priv
);
893 iwl_clear_driver_stations(priv
);
895 /* reset BT coex data */
897 priv
->cur_rssi_ctx
= NULL
;
899 if (cfg(priv
)->bt_params
)
900 priv
->bt_traffic_load
=
901 cfg(priv
)->bt_params
->bt_init_traffic_load
;
903 priv
->bt_traffic_load
= 0;
904 priv
->bt_full_concurrent
= false;
905 priv
->bt_ci_compliance
= 0;
907 /* Wipe out the EXIT_PENDING status bit if we are not actually
908 * exiting the module */
910 clear_bit(STATUS_EXIT_PENDING
, &priv
->status
);
912 if (priv
->mac80211_registered
)
913 ieee80211_stop_queues(priv
->hw
);
915 priv
->ucode_loaded
= false;
916 iwl_trans_stop_device(trans(priv
));
918 /* Clear out all status bits but a few that are stable across reset */
919 priv
->status
&= test_bit(STATUS_RF_KILL_HW
, &priv
->status
) <<
921 test_bit(STATUS_GEO_CONFIGURED
, &priv
->status
) <<
922 STATUS_GEO_CONFIGURED
|
923 test_bit(STATUS_EXIT_PENDING
, &priv
->status
) <<
925 priv
->shrd
->status
&=
926 test_bit(STATUS_FW_ERROR
, &priv
->shrd
->status
) <<
929 dev_kfree_skb(priv
->beacon_skb
);
930 priv
->beacon_skb
= NULL
;
933 /*****************************************************************************
935 * Workqueue callbacks
937 *****************************************************************************/
939 static void iwl_bg_run_time_calib_work(struct work_struct
*work
)
941 struct iwl_priv
*priv
= container_of(work
, struct iwl_priv
,
942 run_time_calib_work
);
944 mutex_lock(&priv
->mutex
);
946 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
) ||
947 test_bit(STATUS_SCANNING
, &priv
->status
)) {
948 mutex_unlock(&priv
->mutex
);
952 if (priv
->start_calib
) {
953 iwl_chain_noise_calibration(priv
);
954 iwl_sensitivity_calibration(priv
);
957 mutex_unlock(&priv
->mutex
);
960 void iwlagn_prepare_restart(struct iwl_priv
*priv
)
962 struct iwl_rxon_context
*ctx
;
963 bool bt_full_concurrent
;
970 lockdep_assert_held(&priv
->mutex
);
972 for_each_context(priv
, ctx
)
977 * __iwl_down() will clear the BT status variables,
978 * which is correct, but when we restart we really
979 * want to keep them so restore them afterwards.
981 * The restart process will later pick them up and
982 * re-configure the hw when we reconfigure the BT
985 bt_full_concurrent
= priv
->bt_full_concurrent
;
986 bt_ci_compliance
= priv
->bt_ci_compliance
;
987 bt_load
= priv
->bt_traffic_load
;
988 bt_status
= priv
->bt_status
;
989 bt_is_sco
= priv
->bt_is_sco
;
993 priv
->bt_full_concurrent
= bt_full_concurrent
;
994 priv
->bt_ci_compliance
= bt_ci_compliance
;
995 priv
->bt_traffic_load
= bt_load
;
996 priv
->bt_status
= bt_status
;
997 priv
->bt_is_sco
= bt_is_sco
;
999 /* reset all queues */
1000 for (i
= 0; i
< IEEE80211_NUM_ACS
; i
++)
1001 atomic_set(&priv
->ac_stop_count
[i
], 0);
1003 for (i
= IWLAGN_FIRST_AMPDU_QUEUE
; i
< IWL_MAX_HW_QUEUES
; i
++)
1004 priv
->queue_to_ac
[i
] = IWL_INVALID_AC
;
1006 memset(priv
->agg_q_alloc
, 0, sizeof(priv
->agg_q_alloc
));
1009 static void iwl_bg_restart(struct work_struct
*data
)
1011 struct iwl_priv
*priv
= container_of(data
, struct iwl_priv
, restart
);
1013 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
1016 if (test_and_clear_bit(STATUS_FW_ERROR
, &priv
->shrd
->status
)) {
1017 mutex_lock(&priv
->mutex
);
1018 iwlagn_prepare_restart(priv
);
1019 mutex_unlock(&priv
->mutex
);
1020 iwl_cancel_deferred_work(priv
);
1021 ieee80211_restart_hw(priv
->hw
);
1030 void iwlagn_disable_roc(struct iwl_priv
*priv
)
1032 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_PAN
];
1034 lockdep_assert_held(&priv
->mutex
);
1036 if (!priv
->hw_roc_setup
)
1039 ctx
->staging
.dev_type
= RXON_DEV_TYPE_P2P
;
1040 ctx
->staging
.filter_flags
&= ~RXON_FILTER_ASSOC_MSK
;
1042 priv
->hw_roc_channel
= NULL
;
1044 memset(ctx
->staging
.node_addr
, 0, ETH_ALEN
);
1046 iwlagn_commit_rxon(priv
, ctx
);
1048 ctx
->is_active
= false;
1049 priv
->hw_roc_setup
= false;
1052 static void iwlagn_disable_roc_work(struct work_struct
*work
)
1054 struct iwl_priv
*priv
= container_of(work
, struct iwl_priv
,
1055 hw_roc_disable_work
.work
);
1057 mutex_lock(&priv
->mutex
);
1058 iwlagn_disable_roc(priv
);
1059 mutex_unlock(&priv
->mutex
);
1062 /*****************************************************************************
1064 * driver setup and teardown
1066 *****************************************************************************/
1068 static void iwl_setup_deferred_work(struct iwl_priv
*priv
)
1070 priv
->workqueue
= create_singlethread_workqueue(DRV_NAME
);
1072 INIT_WORK(&priv
->restart
, iwl_bg_restart
);
1073 INIT_WORK(&priv
->beacon_update
, iwl_bg_beacon_update
);
1074 INIT_WORK(&priv
->run_time_calib_work
, iwl_bg_run_time_calib_work
);
1075 INIT_WORK(&priv
->tx_flush
, iwl_bg_tx_flush
);
1076 INIT_WORK(&priv
->bt_full_concurrency
, iwl_bg_bt_full_concurrency
);
1077 INIT_WORK(&priv
->bt_runtime_config
, iwl_bg_bt_runtime_config
);
1078 INIT_DELAYED_WORK(&priv
->hw_roc_disable_work
,
1079 iwlagn_disable_roc_work
);
1081 iwl_setup_scan_deferred_work(priv
);
1083 if (cfg(priv
)->bt_params
)
1084 iwlagn_bt_setup_deferred_work(priv
);
1086 init_timer(&priv
->statistics_periodic
);
1087 priv
->statistics_periodic
.data
= (unsigned long)priv
;
1088 priv
->statistics_periodic
.function
= iwl_bg_statistics_periodic
;
1090 init_timer(&priv
->ucode_trace
);
1091 priv
->ucode_trace
.data
= (unsigned long)priv
;
1092 priv
->ucode_trace
.function
= iwl_bg_ucode_trace
;
1094 init_timer(&priv
->watchdog
);
1095 priv
->watchdog
.data
= (unsigned long)priv
;
1096 priv
->watchdog
.function
= iwl_bg_watchdog
;
1099 void iwl_cancel_deferred_work(struct iwl_priv
*priv
)
1101 if (cfg(priv
)->bt_params
)
1102 iwlagn_bt_cancel_deferred_work(priv
);
1104 cancel_work_sync(&priv
->run_time_calib_work
);
1105 cancel_work_sync(&priv
->beacon_update
);
1107 iwl_cancel_scan_deferred_work(priv
);
1109 cancel_work_sync(&priv
->bt_full_concurrency
);
1110 cancel_work_sync(&priv
->bt_runtime_config
);
1111 cancel_delayed_work_sync(&priv
->hw_roc_disable_work
);
1113 del_timer_sync(&priv
->statistics_periodic
);
1114 del_timer_sync(&priv
->ucode_trace
);
1117 static void iwl_init_hw_rates(struct ieee80211_rate
*rates
)
1121 for (i
= 0; i
< IWL_RATE_COUNT_LEGACY
; i
++) {
1122 rates
[i
].bitrate
= iwl_rates
[i
].ieee
* 5;
1123 rates
[i
].hw_value
= i
; /* Rate scaling will work on indexes */
1124 rates
[i
].hw_value_short
= i
;
1126 if ((i
>= IWL_FIRST_CCK_RATE
) && (i
<= IWL_LAST_CCK_RATE
)) {
1128 * If CCK != 1M then set short preamble rate flag.
1131 (iwl_rates
[i
].plcp
== IWL_RATE_1M_PLCP
) ?
1132 0 : IEEE80211_RATE_SHORT_PREAMBLE
;
1137 static int iwl_init_drv(struct iwl_priv
*priv
)
1141 spin_lock_init(&priv
->sta_lock
);
1143 mutex_init(&priv
->mutex
);
1145 INIT_LIST_HEAD(&priv
->calib_results
);
1147 priv
->ieee_channels
= NULL
;
1148 priv
->ieee_rates
= NULL
;
1149 priv
->band
= IEEE80211_BAND_2GHZ
;
1151 priv
->plcp_delta_threshold
=
1152 cfg(priv
)->base_params
->plcp_delta_threshold
;
1154 priv
->iw_mode
= NL80211_IFTYPE_STATION
;
1155 priv
->current_ht_config
.smps
= IEEE80211_SMPS_STATIC
;
1156 priv
->missed_beacon_threshold
= IWL_MISSED_BEACON_THRESHOLD_DEF
;
1157 priv
->agg_tids_count
= 0;
1159 priv
->ucode_owner
= IWL_OWNERSHIP_DRIVER
;
1161 /* initialize force reset */
1162 priv
->force_reset
[IWL_RF_RESET
].reset_duration
=
1163 IWL_DELAY_NEXT_FORCE_RF_RESET
;
1164 priv
->force_reset
[IWL_FW_RESET
].reset_duration
=
1165 IWL_DELAY_NEXT_FORCE_FW_RELOAD
;
1167 priv
->rx_statistics_jiffies
= jiffies
;
1169 /* Choose which receivers/antennas to use */
1170 iwlagn_set_rxon_chain(priv
, &priv
->contexts
[IWL_RXON_CTX_BSS
]);
1172 iwl_init_scan_params(priv
);
1175 if (cfg(priv
)->bt_params
&&
1176 cfg(priv
)->bt_params
->advanced_bt_coexist
) {
1177 priv
->kill_ack_mask
= IWLAGN_BT_KILL_ACK_MASK_DEFAULT
;
1178 priv
->kill_cts_mask
= IWLAGN_BT_KILL_CTS_MASK_DEFAULT
;
1179 priv
->bt_valid
= IWLAGN_BT_ALL_VALID_MSK
;
1180 priv
->bt_on_thresh
= BT_ON_THRESHOLD_DEF
;
1181 priv
->bt_duration
= BT_DURATION_LIMIT_DEF
;
1182 priv
->dynamic_frag_thresh
= BT_FRAG_THRESHOLD_DEF
;
1185 ret
= iwl_init_channel_map(priv
);
1187 IWL_ERR(priv
, "initializing regulatory failed: %d\n", ret
);
1191 ret
= iwl_init_geos(priv
);
1193 IWL_ERR(priv
, "initializing geos failed: %d\n", ret
);
1194 goto err_free_channel_map
;
1196 iwl_init_hw_rates(priv
->ieee_rates
);
1200 err_free_channel_map
:
1201 iwl_free_channel_map(priv
);
1206 static void iwl_uninit_drv(struct iwl_priv
*priv
)
1208 iwl_free_geos(priv
);
1209 iwl_free_channel_map(priv
);
1210 kfree(priv
->scan_cmd
);
1211 kfree(priv
->beacon_cmd
);
1212 kfree(rcu_dereference_raw(priv
->noa_data
));
1213 iwl_calib_free_results(priv
);
1214 #ifdef CONFIG_IWLWIFI_DEBUGFS
1215 kfree(priv
->wowlan_sram
);
1219 /* Size of one Rx buffer in host DRAM */
1220 #define IWL_RX_BUF_SIZE_4K (4 * 1024)
1221 #define IWL_RX_BUF_SIZE_8K (8 * 1024)
1223 static void iwl_set_hw_params(struct iwl_priv
*priv
)
1225 if (cfg(priv
)->ht_params
)
1226 hw_params(priv
).use_rts_for_aggregation
=
1227 cfg(priv
)->ht_params
->use_rts_for_aggregation
;
1229 if (iwlagn_mod_params
.amsdu_size_8K
)
1230 hw_params(priv
).rx_page_order
=
1231 get_order(IWL_RX_BUF_SIZE_8K
);
1233 hw_params(priv
).rx_page_order
=
1234 get_order(IWL_RX_BUF_SIZE_4K
);
1236 if (iwlagn_mod_params
.disable_11n
& IWL_DISABLE_HT_ALL
)
1237 hw_params(priv
).sku
&= ~EEPROM_SKU_CAP_11N_ENABLE
;
1239 hw_params(priv
).wd_timeout
= cfg(priv
)->base_params
->wd_timeout
;
1241 /* Device-specific setup */
1242 cfg(priv
)->lib
->set_hw_params(priv
);
1247 static void iwl_debug_config(struct iwl_priv
*priv
)
1249 dev_printk(KERN_INFO
, trans(priv
)->dev
, "CONFIG_IWLWIFI_DEBUG "
1250 #ifdef CONFIG_IWLWIFI_DEBUG
1255 dev_printk(KERN_INFO
, trans(priv
)->dev
, "CONFIG_IWLWIFI_DEBUGFS "
1256 #ifdef CONFIG_IWLWIFI_DEBUGFS
1261 dev_printk(KERN_INFO
, trans(priv
)->dev
, "CONFIG_IWLWIFI_DEVICE_TRACING "
1262 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
1268 dev_printk(KERN_INFO
, trans(priv
)->dev
, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
1269 #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1274 dev_printk(KERN_INFO
, trans(priv
)->dev
, "CONFIG_IWLWIFI_P2P "
1275 #ifdef CONFIG_IWLWIFI_P2P
1282 static struct iwl_op_mode
*iwl_op_mode_dvm_start(struct iwl_trans
*trans
,
1283 const struct iwl_fw
*fw
)
1285 struct iwl_priv
*priv
;
1286 struct ieee80211_hw
*hw
;
1287 struct iwl_op_mode
*op_mode
;
1290 struct iwl_trans_config trans_cfg
;
1291 static const u8 no_reclaim_cmds
[] = {
1295 REPLY_COMPRESSED_BA
,
1296 STATISTICS_NOTIFICATION
,
1303 /************************
1304 * 1. Allocating HW data
1305 ************************/
1306 hw
= iwl_alloc_all();
1308 pr_err("%s: Cannot allocate network device\n",
1314 op_mode
->ops
= &iwl_dvm_ops
;
1315 priv
= IWL_OP_MODE_GET_DVM(op_mode
);
1316 priv
->shrd
= trans
->shrd
;
1318 /* TODO: remove fw from shared data later */
1319 priv
->shrd
->fw
= fw
;
1322 * Populate the state variables that the transport layer needs
1325 trans_cfg
.op_mode
= op_mode
;
1326 trans_cfg
.no_reclaim_cmds
= no_reclaim_cmds
;
1327 trans_cfg
.n_no_reclaim_cmds
= ARRAY_SIZE(no_reclaim_cmds
);
1329 ucode_flags
= fw
->ucode_capa
.flags
;
1331 #ifndef CONFIG_IWLWIFI_P2P
1332 ucode_flags
&= ~IWL_UCODE_TLV_FLAGS_PAN
;
1335 if (ucode_flags
& IWL_UCODE_TLV_FLAGS_PAN
) {
1336 priv
->sta_key_max_num
= STA_KEY_MAX_NUM_PAN
;
1337 trans_cfg
.cmd_queue
= IWL_IPAN_CMD_QUEUE_NUM
;
1338 trans_cfg
.queue_to_fifo
= iwlagn_ipan_queue_to_tx_fifo
;
1339 trans_cfg
.n_queue_to_fifo
=
1340 ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo
);
1341 q_to_ac
= iwlagn_pan_queue_to_ac
;
1342 n_q_to_ac
= ARRAY_SIZE(iwlagn_pan_queue_to_ac
);
1344 priv
->sta_key_max_num
= STA_KEY_MAX_NUM
;
1345 trans_cfg
.cmd_queue
= IWL_DEFAULT_CMD_QUEUE_NUM
;
1346 trans_cfg
.queue_to_fifo
= iwlagn_default_queue_to_tx_fifo
;
1347 trans_cfg
.n_queue_to_fifo
=
1348 ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo
);
1349 q_to_ac
= iwlagn_bss_queue_to_ac
;
1350 n_q_to_ac
= ARRAY_SIZE(iwlagn_bss_queue_to_ac
);
1353 /* Configure transport layer */
1354 iwl_trans_configure(trans(priv
), &trans_cfg
);
1356 /* At this point both hw and priv are allocated. */
1358 SET_IEEE80211_DEV(priv
->hw
, trans(priv
)->dev
);
1360 /* show what debugging capabilities we have */
1361 iwl_debug_config(priv
);
1363 IWL_DEBUG_INFO(priv
, "*** LOAD DRIVER ***\n");
1365 /* is antenna coupling more than 35dB ? */
1366 priv
->bt_ant_couple_ok
=
1367 (iwlagn_mod_params
.ant_coupling
>
1368 IWL_BT_ANTENNA_COUPLING_THRESHOLD
) ?
1371 /* enable/disable bt channel inhibition */
1372 priv
->bt_ch_announce
= iwlagn_mod_params
.bt_ch_announce
;
1373 IWL_DEBUG_INFO(priv
, "BT channel inhibition is %s\n",
1374 (priv
->bt_ch_announce
) ? "On" : "Off");
1376 if (iwl_alloc_traffic_mem(priv
))
1377 IWL_ERR(priv
, "Not enough memory to generate traffic log\n");
1379 /* these spin locks will be used in apm_ops.init and EEPROM access
1380 * we should init now
1382 spin_lock_init(&trans(priv
)->reg_lock
);
1383 spin_lock_init(&priv
->statistics
.lock
);
1385 /***********************
1386 * 2. Read REV register
1387 ***********************/
1388 IWL_INFO(priv
, "Detected %s, REV=0x%X\n",
1389 cfg(priv
)->name
, trans(priv
)->hw_rev
);
1391 if (iwl_trans_start_hw(trans(priv
)))
1392 goto out_free_traffic_mem
;
1397 /* Read the EEPROM */
1398 if (iwl_eeprom_init(trans(priv
), trans(priv
)->hw_rev
)) {
1399 IWL_ERR(priv
, "Unable to init EEPROM\n");
1400 goto out_free_traffic_mem
;
1402 /* Reset chip to save power until we load uCode during "up". */
1403 iwl_trans_stop_hw(trans(priv
));
1405 if (iwl_eeprom_check_version(priv
))
1406 goto out_free_eeprom
;
1408 if (iwl_eeprom_init_hw_params(priv
))
1409 goto out_free_eeprom
;
1411 /* extract MAC Address */
1412 iwl_eeprom_get_mac(priv
->shrd
, priv
->addresses
[0].addr
);
1413 IWL_DEBUG_INFO(priv
, "MAC address: %pM\n", priv
->addresses
[0].addr
);
1414 priv
->hw
->wiphy
->addresses
= priv
->addresses
;
1415 priv
->hw
->wiphy
->n_addresses
= 1;
1416 num_mac
= iwl_eeprom_query16(priv
->shrd
, EEPROM_NUM_MAC_ADDRESS
);
1418 memcpy(priv
->addresses
[1].addr
, priv
->addresses
[0].addr
,
1420 priv
->addresses
[1].addr
[5]++;
1421 priv
->hw
->wiphy
->n_addresses
++;
1424 /************************
1425 * 4. Setup HW constants
1426 ************************/
1427 iwl_set_hw_params(priv
);
1429 if (!(hw_params(priv
).sku
& EEPROM_SKU_CAP_IPAN_ENABLE
)) {
1430 IWL_DEBUG_INFO(priv
, "Your EEPROM disabled PAN");
1431 ucode_flags
&= ~IWL_UCODE_TLV_FLAGS_PAN
;
1433 * if not PAN, then don't support P2P -- might be a uCode
1434 * packaging bug or due to the eeprom check above
1436 ucode_flags
&= ~IWL_UCODE_TLV_FLAGS_P2P
;
1437 priv
->sta_key_max_num
= STA_KEY_MAX_NUM
;
1438 trans_cfg
.cmd_queue
= IWL_DEFAULT_CMD_QUEUE_NUM
;
1439 trans_cfg
.queue_to_fifo
= iwlagn_default_queue_to_tx_fifo
;
1440 trans_cfg
.n_queue_to_fifo
=
1441 ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo
);
1442 q_to_ac
= iwlagn_bss_queue_to_ac
;
1443 n_q_to_ac
= ARRAY_SIZE(iwlagn_bss_queue_to_ac
);
1445 /* Configure transport layer again*/
1446 iwl_trans_configure(trans(priv
), &trans_cfg
);
1449 /*******************
1451 *******************/
1452 for (i
= 0; i
< IEEE80211_NUM_ACS
; i
++)
1453 atomic_set(&priv
->ac_stop_count
[i
], 0);
1455 for (i
= 0; i
< IWL_MAX_HW_QUEUES
; i
++) {
1457 priv
->queue_to_ac
[i
] = q_to_ac
[i
];
1459 priv
->queue_to_ac
[i
] = IWL_INVALID_AC
;
1462 WARN_ON(trans_cfg
.queue_to_fifo
[trans_cfg
.cmd_queue
] !=
1463 IWLAGN_CMD_FIFO_NUM
);
1465 if (iwl_init_drv(priv
))
1466 goto out_free_eeprom
;
1468 /* At this point both hw and priv are initialized. */
1470 /********************
1472 ********************/
1473 iwl_setup_deferred_work(priv
);
1474 iwl_setup_rx_handlers(priv
);
1475 iwl_testmode_init(priv
);
1477 iwl_power_initialize(priv
);
1478 iwl_tt_initialize(priv
);
1480 snprintf(priv
->hw
->wiphy
->fw_version
,
1481 sizeof(priv
->hw
->wiphy
->fw_version
),
1482 "%s", fw
->fw_version
);
1484 priv
->new_scan_threshold_behaviour
=
1485 !!(ucode_flags
& IWL_UCODE_TLV_FLAGS_NEWSCAN
);
1487 priv
->phy_calib_chain_noise_reset_cmd
=
1488 fw
->ucode_capa
.standard_phy_calibration_size
;
1489 priv
->phy_calib_chain_noise_gain_cmd
=
1490 fw
->ucode_capa
.standard_phy_calibration_size
+ 1;
1492 /* initialize all valid contexts */
1493 iwl_init_context(priv
, ucode_flags
);
1495 /**************************************************
1496 * This is still part of probe() in a sense...
1498 * 7. Setup and register with mac80211 and debugfs
1499 **************************************************/
1500 if (iwlagn_mac_setup_register(priv
, &fw
->ucode_capa
))
1501 goto out_destroy_workqueue
;
1503 if (iwl_dbgfs_register(priv
, DRV_NAME
))
1505 "failed to create debugfs files. Ignoring error\n");
1509 out_destroy_workqueue
:
1510 destroy_workqueue(priv
->workqueue
);
1511 priv
->workqueue
= NULL
;
1512 iwl_uninit_drv(priv
);
1514 iwl_eeprom_free(priv
->shrd
);
1515 out_free_traffic_mem
:
1516 iwl_free_traffic_mem(priv
);
1517 ieee80211_free_hw(priv
->hw
);
1523 static void iwl_op_mode_dvm_stop(struct iwl_op_mode
*op_mode
)
1525 struct iwl_priv
*priv
= IWL_OP_MODE_GET_DVM(op_mode
);
1527 IWL_DEBUG_INFO(priv
, "*** UNLOAD DRIVER ***\n");
1529 iwl_dbgfs_unregister(priv
);
1531 iwl_testmode_cleanup(priv
);
1532 iwlagn_mac_unregister(priv
);
1536 /*This will stop the queues, move the device to low power state */
1537 priv
->ucode_loaded
= false;
1538 iwl_trans_stop_device(trans(priv
));
1540 iwl_eeprom_free(priv
->shrd
);
1542 /*netif_stop_queue(dev); */
1543 flush_workqueue(priv
->workqueue
);
1545 /* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
1546 * priv->workqueue... so we can't take down the workqueue
1548 destroy_workqueue(priv
->workqueue
);
1549 priv
->workqueue
= NULL
;
1550 iwl_free_traffic_mem(priv
);
1552 iwl_uninit_drv(priv
);
1554 dev_kfree_skb(priv
->beacon_skb
);
1556 ieee80211_free_hw(priv
->hw
);
1559 static const char * const desc_lookup_text
[] = {
1564 "NMI_INTERRUPT_WDG",
1568 "HW_ERROR_TUNE_LOCK",
1569 "HW_ERROR_TEMPERATURE",
1570 "ILLEGAL_CHAN_FREQ",
1573 "NMI_INTERRUPT_HOST",
1574 "NMI_INTERRUPT_ACTION_PT",
1575 "NMI_INTERRUPT_UNKNOWN",
1576 "UCODE_VERSION_MISMATCH",
1577 "HW_ERROR_ABS_LOCK",
1578 "HW_ERROR_CAL_LOCK_FAIL",
1579 "NMI_INTERRUPT_INST_ACTION_PT",
1580 "NMI_INTERRUPT_DATA_ACTION_PT",
1582 "NMI_INTERRUPT_TRM",
1583 "NMI_INTERRUPT_BREAK_POINT",
1590 static struct { char *name
; u8 num
; } advanced_lookup
[] = {
1591 { "NMI_INTERRUPT_WDG", 0x34 },
1592 { "SYSASSERT", 0x35 },
1593 { "UCODE_VERSION_MISMATCH", 0x37 },
1594 { "BAD_COMMAND", 0x38 },
1595 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1596 { "FATAL_ERROR", 0x3D },
1597 { "NMI_TRM_HW_ERR", 0x46 },
1598 { "NMI_INTERRUPT_TRM", 0x4C },
1599 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1600 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1601 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1602 { "NMI_INTERRUPT_HOST", 0x66 },
1603 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1604 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1605 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1606 { "ADVANCED_SYSASSERT", 0 },
1609 static const char *desc_lookup(u32 num
)
1612 int max
= ARRAY_SIZE(desc_lookup_text
);
1615 return desc_lookup_text
[num
];
1617 max
= ARRAY_SIZE(advanced_lookup
) - 1;
1618 for (i
= 0; i
< max
; i
++) {
1619 if (advanced_lookup
[i
].num
== num
)
1622 return advanced_lookup
[i
].name
;
1625 #define ERROR_START_OFFSET (1 * sizeof(u32))
1626 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
1628 static void iwl_dump_nic_error_log(struct iwl_priv
*priv
)
1630 struct iwl_trans
*trans
= trans(priv
);
1632 struct iwl_error_event_table table
;
1634 base
= priv
->device_pointers
.error_event_table
;
1635 if (priv
->cur_ucode
== IWL_UCODE_INIT
) {
1637 base
= priv
->shrd
->fw
->init_errlog_ptr
;
1640 base
= priv
->shrd
->fw
->inst_errlog_ptr
;
1643 if (!iwlagn_hw_valid_rtc_data_addr(base
)) {
1645 "Not valid error log pointer 0x%08X for %s uCode\n",
1647 (priv
->cur_ucode
== IWL_UCODE_INIT
)
1652 /*TODO: Update dbgfs with ISR error stats obtained below */
1653 iwl_read_targ_mem_words(trans
, base
, &table
, sizeof(table
));
1655 if (ERROR_START_OFFSET
<= table
.valid
* ERROR_ELEM_SIZE
) {
1656 IWL_ERR(trans
, "Start IWL Error Log Dump:\n");
1657 IWL_ERR(trans
, "Status: 0x%08lX, count: %d\n",
1658 priv
->shrd
->status
, table
.valid
);
1661 trace_iwlwifi_dev_ucode_error(trans
->dev
, table
.error_id
, table
.tsf_low
,
1662 table
.data1
, table
.data2
, table
.line
,
1663 table
.blink1
, table
.blink2
, table
.ilink1
,
1664 table
.ilink2
, table
.bcon_time
, table
.gp1
,
1665 table
.gp2
, table
.gp3
, table
.ucode_ver
,
1666 table
.hw_ver
, table
.brd_ver
);
1667 IWL_ERR(priv
, "0x%08X | %-28s\n", table
.error_id
,
1668 desc_lookup(table
.error_id
));
1669 IWL_ERR(priv
, "0x%08X | uPc\n", table
.pc
);
1670 IWL_ERR(priv
, "0x%08X | branchlink1\n", table
.blink1
);
1671 IWL_ERR(priv
, "0x%08X | branchlink2\n", table
.blink2
);
1672 IWL_ERR(priv
, "0x%08X | interruptlink1\n", table
.ilink1
);
1673 IWL_ERR(priv
, "0x%08X | interruptlink2\n", table
.ilink2
);
1674 IWL_ERR(priv
, "0x%08X | data1\n", table
.data1
);
1675 IWL_ERR(priv
, "0x%08X | data2\n", table
.data2
);
1676 IWL_ERR(priv
, "0x%08X | line\n", table
.line
);
1677 IWL_ERR(priv
, "0x%08X | beacon time\n", table
.bcon_time
);
1678 IWL_ERR(priv
, "0x%08X | tsf low\n", table
.tsf_low
);
1679 IWL_ERR(priv
, "0x%08X | tsf hi\n", table
.tsf_hi
);
1680 IWL_ERR(priv
, "0x%08X | time gp1\n", table
.gp1
);
1681 IWL_ERR(priv
, "0x%08X | time gp2\n", table
.gp2
);
1682 IWL_ERR(priv
, "0x%08X | time gp3\n", table
.gp3
);
1683 IWL_ERR(priv
, "0x%08X | uCode version\n", table
.ucode_ver
);
1684 IWL_ERR(priv
, "0x%08X | hw version\n", table
.hw_ver
);
1685 IWL_ERR(priv
, "0x%08X | board version\n", table
.brd_ver
);
1686 IWL_ERR(priv
, "0x%08X | hcmd\n", table
.hcmd
);
1687 IWL_ERR(priv
, "0x%08X | isr0\n", table
.isr0
);
1688 IWL_ERR(priv
, "0x%08X | isr1\n", table
.isr1
);
1689 IWL_ERR(priv
, "0x%08X | isr2\n", table
.isr2
);
1690 IWL_ERR(priv
, "0x%08X | isr3\n", table
.isr3
);
1691 IWL_ERR(priv
, "0x%08X | isr4\n", table
.isr4
);
1692 IWL_ERR(priv
, "0x%08X | isr_pref\n", table
.isr_pref
);
1693 IWL_ERR(priv
, "0x%08X | wait_event\n", table
.wait_event
);
1694 IWL_ERR(priv
, "0x%08X | l2p_control\n", table
.l2p_control
);
1695 IWL_ERR(priv
, "0x%08X | l2p_duration\n", table
.l2p_duration
);
1696 IWL_ERR(priv
, "0x%08X | l2p_mhvalid\n", table
.l2p_mhvalid
);
1697 IWL_ERR(priv
, "0x%08X | l2p_addr_match\n", table
.l2p_addr_match
);
1698 IWL_ERR(priv
, "0x%08X | lmpm_pmg_sel\n", table
.lmpm_pmg_sel
);
1699 IWL_ERR(priv
, "0x%08X | timestamp\n", table
.u_timestamp
);
1700 IWL_ERR(priv
, "0x%08X | flow_handler\n", table
.flow_handler
);
1703 #define EVENT_START_OFFSET (4 * sizeof(u32))
1706 * iwl_print_event_log - Dump error event log to syslog
1709 static int iwl_print_event_log(struct iwl_priv
*priv
, u32 start_idx
,
1710 u32 num_events
, u32 mode
,
1711 int pos
, char **buf
, size_t bufsz
)
1714 u32 base
; /* SRAM byte address of event log header */
1715 u32 event_size
; /* 2 u32s, or 3 u32s if timestamp recorded */
1716 u32 ptr
; /* SRAM byte address of log data */
1717 u32 ev
, time
, data
; /* event log data */
1718 unsigned long reg_flags
;
1720 struct iwl_trans
*trans
= trans(priv
);
1722 if (num_events
== 0)
1725 base
= priv
->device_pointers
.log_event_table
;
1726 if (priv
->cur_ucode
== IWL_UCODE_INIT
) {
1728 base
= priv
->shrd
->fw
->init_evtlog_ptr
;
1731 base
= priv
->shrd
->fw
->inst_evtlog_ptr
;
1735 event_size
= 2 * sizeof(u32
);
1737 event_size
= 3 * sizeof(u32
);
1739 ptr
= base
+ EVENT_START_OFFSET
+ (start_idx
* event_size
);
1741 /* Make sure device is powered up for SRAM reads */
1742 spin_lock_irqsave(&trans
->reg_lock
, reg_flags
);
1743 if (unlikely(!iwl_grab_nic_access(trans
)))
1746 /* Set starting address; reads will auto-increment */
1747 iwl_write32(trans
, HBUS_TARG_MEM_RADDR
, ptr
);
1749 /* "time" is actually "data" for mode 0 (no timestamp).
1750 * place event id # at far right for easier visual parsing. */
1751 for (i
= 0; i
< num_events
; i
++) {
1752 ev
= iwl_read32(trans
, HBUS_TARG_MEM_RDAT
);
1753 time
= iwl_read32(trans
, HBUS_TARG_MEM_RDAT
);
1757 pos
+= scnprintf(*buf
+ pos
, bufsz
- pos
,
1758 "EVT_LOG:0x%08x:%04u\n",
1761 trace_iwlwifi_dev_ucode_event(trans
->dev
, 0,
1763 IWL_ERR(priv
, "EVT_LOG:0x%08x:%04u\n",
1767 data
= iwl_read32(trans
, HBUS_TARG_MEM_RDAT
);
1769 pos
+= scnprintf(*buf
+ pos
, bufsz
- pos
,
1770 "EVT_LOGT:%010u:0x%08x:%04u\n",
1773 IWL_ERR(priv
, "EVT_LOGT:%010u:0x%08x:%04u\n",
1775 trace_iwlwifi_dev_ucode_event(trans
->dev
, time
,
1781 /* Allow device to power down */
1782 iwl_release_nic_access(trans
);
1784 spin_unlock_irqrestore(&trans
->reg_lock
, reg_flags
);
1789 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1791 static int iwl_print_last_event_logs(struct iwl_priv
*priv
, u32 capacity
,
1792 u32 num_wraps
, u32 next_entry
,
1794 int pos
, char **buf
, size_t bufsz
)
1797 * display the newest DEFAULT_LOG_ENTRIES entries
1798  * i.e. the entries just before the next one that uCode would fill.
1801 if (next_entry
< size
) {
1802 pos
= iwl_print_event_log(priv
,
1803 capacity
- (size
- next_entry
),
1804 size
- next_entry
, mode
,
1806 pos
= iwl_print_event_log(priv
, 0,
1810 pos
= iwl_print_event_log(priv
, next_entry
- size
,
1811 size
, mode
, pos
, buf
, bufsz
);
1813 if (next_entry
< size
) {
1814 pos
= iwl_print_event_log(priv
, 0, next_entry
,
1815 mode
, pos
, buf
, bufsz
);
1817 pos
= iwl_print_event_log(priv
, next_entry
- size
,
1818 size
, mode
, pos
, buf
, bufsz
);
1824 #define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1826 int iwl_dump_nic_event_log(struct iwl_priv
*priv
, bool full_log
,
1827 char **buf
, bool display
)
1829 u32 base
; /* SRAM byte address of event log header */
1830 u32 capacity
; /* event log capacity in # entries */
1831 u32 mode
; /* 0 - no timestamp, 1 - timestamp recorded */
1832 u32 num_wraps
; /* # times uCode wrapped to top of log */
1833 u32 next_entry
; /* index of next entry to be written by uCode */
1834 u32 size
; /* # entries that we'll print */
1838 struct iwl_trans
*trans
= trans(priv
);
1840 base
= priv
->device_pointers
.log_event_table
;
1841 if (priv
->cur_ucode
== IWL_UCODE_INIT
) {
1842 logsize
= priv
->shrd
->fw
->init_evtlog_size
;
1844 base
= priv
->shrd
->fw
->init_evtlog_ptr
;
1846 logsize
= priv
->shrd
->fw
->inst_evtlog_size
;
1848 base
= priv
->shrd
->fw
->inst_evtlog_ptr
;
1851 if (!iwlagn_hw_valid_rtc_data_addr(base
)) {
1853 "Invalid event log pointer 0x%08X for %s uCode\n",
1855 (priv
->cur_ucode
== IWL_UCODE_INIT
)
1860 /* event log header */
1861 capacity
= iwl_read_targ_mem(trans
, base
);
1862 mode
= iwl_read_targ_mem(trans
, base
+ (1 * sizeof(u32
)));
1863 num_wraps
= iwl_read_targ_mem(trans
, base
+ (2 * sizeof(u32
)));
1864 next_entry
= iwl_read_targ_mem(trans
, base
+ (3 * sizeof(u32
)));
1866 if (capacity
> logsize
) {
1867 IWL_ERR(priv
, "Log capacity %d is bogus, limit to %d "
1868 "entries\n", capacity
, logsize
);
1872 if (next_entry
> logsize
) {
1873 IWL_ERR(priv
, "Log write index %d is bogus, limit to %d\n",
1874 next_entry
, logsize
);
1875 next_entry
= logsize
;
1878 size
= num_wraps
? capacity
: next_entry
;
1880 /* bail out if nothing in log */
1882 IWL_ERR(trans
, "Start IWL Event Log Dump: nothing in log\n");
1886 #ifdef CONFIG_IWLWIFI_DEBUG
1887 if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS
)) && !full_log
)
1888 size
= (size
> DEFAULT_DUMP_EVENT_LOG_ENTRIES
)
1889 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES
: size
;
1891 size
= (size
> DEFAULT_DUMP_EVENT_LOG_ENTRIES
)
1892 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES
: size
;
1894 IWL_ERR(priv
, "Start IWL Event Log Dump: display last %u entries\n",
1897 #ifdef CONFIG_IWLWIFI_DEBUG
1900 bufsz
= capacity
* 48;
1903 *buf
= kmalloc(bufsz
, GFP_KERNEL
);
1907 if (iwl_have_debug_level(IWL_DL_FW_ERRORS
) || full_log
) {
1909 * if uCode has wrapped back to top of log,
1910 * start at the oldest entry,
1911 * i.e the next one that uCode would fill.
1914 pos
= iwl_print_event_log(priv
, next_entry
,
1915 capacity
- next_entry
, mode
,
1917 /* (then/else) start at top of log */
1918 pos
= iwl_print_event_log(priv
, 0,
1919 next_entry
, mode
, pos
, buf
, bufsz
);
1921 pos
= iwl_print_last_event_logs(priv
, capacity
, num_wraps
,
1922 next_entry
, size
, mode
,
1925 pos
= iwl_print_last_event_logs(priv
, capacity
, num_wraps
,
1926 next_entry
, size
, mode
,
1932 static void iwl_nic_error(struct iwl_op_mode
*op_mode
)
1934 struct iwl_priv
*priv
= IWL_OP_MODE_GET_DVM(op_mode
);
1936 IWL_ERR(priv
, "Loaded firmware version: %s\n",
1937 priv
->shrd
->fw
->fw_version
);
1939 iwl_dump_nic_error_log(priv
);
1940 iwl_dump_nic_event_log(priv
, false, NULL
, false);
1942 iwlagn_fw_error(priv
, false);
1945 static void iwl_cmd_queue_full(struct iwl_op_mode
*op_mode
)
1947 struct iwl_priv
*priv
= IWL_OP_MODE_GET_DVM(op_mode
);
1949 if (!iwl_check_for_ct_kill(priv
)) {
1950 IWL_ERR(priv
, "Restarting adapter queue is full\n");
1951 iwlagn_fw_error(priv
, false);
1955 static void iwl_nic_config(struct iwl_op_mode
*op_mode
)
1957 struct iwl_priv
*priv
= IWL_OP_MODE_GET_DVM(op_mode
);
1959 cfg(priv
)->lib
->nic_config(priv
);
1962 static void iwl_stop_sw_queue(struct iwl_op_mode
*op_mode
, int queue
)
1964 struct iwl_priv
*priv
= IWL_OP_MODE_GET_DVM(op_mode
);
1965 int ac
= priv
->queue_to_ac
[queue
];
1967 if (WARN_ON_ONCE(ac
== IWL_INVALID_AC
))
1970 if (atomic_inc_return(&priv
->ac_stop_count
[ac
]) > 1) {
1971 IWL_DEBUG_TX_QUEUES(priv
,
1972 "queue %d (AC %d) already stopped\n",
1977 set_bit(ac
, &priv
->transport_queue_stop
);
1978 ieee80211_stop_queue(priv
->hw
, ac
);
1981 static void iwl_wake_sw_queue(struct iwl_op_mode
*op_mode
, int queue
)
1983 struct iwl_priv
*priv
= IWL_OP_MODE_GET_DVM(op_mode
);
1984 int ac
= priv
->queue_to_ac
[queue
];
1986 if (WARN_ON_ONCE(ac
== IWL_INVALID_AC
))
1989 if (atomic_dec_return(&priv
->ac_stop_count
[ac
]) > 0) {
1990 IWL_DEBUG_TX_QUEUES(priv
,
1991 "queue %d (AC %d) already awake\n",
1996 clear_bit(ac
, &priv
->transport_queue_stop
);
1998 if (!priv
->passive_no_rx
)
1999 ieee80211_wake_queue(priv
->hw
, ac
);
2002 void iwlagn_lift_passive_no_rx(struct iwl_priv
*priv
)
2006 if (!priv
->passive_no_rx
)
2009 for (ac
= IEEE80211_AC_VO
; ac
< IEEE80211_NUM_ACS
; ac
++) {
2010 if (!test_bit(ac
, &priv
->transport_queue_stop
)) {
2011 IWL_DEBUG_TX_QUEUES(priv
, "Wake queue %d");
2012 ieee80211_wake_queue(priv
->hw
, ac
);
2014 IWL_DEBUG_TX_QUEUES(priv
, "Don't wake queue %d");
2018 priv
->passive_no_rx
= false;
2021 const struct iwl_op_mode_ops iwl_dvm_ops
= {
2022 .start
= iwl_op_mode_dvm_start
,
2023 .stop
= iwl_op_mode_dvm_stop
,
2024 .rx
= iwl_rx_dispatch
,
2025 .queue_full
= iwl_stop_sw_queue
,
2026 .queue_not_full
= iwl_wake_sw_queue
,
2027 .hw_rf_kill
= iwl_set_hw_rfkill_state
,
2028 .free_skb
= iwl_free_skb
,
2029 .nic_error
= iwl_nic_error
,
2030 .cmd_queue_full
= iwl_cmd_queue_full
,
2031 .nic_config
= iwl_nic_config
,
2034 /*****************************************************************************
2036 * driver and module entry point
2038 *****************************************************************************/
2040 struct kmem_cache
*iwl_tx_cmd_pool
;
2042 static int __init
iwl_init(void)
2046 pr_info(DRV_DESCRIPTION
", " DRV_VERSION
"\n");
2047 pr_info(DRV_COPYRIGHT
"\n");
2049 iwl_tx_cmd_pool
= kmem_cache_create("iwl_dev_cmd",
2050 sizeof(struct iwl_device_cmd
),
2051 sizeof(void *), 0, NULL
);
2052 if (!iwl_tx_cmd_pool
)
2055 ret
= iwlagn_rate_control_register();
2057 pr_err("Unable to register rate control algorithm: %d\n", ret
);
2058 goto error_rc_register
;
2061 ret
= iwl_pci_register_driver();
2063 goto error_pci_register
;
2067 iwlagn_rate_control_unregister();
2069 kmem_cache_destroy(iwl_tx_cmd_pool
);
2073 static void __exit
iwl_exit(void)
2075 iwl_pci_unregister_driver();
2076 iwlagn_rate_control_unregister();
2077 kmem_cache_destroy(iwl_tx_cmd_pool
);
2080 module_exit(iwl_exit
);
2081 module_init(iwl_init
);
2083 #ifdef CONFIG_IWLWIFI_DEBUG
2084 module_param_named(debug
, iwlagn_mod_params
.debug_level
, uint
,
2086 MODULE_PARM_DESC(debug
, "debug output mask");
2089 module_param_named(swcrypto
, iwlagn_mod_params
.sw_crypto
, int, S_IRUGO
);
2090 MODULE_PARM_DESC(swcrypto
, "using crypto in software (default 0 [hardware])");
2091 module_param_named(11n_disable
, iwlagn_mod_params
.disable_11n
, uint
, S_IRUGO
);
2092 MODULE_PARM_DESC(11n_disable
,
2093 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
2094 module_param_named(amsdu_size_8K
, iwlagn_mod_params
.amsdu_size_8K
,
2096 MODULE_PARM_DESC(amsdu_size_8K
, "enable 8K amsdu size");
2097 module_param_named(fw_restart
, iwlagn_mod_params
.restart_fw
, int, S_IRUGO
);
2098 MODULE_PARM_DESC(fw_restart
, "restart firmware in case of error");
2100 module_param_named(ucode_alternative
,
2101 iwlagn_mod_params
.wanted_ucode_alternative
,
2103 MODULE_PARM_DESC(ucode_alternative
,
2104 "specify ucode alternative to use from ucode file");
2106 module_param_named(antenna_coupling
, iwlagn_mod_params
.ant_coupling
,
2108 MODULE_PARM_DESC(antenna_coupling
,
2109 "specify antenna coupling in dB (defualt: 0 dB)");
2111 module_param_named(bt_ch_inhibition
, iwlagn_mod_params
.bt_ch_announce
,
2113 MODULE_PARM_DESC(bt_ch_inhibition
,
2114 "Enable BT channel inhibition (default: enable)");
2116 module_param_named(plcp_check
, iwlagn_mod_params
.plcp_check
, bool, S_IRUGO
);
2117 MODULE_PARM_DESC(plcp_check
, "Check plcp health (default: 1 [enabled])");
2119 module_param_named(ack_check
, iwlagn_mod_params
.ack_check
, bool, S_IRUGO
);
2120 MODULE_PARM_DESC(ack_check
, "Check ack health (default: 0 [disabled])");
2122 module_param_named(wd_disable
, iwlagn_mod_params
.wd_disable
, int, S_IRUGO
);
2123 MODULE_PARM_DESC(wd_disable
,
2124 "Disable stuck queue watchdog timer 0=system default, "
2125 "1=disable, 2=enable (default: 0)");
2128 * set bt_coex_active to true, uCode will do kill/defer
2129 * every time the priority line is asserted (BT is sending signals on the
2130 * priority line in the PCIx).
2131 * set bt_coex_active to false, uCode will ignore the BT activity and
2132 * perform the normal operation
2134 * User might experience transmit issue on some platform due to WiFi/BT
2135 * co-exist problem. The possible behaviors are:
2136 * Able to scan and finding all the available AP
2137 * Not able to associate with any AP
2138 * On those platforms, WiFi communication can be restored by set
2139 * "bt_coex_active" module parameter to "false"
2141 * default: bt_coex_active = true (BT_COEX_ENABLE)
2143 module_param_named(bt_coex_active
, iwlagn_mod_params
.bt_coex_active
,
2145 MODULE_PARM_DESC(bt_coex_active
, "enable wifi/bt co-exist (default: enable)");
2147 module_param_named(led_mode
, iwlagn_mod_params
.led_mode
, int, S_IRUGO
);
2148 MODULE_PARM_DESC(led_mode
, "0=system default, "
2149 "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
2151 module_param_named(power_save
, iwlagn_mod_params
.power_save
,
2153 MODULE_PARM_DESC(power_save
,
2154 "enable WiFi power management (default: disable)");
2156 module_param_named(power_level
, iwlagn_mod_params
.power_level
,
2158 MODULE_PARM_DESC(power_level
,
2159 "default power save level (range from 1 - 5, default: 1)");
2161 module_param_named(auto_agg
, iwlagn_mod_params
.auto_agg
,
2163 MODULE_PARM_DESC(auto_agg
,
2164 "enable agg w/o check traffic load (default: enable)");
2167 * For now, keep using power level 1 instead of automatically
2170 module_param_named(no_sleep_autoadjust
, iwlagn_mod_params
.no_sleep_autoadjust
,
2172 MODULE_PARM_DESC(no_sleep_autoadjust
,
2173 "don't automatically adjust sleep level "
2174 "according to maximum network latency (default: true)");