iwl3945: Use iwlcore scan code
drivers/net/wireless/iwlwifi/iwl3945-base.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/ieee80211_radiotap.h>
#include <net/lib80211.h>
#include <net/mac80211.h>

#include <asm/div64.h>

#define DRV_NAME "iwl3945"

#include "iwl-fh.h"
#include "iwl-3945-fh.h"
#include "iwl-commands.h"
#include "iwl-3945.h"
#include "iwl-helpers.h"
#include "iwl-core.h"
#include "iwl-dev.h"

/*
 * module name, copyright, version, etc.
 */

#define DRV_DESCRIPTION \
"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"

#ifdef CONFIG_IWL3945_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
#define VS "s"
#else
#define VS
#endif

#define IWL39_VERSION "1.2.26k" VD VS
#define DRV_COPYRIGHT   "Copyright(c) 2003-2009 Intel Corporation"
#define DRV_AUTHOR      "<ilw@linux.intel.com>"
#define DRV_VERSION     IWL39_VERSION


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

/* module parameters */
struct iwl_mod_params iwl3945_mod_params = {
        .num_of_queues = IWL39_MAX_NUM_QUEUES,
        .sw_crypto = 1,
        /* the rest are 0 by default */
};

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, the free space becomes less than the low mark, the Tx
 * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the
 * free space becomes greater than the high mark, the Tx queue is resumed.
 *
 * The 3945 operates with six queues:  One receive queue, one transmit queue
 * (#4) for sending commands to the device firmware, and four transmit queues
 * (#0-3) for data tx via EDCA.  An additional 2 HCCA queues are unused.
 ***************************************************/

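/*
 * Note on the wrap arithmetic used throughout this file: because every queue
 * size is forced to a power of two (see the BUG_ON checks in
 * iwl3945_queue_init() below), advancing a read or write index reduces to a
 * masked increment, roughly:
 *
 *      next = (index + 1) & (n_bd - 1);
 *
 * This sketch assumes iwl_queue_inc_wrap()/iwl_queue_dec_wrap() from
 * iwl-helpers.h behave this way; see that header for the actual helpers.
 */
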
/**
 * iwl3945_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl3945_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                              int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        BUG_ON(!is_power_of_2(count));

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        BUG_ON(!is_power_of_2(slots_num));

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}

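/*
 * Example of the resulting watermarks (illustrative only, assuming the usual
 * iwlwifi slot counts of TFD_TX_CMD_SLOTS == 256 and TFD_CMD_SLOTS == 32):
 * a data Tx queue gets low_mark = 64 and high_mark = 32 free entries, while
 * the command queue gets low_mark = 8 and high_mark = 4.
 */
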
/**
 * iwl3945_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl3945_tx_queue_alloc(struct iwl_priv *priv,
                                  struct iwl_tx_queue *txq, u32 id)
{
        struct pci_dev *dev = priv->pci_dev;

        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != IWL_CMD_QUEUE_NUM) {
                txq->txb = kmalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kmalloc for auxiliary BD "
                                "structures failed\n");
                        goto error;
                }
        } else
                txq->txb = NULL;

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds39 = pci_alloc_consistent(dev,
                        sizeof(txq->tfds39[0]) * TFD_QUEUE_SIZE_MAX,
                        &txq->q.dma_addr);

        if (!txq->tfds39) {
                IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n",
                        sizeof(txq->tfds39[0]) * TFD_QUEUE_SIZE_MAX);
                goto error;
        }
        txq->q.id = id;

        return 0;

 error:
        kfree(txq->txb);
        txq->txb = NULL;

        return -ENOMEM;
}

/**
 * iwl3945_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl3945_tx_queue_init(struct iwl_priv *priv,
                          struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
{
        int len, i;
        int rc = 0;

        /*
         * Alloc buffer array for commands (Tx or other types of commands).
         * For the command queue (#4), allocate command space + one big
         * command for scan, since scan command is very huge; the system will
         * not have two scans at the same time, so only one is needed.
         * For data Tx queues (all other queues), no super-size command
         * space is needed.
         */
        len = sizeof(struct iwl_cmd);
        for (i = 0; i <= slots_num; i++) {
                if (i == slots_num) {
                        if (txq_id == IWL_CMD_QUEUE_NUM)
                                len += IWL_MAX_SCAN_SIZE;
                        else
                                continue;
                }

                txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                if (!txq->cmd[i])
                        goto err;
        }

        /* Alloc driver data array and TFD circular buffer */
        rc = iwl3945_tx_queue_alloc(priv, txq, txq_id);
        if (rc)
                goto err;

        txq->need_update = 0;

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue high/low-water, head/tail indexes */
        iwl3945_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue, enable DMA channel. */
        iwl3945_hw_tx_queue_init(priv, txq);

        return 0;
err:
        for (i = 0; i < slots_num; i++) {
                kfree(txq->cmd[i]);
                txq->cmd[i] = NULL;
        }

        if (txq_id == IWL_CMD_QUEUE_NUM) {
                kfree(txq->cmd[slots_num]);
                txq->cmd[slots_num] = NULL;
        }
        return -ENOMEM;
}

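/*
 * Rough picture of the txq->cmd[] layout built above (illustration only):
 * slots 0..slots_num-1 each get a kmalloc'd struct iwl_cmd, and for the
 * command queue (#4) one extra slot, cmd[slots_num], is allocated
 * sizeof(struct iwl_cmd) + IWL_MAX_SCAN_SIZE bytes so it can hold the
 * oversized scan command.  Data queues allocate no such extra slot.
 */
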
/**
 * iwl3945_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        struct iwl_queue *q = &txq->q;
        struct pci_dev *dev = priv->pci_dev;
        int len, i;

        if (q->n_bd == 0)
                return;

        /* first, empty all BD's */
        for (; q->write_ptr != q->read_ptr;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
                priv->cfg->ops->lib->txq_free_tfd(priv, txq);

        len = sizeof(struct iwl_cmd) * q->n_window;
        if (q->id == IWL_CMD_QUEUE_NUM)
                len += IWL_MAX_SCAN_SIZE;

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                pci_free_consistent(dev, sizeof(struct iwl3945_tfd) *
                                    txq->q.n_bd, txq->tfds39, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/*************** STATION TABLE MANAGEMENT ****
 * mac80211 should be examined to determine if sta_info is duplicating
 * the functionality provided here
 */

/**************************************************************/
#if 0 /* temporary disable till we add real remove station */
/**
 * iwl3945_remove_station - Remove driver's knowledge of station.
 *
 * NOTE: This does not remove station from device's station table.
 */
static u8 iwl3945_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
{
        int index = IWL_INVALID_STATION;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&priv->sta_lock, flags);

        if (is_ap)
                index = IWL_AP_ID;
        else if (is_broadcast_ether_addr(addr))
                index = priv->hw_params.bcast_sta_id;
        else
                for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
                        if (priv->stations_39[i].used &&
                            !compare_ether_addr(priv->stations_39[i].sta.sta.addr,
                                                addr)) {
                                index = i;
                                break;
                        }

        if (unlikely(index == IWL_INVALID_STATION))
                goto out;

        if (priv->stations_39[index].used) {
                priv->stations_39[index].used = 0;
                priv->num_stations--;
        }

        BUG_ON(priv->num_stations < 0);

out:
        spin_unlock_irqrestore(&priv->sta_lock, flags);
        return 0;
}
#endif

/**
 * iwl3945_clear_stations_table - Clear the driver's station table
 *
 * NOTE: This does not clear or otherwise alter the device's station table.
 */
static void iwl3945_clear_stations_table(struct iwl_priv *priv)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->sta_lock, flags);

        priv->num_stations = 0;
        memset(priv->stations_39, 0, sizeof(priv->stations_39));

        spin_unlock_irqrestore(&priv->sta_lock, flags);
}

/**
 * iwl3945_add_station - Add station to station tables in driver and device
 */
u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
{
        int i;
        int index = IWL_INVALID_STATION;
        struct iwl3945_station_entry *station;
        unsigned long flags_spin;
        u8 rate;

        spin_lock_irqsave(&priv->sta_lock, flags_spin);
        if (is_ap)
                index = IWL_AP_ID;
        else if (is_broadcast_ether_addr(addr))
                index = priv->hw_params.bcast_sta_id;
        else
                for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
                        if (!compare_ether_addr(priv->stations_39[i].sta.sta.addr,
                                                addr)) {
                                index = i;
                                break;
                        }

                        if (!priv->stations_39[i].used &&
                            index == IWL_INVALID_STATION)
                                index = i;
                }

        /* These two conditions have the same outcome, but keep them separate
           since they have different meanings */
        if (unlikely(index == IWL_INVALID_STATION)) {
                spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
                return index;
        }

        if (priv->stations_39[index].used &&
            !compare_ether_addr(priv->stations_39[index].sta.sta.addr, addr)) {
                spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
                return index;
        }

        IWL_DEBUG_ASSOC("Add STA ID %d: %pM\n", index, addr);
        station = &priv->stations_39[index];
        station->used = 1;
        priv->num_stations++;

        /* Set up the REPLY_ADD_STA command to send to device */
        memset(&station->sta, 0, sizeof(struct iwl3945_addsta_cmd));
        memcpy(station->sta.sta.addr, addr, ETH_ALEN);
        station->sta.mode = 0;
        station->sta.sta.sta_id = index;
        station->sta.station_flags = 0;

        if (priv->band == IEEE80211_BAND_5GHZ)
                rate = IWL_RATE_6M_PLCP;
        else
                rate = IWL_RATE_1M_PLCP;

        /* Turn on both antennas for the station... */
        station->sta.rate_n_flags =
                        iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);

        spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

        /* Add station to device's station table */
        iwl3945_send_add_station(priv, &station->sta, flags);
        return index;

}

int iwl3945_send_statistics_request(struct iwl_priv *priv)
{
        u32 val = 0;

        struct iwl_host_cmd cmd = {
                .id = REPLY_STATISTICS_CMD,
                .len = sizeof(val),
                .data = &val,
        };

        return iwl_send_cmd_sync(priv, &cmd);
}

/**
 * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON
 * @band: 2.4 or 5 GHz band
 * @channel: Any channel valid for the requested band
 *
 * In addition to setting the staging RXON, priv->band is also set.
 *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the band
 */
static int iwl3945_set_rxon_channel(struct iwl_priv *priv,
                                    enum ieee80211_band band,
                                    u16 channel)
{
        if (!iwl3945_get_channel_info(priv, band, channel)) {
                IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
                               channel, band);
                return -EINVAL;
        }

        if ((le16_to_cpu(priv->staging39_rxon.channel) == channel) &&
            (priv->band == band))
                return 0;

        priv->staging39_rxon.channel = cpu_to_le16(channel);
        if (band == IEEE80211_BAND_5GHZ)
                priv->staging39_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
        else
                priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;

        priv->band = band;

        IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);

        return 0;
}

/**
 * iwl3945_check_rxon_cmd - validate RXON structure is valid
 *
 * NOTE:  This is really only useful during development and can eventually
 * be #ifdef'd out once the driver is stable and folks aren't actively
 * making changes
 */
static int iwl3945_check_rxon_cmd(struct iwl_priv *priv)
{
        int error = 0;
        int counter = 1;
        struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;

        if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
                error |= le32_to_cpu(rxon->flags &
                                (RXON_FLG_TGJ_NARROW_BAND_MSK |
                                 RXON_FLG_RADAR_DETECT_MSK));
                if (error)
                        IWL_WARN(priv, "check 24G fields %d | %d\n",
                                 counter++, error);
        } else {
                error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
                                0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
                if (error)
                        IWL_WARN(priv, "check 52 fields %d | %d\n",
                                 counter++, error);
                error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
                if (error)
                        IWL_WARN(priv, "check 52 CCK %d | %d\n",
                                 counter++, error);
        }
        error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
        if (error)
                IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);

        /* make sure basic rates 6Mbps and 1Mbps are supported */
        error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
                  ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
        if (error)
                IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);

        error |= (le16_to_cpu(rxon->assoc_id) > 2007);
        if (error)
                IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);

        error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
                        == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
        if (error)
                IWL_WARN(priv, "check CCK and short slot %d | %d\n",
                         counter++, error);

        error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
                        == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
        if (error)
                IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
                         counter++, error);

        error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
                        RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
        if (error)
                IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
                         counter++, error);

        if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
                error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
                                RXON_FLG_ANT_A_MSK)) == 0);
        if (error)
                IWL_WARN(priv, "check antenna %d %d\n", counter++, error);

        if (error)
                IWL_WARN(priv, "Tuning to channel %d\n",
                         le16_to_cpu(rxon->channel));

        if (error) {
                IWL_ERR(priv, "Not a valid rxon_assoc_cmd field values\n");
                return -1;
        }
        return 0;
}

/**
 * iwl3945_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
static int iwl3945_full_rxon_required(struct iwl_priv *priv)
{

        /* These items are only settable from the full RXON command */
        if (!(iwl3945_is_associated(priv)) ||
            compare_ether_addr(priv->staging39_rxon.bssid_addr,
                               priv->active39_rxon.bssid_addr) ||
            compare_ether_addr(priv->staging39_rxon.node_addr,
                               priv->active39_rxon.node_addr) ||
            compare_ether_addr(priv->staging39_rxon.wlap_bssid_addr,
                               priv->active39_rxon.wlap_bssid_addr) ||
            (priv->staging39_rxon.dev_type != priv->active39_rxon.dev_type) ||
            (priv->staging39_rxon.channel != priv->active39_rxon.channel) ||
            (priv->staging39_rxon.air_propagation !=
             priv->active39_rxon.air_propagation) ||
            (priv->staging39_rxon.assoc_id != priv->active39_rxon.assoc_id))
                return 1;

        /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
         * be updated with the RXON_ASSOC command -- however only some
         * flag transitions are allowed using RXON_ASSOC */

        /* Check if we are not switching bands */
        if ((priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
            (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK))
                return 1;

        /* Check if we are switching association toggle */
        if ((priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
            (priv->active39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
                return 1;

        return 0;
}

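/*
 * Example of how the two paths split (derived from the checks above):
 * changing only flags, filter_flags or the basic-rate sets can be pushed
 * with the lightweight REPLY_RXON_ASSOC command, while changing channel,
 * BSSID, node address, device type or association id forces a full
 * REPLY_RXON (a complete re-tune) in iwl3945_commit_rxon() below.
 */
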
static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
{
        int rc = 0;
        struct iwl_rx_packet *res = NULL;
        struct iwl3945_rxon_assoc_cmd rxon_assoc;
        struct iwl_host_cmd cmd = {
                .id = REPLY_RXON_ASSOC,
                .len = sizeof(rxon_assoc),
                .meta.flags = CMD_WANT_SKB,
                .data = &rxon_assoc,
        };
        const struct iwl3945_rxon_cmd *rxon1 = &priv->staging39_rxon;
        const struct iwl3945_rxon_cmd *rxon2 = &priv->active39_rxon;

        if ((rxon1->flags == rxon2->flags) &&
            (rxon1->filter_flags == rxon2->filter_flags) &&
            (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
            (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
                IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n");
                return 0;
        }

        rxon_assoc.flags = priv->staging39_rxon.flags;
        rxon_assoc.filter_flags = priv->staging39_rxon.filter_flags;
        rxon_assoc.ofdm_basic_rates = priv->staging39_rxon.ofdm_basic_rates;
        rxon_assoc.cck_basic_rates = priv->staging39_rxon.cck_basic_rates;
        rxon_assoc.reserved = 0;

        rc = iwl_send_cmd_sync(priv, &cmd);
        if (rc)
                return rc;

        res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
        if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
                IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
                rc = -EIO;
        }

        priv->alloc_rxb_skb--;
        dev_kfree_skb_any(cmd.meta.u.skb);

        return rc;
}

/**
 * iwl3945_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data.  This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
static int iwl3945_commit_rxon(struct iwl_priv *priv)
{
        /* cast away the const for active_rxon in this function */
        struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active39_rxon;
        int rc = 0;

        if (!iwl_is_alive(priv))
                return -1;

        /* always get timestamp with Rx frame */
        priv->staging39_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

        /* select antenna */
        priv->staging39_rxon.flags &=
            ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
        priv->staging39_rxon.flags |= iwl3945_get_antenna_flags(priv);

        rc = iwl3945_check_rxon_cmd(priv);
        if (rc) {
                IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
                return -EINVAL;
        }

        /* If we don't need to send a full RXON, we can use
         * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
         * and other flags for the current radio configuration. */
        if (!iwl3945_full_rxon_required(priv)) {
                rc = iwl3945_send_rxon_assoc(priv);
                if (rc) {
                        IWL_ERR(priv, "Error setting RXON_ASSOC "
                                "configuration (%d).\n", rc);
                        return rc;
                }

                memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));

                return 0;
        }

        /* If we are currently associated and the new config requires
         * an RXON_ASSOC and the new config wants the associated mask enabled,
         * we must clear the associated from the active configuration
         * before we apply the new config */
        if (iwl3945_is_associated(priv) &&
            (priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
                IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
                active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

                rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
                                      sizeof(struct iwl3945_rxon_cmd),
                                      &priv->active39_rxon);

                /* If the mask clearing failed then we set
                 * active_rxon back to what it was previously */
                if (rc) {
                        active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
                        IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
                                "configuration (%d).\n", rc);
                        return rc;
                }
        }

        IWL_DEBUG_INFO("Sending RXON\n"
                       "* with%s RXON_FILTER_ASSOC_MSK\n"
                       "* channel = %d\n"
                       "* bssid = %pM\n",
                       ((priv->staging39_rxon.filter_flags &
                         RXON_FILTER_ASSOC_MSK) ? "" : "out"),
                       le16_to_cpu(priv->staging39_rxon.channel),
                       priv->staging_rxon.bssid_addr);

        /* Apply the new configuration */
        rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
                              sizeof(struct iwl3945_rxon_cmd), &priv->staging39_rxon);
        if (rc) {
                IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
                return rc;
        }

        memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));

        iwl3945_clear_stations_table(priv);

        /* If we issue a new RXON command which required a tune then we must
         * send a new TXPOWER command or we won't be able to Tx any frames */
        rc = priv->cfg->ops->lib->send_tx_power(priv);
        if (rc) {
                IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
                return rc;
        }

        /* Add the broadcast address so we can send broadcast frames */
        if (iwl3945_add_station(priv, iwl_bcast_addr, 0, 0) ==
            IWL_INVALID_STATION) {
                IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
                return -EIO;
        }

        /* If we have set the ASSOC_MSK and we are in BSS mode then
         * add the IWL_AP_ID to the station rate table */
        if (iwl3945_is_associated(priv) &&
            (priv->iw_mode == NL80211_IFTYPE_STATION))
                if (iwl3945_add_station(priv, priv->active39_rxon.bssid_addr, 1, 0)
                    == IWL_INVALID_STATION) {
                        IWL_ERR(priv, "Error adding AP address for transmit\n");
                        return -EIO;
                }

        /* Init the hardware's rate fallback order based on the band */
        rc = iwl3945_init_hw_rate_table(priv);
        if (rc) {
                IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
                return -EIO;
        }

        return 0;
}

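/*
 * Summary of the ordering enforced by iwl3945_commit_rxon() above:
 * validate the staging RXON, take the RXON_ASSOC shortcut when possible,
 * otherwise drop RXON_FILTER_ASSOC_MSK from the active config, send the
 * full RXON, then rebuild the device state that the tune invalidated
 * (Tx power, broadcast/AP station entries, HW rate table).
 */
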
static int iwl3945_send_bt_config(struct iwl_priv *priv)
{
        struct iwl_bt_cmd bt_cmd = {
                .flags = 3,
                .lead_time = 0xAA,
                .max_kill = 1,
                .kill_ack_mask = 0,
                .kill_cts_mask = 0,
        };

        return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
                                sizeof(bt_cmd), &bt_cmd);
}

static int iwl3945_add_sta_sync_callback(struct iwl_priv *priv,
                struct iwl_cmd *cmd, struct sk_buff *skb)
{
        struct iwl_rx_packet *res = NULL;

        if (!skb) {
                IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
                return 1;
        }

        res = (struct iwl_rx_packet *)skb->data;
        if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
                IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
                        res->hdr.flags);
                return 1;
        }

        switch (res->u.add_sta.status) {
        case ADD_STA_SUCCESS_MSK:
                break;
        default:
                break;
        }

        /* We didn't cache the SKB; let the caller free it */
        return 1;
}

int iwl3945_send_add_station(struct iwl_priv *priv,
                             struct iwl3945_addsta_cmd *sta, u8 flags)
{
        struct iwl_rx_packet *res = NULL;
        int rc = 0;
        struct iwl_host_cmd cmd = {
                .id = REPLY_ADD_STA,
                .len = sizeof(struct iwl3945_addsta_cmd),
                .meta.flags = flags,
                .data = sta,
        };

        if (flags & CMD_ASYNC)
                cmd.meta.u.callback = iwl3945_add_sta_sync_callback;
        else
                cmd.meta.flags |= CMD_WANT_SKB;

        rc = iwl_send_cmd(priv, &cmd);

        if (rc || (flags & CMD_ASYNC))
                return rc;

        res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
        if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
                IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
                        res->hdr.flags);
                rc = -EIO;
        }

        if (rc == 0) {
                switch (res->u.add_sta.status) {
                case ADD_STA_SUCCESS_MSK:
                        IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
                        break;
                default:
                        rc = -EIO;
                        IWL_WARN(priv, "REPLY_ADD_STA failed\n");
                        break;
                }
        }

        priv->alloc_rxb_skb--;
        dev_kfree_skb_any(cmd.meta.u.skb);

        return rc;
}

static int iwl3945_update_sta_key_info(struct iwl_priv *priv,
                                       struct ieee80211_key_conf *keyconf,
                                       u8 sta_id)
{
        unsigned long flags;
        __le16 key_flags = 0;

        switch (keyconf->alg) {
        case ALG_CCMP:
                key_flags |= STA_KEY_FLG_CCMP;
                key_flags |= cpu_to_le16(
                                keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
                key_flags &= ~STA_KEY_FLG_INVALID;
                break;
        case ALG_TKIP:
        case ALG_WEP:
        default:
                return -EINVAL;
        }
        spin_lock_irqsave(&priv->sta_lock, flags);
        priv->stations_39[sta_id].keyinfo.alg = keyconf->alg;
        priv->stations_39[sta_id].keyinfo.keylen = keyconf->keylen;
        memcpy(priv->stations_39[sta_id].keyinfo.key, keyconf->key,
               keyconf->keylen);

        memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
               keyconf->keylen);
        priv->stations_39[sta_id].sta.key.key_flags = key_flags;
        priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
        priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

        spin_unlock_irqrestore(&priv->sta_lock, flags);

        IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
        iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
        return 0;
}

static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->sta_lock, flags);
        memset(&priv->stations_39[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key));
        memset(&priv->stations_39[sta_id].sta.key, 0,
               sizeof(struct iwl4965_keyinfo));
        priv->stations_39[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
        priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
        priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
        spin_unlock_irqrestore(&priv->sta_lock, flags);

        IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
        iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
        return 0;
}

static void iwl3945_clear_free_frames(struct iwl_priv *priv)
{
        struct list_head *element;

        IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
                       priv->frames_count);

        while (!list_empty(&priv->free_frames)) {
                element = priv->free_frames.next;
                list_del(element);
                kfree(list_entry(element, struct iwl3945_frame, list));
                priv->frames_count--;
        }

        if (priv->frames_count) {
                IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
                         priv->frames_count);
                priv->frames_count = 0;
        }
}

static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
{
        struct iwl3945_frame *frame;
        struct list_head *element;
        if (list_empty(&priv->free_frames)) {
                frame = kzalloc(sizeof(*frame), GFP_KERNEL);
                if (!frame) {
                        IWL_ERR(priv, "Could not allocate frame!\n");
                        return NULL;
                }

                priv->frames_count++;
                return frame;
        }

        element = priv->free_frames.next;
        list_del(element);
        return list_entry(element, struct iwl3945_frame, list);
}

static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
{
        memset(frame, 0, sizeof(*frame));
        list_add(&frame->list, &priv->free_frames);
}

unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
                                       struct ieee80211_hdr *hdr,
                                       int left)
{

        if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
            ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
             (priv->iw_mode != NL80211_IFTYPE_AP)))
                return 0;

        if (priv->ibss_beacon->len > left)
                return 0;

        memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);

        return priv->ibss_beacon->len;
}

static u8 iwl3945_rate_get_lowest_plcp(struct iwl_priv *priv)
{
        u8 i;
        int rate_mask;

        /* Set rate mask*/
        if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
                rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
        else
                rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;

        for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
             i = iwl3945_rates[i].next_ieee) {
                if (rate_mask & (1 << i))
                        return iwl3945_rates[i].plcp;
        }

        /* No valid rate was found. Assign the lowest one */
        if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
                return IWL_RATE_1M_PLCP;
        else
                return IWL_RATE_6M_PLCP;
}

static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
{
        struct iwl3945_frame *frame;
        unsigned int frame_size;
        int rc;
        u8 rate;

        frame = iwl3945_get_free_frame(priv);

        if (!frame) {
                IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
                        "command.\n");
                return -ENOMEM;
        }

        rate = iwl3945_rate_get_lowest_plcp(priv);

        frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);

        rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
                              &frame->u.cmd[0]);

        iwl3945_free_frame(priv, frame);

        return rc;
}

/******************************************************************************
 *
 * EEPROM related functions
 *
 ******************************************************************************/

static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
{
        memcpy(mac, priv->eeprom39.mac_address, 6);
}

/*
 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
 * embedded controller) as EEPROM reader; each read is a series of pulses
 * to/from the EEPROM chip, not a single event, so even reads could conflict
 * if they weren't arbitrated by some ownership mechanism.  Here, the driver
 * simply claims ownership, which should be safe when this function is called
 * (i.e. before loading uCode!).
 */
static inline int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
        _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
        return 0;
}

/**
 * iwl3945_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into priv->eeprom39
 *
 * NOTE:  This routine uses the non-debug IO access functions.
 */
int iwl3945_eeprom_init(struct iwl_priv *priv)
{
        u16 *e = (u16 *)&priv->eeprom39;
        u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
        int sz = sizeof(priv->eeprom39);
        int ret;
        u16 addr;

        /* The EEPROM structure has several padding buffers within it
         * and when adding new EEPROM maps is subject to programmer errors
         * which may be very difficult to identify without explicitly
         * checking the resulting size of the eeprom map. */
        BUILD_BUG_ON(sizeof(priv->eeprom39) != IWL_EEPROM_IMAGE_SIZE);

        if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
                IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
                return -ENOENT;
        }

        /* Make sure driver (instead of uCode) is allowed to read EEPROM */
        ret = iwl3945_eeprom_acquire_semaphore(priv);
        if (ret < 0) {
                IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
                return -ENOENT;
        }

        /* eeprom is an array of 16bit values */
        for (addr = 0; addr < sz; addr += sizeof(u16)) {
                u32 r;

                _iwl_write32(priv, CSR_EEPROM_REG,
                             CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
                _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
                ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
                                          CSR_EEPROM_REG_READ_VALID_MSK,
                                          IWL_EEPROM_ACCESS_TIMEOUT);
                if (ret < 0) {
                        IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
                        return ret;
                }

                r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
                e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
        }

        return 0;
}

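/*
 * Note on the read loop above (inferred from the code here, not from the
 * 3945 datasheet): the EEPROM is exposed as an array of 16-bit words; the
 * (addr << 1) places the byte address in the register's address field while
 * the low bits carry the command/valid flags, and the datum is returned in
 * the upper 16 bits of CSR_EEPROM_REG, hence the (r >> 16) before storing
 * into e[addr / 2].
 */
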
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
        if (priv->shared_virt)
                pci_free_consistent(priv->pci_dev,
                                    sizeof(struct iwl3945_shared),
                                    priv->shared_virt,
                                    priv->shared_phys);
}

/*
 * QoS support
 */
static int iwl3945_send_qos_params_command(struct iwl_priv *priv,
                                           struct iwl_qosparam_cmd *qos)
{

        return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
                                sizeof(struct iwl_qosparam_cmd), qos);
}

static void iwl3945_activate_qos(struct iwl_priv *priv, u8 force)
{
        unsigned long flags;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        spin_lock_irqsave(&priv->lock, flags);
        priv->qos_data.def_qos_parm.qos_flags = 0;

        if (priv->qos_data.qos_cap.q_AP.queue_request &&
            !priv->qos_data.qos_cap.q_AP.txop_request)
                priv->qos_data.def_qos_parm.qos_flags |=
                        QOS_PARAM_FLG_TXOP_TYPE_MSK;

        if (priv->qos_data.qos_active)
                priv->qos_data.def_qos_parm.qos_flags |=
                        QOS_PARAM_FLG_UPDATE_EDCA_MSK;

        spin_unlock_irqrestore(&priv->lock, flags);

        if (force || iwl3945_is_associated(priv)) {
                IWL_DEBUG_QOS("send QoS cmd with QoS active %d \n",
                              priv->qos_data.qos_active);

                iwl3945_send_qos_params_command(priv,
                                &(priv->qos_data.def_qos_parm));
        }
}

/*
 * Power management (not Tx power!) functions
 */
#define MSEC_TO_USEC 1024


/* default power management (not Tx power) table values */
/* for TIM  0-10 */
static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
        {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
};

/* for TIM > 10 */
static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
        {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};

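/*
 * Field meaning in the tables above, as consumed by
 * iwl3945_update_power_cmd() below: each entry wraps a power-table command
 * (sleep on/off, two SLP_TOUT() data timeouts, and a five-entry SLP_VEC()
 * sleep-interval vector) plus a trailing no_dtim flag that selects the
 * "sleep over DTIM" behaviour.  This is a reading of the code, not a
 * datasheet description.
 */
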
int iwl3945_power_init_handle(struct iwl_priv *priv)
{
        int rc = 0, i;
        struct iwl_power_mgr *pow_data;
        int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
        u16 pci_pm;

        IWL_DEBUG_POWER("Initialize power \n");

        pow_data = &priv->power_data;

        memset(pow_data, 0, sizeof(*pow_data));

        pow_data->dtim_period = 1;

        memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
        memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);

        rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
        if (rc != 0)
                return 0;
        else {
                struct iwl_powertable_cmd *cmd;

                IWL_DEBUG_POWER("adjust power command flags\n");

                for (i = 0; i < IWL_POWER_MAX; i++) {
                        cmd = &pow_data->pwr_range_0[i].cmd;

                        if (pci_pm & 0x1)
                                cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
                        else
                                cmd->flags |= IWL_POWER_PCI_PM_MSK;
                }
        }
        return rc;
}

static int iwl3945_update_power_cmd(struct iwl_priv *priv,
                                    struct iwl_powertable_cmd *cmd, u32 mode)
{
        struct iwl_power_mgr *pow_data;
        struct iwl_power_vec_entry *range;
        u32 max_sleep = 0;
        int i;
        u8 period = 0;
        bool skip;

        if (mode > IWL_POWER_INDEX_5) {
                IWL_DEBUG_POWER("Error invalid power mode \n");
                return -EINVAL;
        }
        pow_data = &priv->power_data;

        if (pow_data->dtim_period < 10)
                range = &pow_data->pwr_range_0[0];
        else
                range = &pow_data->pwr_range_1[1];

        memcpy(cmd, &range[mode].cmd, sizeof(struct iwl3945_powertable_cmd));

        if (period == 0) {
                period = 1;
                skip = false;
        } else {
                skip = !!range[mode].no_dtim;
        }

        if (skip) {
                __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
                max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
                cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
        } else {
                max_sleep = period;
                cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
        }

        for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
                        cmd->sleep_interval[i] = cpu_to_le32(max_sleep);

        IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
        IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
        IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
        IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
                        le32_to_cpu(cmd->sleep_interval[0]),
                        le32_to_cpu(cmd->sleep_interval[1]),
                        le32_to_cpu(cmd->sleep_interval[2]),
                        le32_to_cpu(cmd->sleep_interval[3]),
                        le32_to_cpu(cmd->sleep_interval[4]));

        return 0;
}

static int iwl3945_send_power_mode(struct iwl_priv *priv, u32 mode)
{
        u32 uninitialized_var(final_mode);
        int rc;
        struct iwl_powertable_cmd cmd;

        /* If on battery, set to 3,
         * if plugged into AC power, set to CAM ("continuously aware mode"),
         * else user level */
        switch (mode) {
        case IWL39_POWER_BATTERY:
                final_mode = IWL_POWER_INDEX_3;
                break;
        case IWL39_POWER_AC:
                final_mode = IWL_POWER_MODE_CAM;
                break;
        default:
                final_mode = mode;
                break;
        }

        iwl3945_update_power_cmd(priv, &cmd, final_mode);

        /* FIXME use get_hcmd_size 3945 command is 4 bytes shorter */
        rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
                              sizeof(struct iwl3945_powertable_cmd), &cmd);

        if (final_mode == IWL_POWER_MODE_CAM)
                clear_bit(STATUS_POWER_PMI, &priv->status);
        else
                set_bit(STATUS_POWER_PMI, &priv->status);

        return rc;
}

#define MAX_UCODE_BEACON_INTERVAL       1024
#define INTEL_CONN_LISTEN_INTERVAL      __constant_cpu_to_le16(0xA)

static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val)
{
        u16 new_val = 0;
        u16 beacon_factor = 0;

        beacon_factor =
            (beacon_val + MAX_UCODE_BEACON_INTERVAL)
                / MAX_UCODE_BEACON_INTERVAL;
        new_val = beacon_val / beacon_factor;

        return cpu_to_le16(new_val);
}

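/*
 * Worked example for the adjustment above (illustrative): with a configured
 * beacon interval of 3000 TU, beacon_factor becomes
 * (3000 + 1024) / 1024 = 3, so the value handed to the uCode is
 * 3000 / 3 = 1000 TU, which fits under MAX_UCODE_BEACON_INTERVAL while
 * remaining an integer divisor of the real beacon period.
 */
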
static void iwl3945_setup_rxon_timing(struct iwl_priv *priv)
{
        u64 interval_tm_unit;
        u64 tsf, result;
        unsigned long flags;
        struct ieee80211_conf *conf = NULL;
        u16 beacon_int = 0;

        conf = ieee80211_get_hw_conf(priv->hw);

        spin_lock_irqsave(&priv->lock, flags);
        priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
        priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;

        tsf = priv->timestamp;

        beacon_int = priv->beacon_int;
        spin_unlock_irqrestore(&priv->lock, flags);

        if (priv->iw_mode == NL80211_IFTYPE_STATION) {
                if (beacon_int == 0) {
                        priv->rxon_timing.beacon_interval = cpu_to_le16(100);
                        priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
                } else {
                        priv->rxon_timing.beacon_interval =
                                cpu_to_le16(beacon_int);
                        priv->rxon_timing.beacon_interval =
                            iwl3945_adjust_beacon_interval(
                                le16_to_cpu(priv->rxon_timing.beacon_interval));
                }

                priv->rxon_timing.atim_window = 0;
        } else {
                priv->rxon_timing.beacon_interval =
                        iwl3945_adjust_beacon_interval(conf->beacon_int);
                /* TODO: we need to get atim_window from upper stack
                 * for now we set to 0 */
                priv->rxon_timing.atim_window = 0;
        }

        interval_tm_unit =
                (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
        result = do_div(tsf, interval_tm_unit);
        priv->rxon_timing.beacon_init_val =
            cpu_to_le32((u32) ((u64) interval_tm_unit - result));

        IWL_DEBUG_ASSOC
            ("beacon interval %d beacon timer %d beacon tim %d\n",
                le16_to_cpu(priv->rxon_timing.beacon_interval),
                le32_to_cpu(priv->rxon_timing.beacon_init_val),
                le16_to_cpu(priv->rxon_timing.atim_window));
}

static int iwl3945_scan_initiate(struct iwl_priv *priv)
{
        if (!iwl_is_ready_rf(priv)) {
                IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
                return -EIO;
        }

        if (test_bit(STATUS_SCANNING, &priv->status)) {
                IWL_DEBUG_SCAN("Scan already in progress.\n");
                return -EAGAIN;
        }

        if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
                IWL_DEBUG_SCAN("Scan request while abort pending.  "
                               "Queuing.\n");
                return -EAGAIN;
        }

        IWL_DEBUG_INFO("Starting scan...\n");
        if (priv->cfg->sku & IWL_SKU_G)
                priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
        if (priv->cfg->sku & IWL_SKU_A)
                priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
        set_bit(STATUS_SCANNING, &priv->status);
        priv->scan_start = jiffies;
        priv->scan_pass_start = priv->scan_start;

        queue_work(priv->workqueue, &priv->request_scan);

        return 0;
}

static int iwl3945_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
{
        struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;

        if (hw_decrypt)
                rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
        else
                rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;

        return 0;
}

static void iwl3945_set_flags_for_phymode(struct iwl_priv *priv,
                                          enum ieee80211_band band)
{
        if (band == IEEE80211_BAND_5GHZ) {
                priv->staging39_rxon.flags &=
                    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
                      | RXON_FLG_CCK_MSK);
                priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
        } else {
                /* Copied from iwl3945_bg_post_associate() */
                if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
                        priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
                else
                        priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

                if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
                        priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

                priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;
                priv->staging39_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
                priv->staging39_rxon.flags &= ~RXON_FLG_CCK_MSK;
        }
}

/*
 * initialize rxon structure with default values from eeprom
 */
static void iwl3945_connection_init_rx_config(struct iwl_priv *priv,
                                              int mode)
{
        const struct iwl_channel_info *ch_info;

        memset(&priv->staging39_rxon, 0, sizeof(priv->staging39_rxon));

        switch (mode) {
        case NL80211_IFTYPE_AP:
                priv->staging39_rxon.dev_type = RXON_DEV_TYPE_AP;
                break;

        case NL80211_IFTYPE_STATION:
                priv->staging39_rxon.dev_type = RXON_DEV_TYPE_ESS;
                priv->staging39_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
                break;

        case NL80211_IFTYPE_ADHOC:
                priv->staging39_rxon.dev_type = RXON_DEV_TYPE_IBSS;
                priv->staging39_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
                priv->staging39_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
                    RXON_FILTER_ACCEPT_GRP_MSK;
                break;

        case NL80211_IFTYPE_MONITOR:
                priv->staging39_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
                priv->staging39_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
                    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
                break;
        default:
                IWL_ERR(priv, "Unsupported interface type %d\n", mode);
                break;
        }

#if 0
        /* TODO:  Figure out when short_preamble would be set and cache from
         * that */
        if (!hw_to_local(priv->hw)->short_preamble)
                priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
        else
                priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

        ch_info = iwl3945_get_channel_info(priv, priv->band,
                                le16_to_cpu(priv->active39_rxon.channel));

        if (!ch_info)
                ch_info = &priv->channel_info[0];

        /*
         * In some cases all the 5.2 GHz (A-band) channels are non-IBSS;
         * in that case force a B/G channel instead.
         */
        if ((mode == NL80211_IFTYPE_ADHOC) && !(is_channel_ibss(ch_info)))
                ch_info = &priv->channel_info[0];

        priv->staging39_rxon.channel = cpu_to_le16(ch_info->channel);
        if (is_channel_a_band(ch_info))
                priv->band = IEEE80211_BAND_5GHZ;
        else
                priv->band = IEEE80211_BAND_2GHZ;

        iwl3945_set_flags_for_phymode(priv, priv->band);

        priv->staging39_rxon.ofdm_basic_rates =
            (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
        priv->staging39_rxon.cck_basic_rates =
            (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
}

static int iwl3945_set_mode(struct iwl_priv *priv, int mode)
{
        if (mode == NL80211_IFTYPE_ADHOC) {
                const struct iwl_channel_info *ch_info;

                ch_info = iwl3945_get_channel_info(priv,
                        priv->band,
                        le16_to_cpu(priv->staging39_rxon.channel));

                if (!ch_info || !is_channel_ibss(ch_info)) {
                        IWL_ERR(priv, "channel %d not IBSS channel\n",
                                le16_to_cpu(priv->staging39_rxon.channel));
                        return -EINVAL;
                }
        }

        iwl3945_connection_init_rx_config(priv, mode);
        memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);

        iwl3945_clear_stations_table(priv);

        /* don't commit rxon if rf-kill is on*/
        if (!iwl_is_ready_rf(priv))
                return -EAGAIN;

        cancel_delayed_work(&priv->scan_check);
        if (iwl_scan_cancel_timeout(priv, 100)) {
                IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
                IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
                return -EAGAIN;
        }

        iwl3945_commit_rxon(priv);

        return 0;
}

static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
                                          struct ieee80211_tx_info *info,
                                          struct iwl_cmd *cmd,
                                          struct sk_buff *skb_frag,
                                          int last_frag)
{
        struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
        struct iwl3945_hw_key *keyinfo =
            &priv->stations_39[info->control.hw_key->hw_key_idx].keyinfo;

        switch (keyinfo->alg) {
        case ALG_CCMP:
                tx->sec_ctl = TX_CMD_SEC_CCM;
                memcpy(tx->key, keyinfo->key, keyinfo->keylen);
                IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
                break;

        case ALG_TKIP:
#if 0
                tx->sec_ctl = TX_CMD_SEC_TKIP;

                if (last_frag)
                        memcpy(tx->tkip_mic.byte, skb_frag->tail - 8,
                               8);
                else
                        memset(tx->tkip_mic.byte, 0, 8);
#endif
                break;

        case ALG_WEP:
                tx->sec_ctl = TX_CMD_SEC_WEP |
                    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;

                if (keyinfo->keylen == 13)
                        tx->sec_ctl |= TX_CMD_SEC_KEY128;

                memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen);

                IWL_DEBUG_TX("Configuring packet for WEP encryption "
                             "with key %d\n", info->control.hw_key->hw_key_idx);
                break;

        default:
                IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg);
                break;
        }
}

1623/*
1624 * build the basic portion of the REPLY_TX command.
1625 */
4a8a4322 1626static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
c2d79b48 1627 struct iwl_cmd *cmd,
e039fa4a 1628 struct ieee80211_tx_info *info,
e52119c5 1629 struct ieee80211_hdr *hdr, u8 std_id)
b481de9c 1630{
e52119c5
WT
1631 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
1632 __le32 tx_flags = tx->tx_flags;
fd7c8a40 1633 __le16 fc = hdr->frame_control;
e6a9854b 1634 u8 rc_flags = info->control.rates[0].flags;
b481de9c 1635
e52119c5 1636 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
e039fa4a 1637 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
b481de9c 1638 tx_flags |= TX_CMD_FLG_ACK_MSK;
fd7c8a40 1639 if (ieee80211_is_mgmt(fc))
b481de9c 1640 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
fd7c8a40 1641 if (ieee80211_is_probe_resp(fc) &&
b481de9c
ZY
1642 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
1643 tx_flags |= TX_CMD_FLG_TSF_MSK;
1644 } else {
1645 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
1646 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1647 }
1648
e52119c5 1649 tx->sta_id = std_id;
8b7b1e05 1650 if (ieee80211_has_morefrags(fc))
b481de9c
ZY
1651 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
1652
fd7c8a40
HH
1653 if (ieee80211_is_data_qos(fc)) {
1654 u8 *qc = ieee80211_get_qos_ctl(hdr);
e52119c5 1655 tx->tid_tspec = qc[0] & 0xf;
b481de9c 1656 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
54dbb525 1657 } else {
b481de9c 1658 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
54dbb525 1659 }
b481de9c 1660
e6a9854b 1661 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
b481de9c
ZY
1662 tx_flags |= TX_CMD_FLG_RTS_MSK;
1663 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
e6a9854b 1664 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
b481de9c
ZY
1665 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
1666 tx_flags |= TX_CMD_FLG_CTS_MSK;
1667 }
1668
1669 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
1670 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
1671
1672 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
fd7c8a40
HH
1673 if (ieee80211_is_mgmt(fc)) {
1674 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
e52119c5 1675 tx->timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 1676 else
e52119c5 1677 tx->timeout.pm_frame_timeout = cpu_to_le16(2);
ab53d8af 1678 } else {
e52119c5 1679 tx->timeout.pm_frame_timeout = 0;
ab53d8af
MA
1680#ifdef CONFIG_IWL3945_LEDS
1681 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
1682#endif
1683 }
b481de9c 1684
e52119c5
WT
1685 tx->driver_txop = 0;
1686 tx->tx_flags = tx_flags;
1687 tx->next_frame_len = 0;
b481de9c
ZY
1688}
1689
6440adb5
BC
1690/**
1691 * iwl3945_get_sta_id - Find station's index within station table
1692 */
4a8a4322 1693static int iwl3945_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
b481de9c
ZY
1694{
1695 int sta_id;
1696 u16 fc = le16_to_cpu(hdr->frame_control);
1697
6440adb5 1698 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
1699 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
1700 is_multicast_ether_addr(hdr->addr1))
3832ec9d 1701 return priv->hw_params.bcast_sta_id;
b481de9c
ZY
1702
1703 switch (priv->iw_mode) {
1704
6440adb5
BC
1705 /* If we are a client station in a BSS network, use the special
1706 * AP station entry (that's the only station we communicate with) */
05c914fe 1707 case NL80211_IFTYPE_STATION:
b481de9c
ZY
1708 return IWL_AP_ID;
1709
1710 /* If we are an AP, then find the station, or use BCAST */
05c914fe 1711 case NL80211_IFTYPE_AP:
bb8c093b 1712 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
1713 if (sta_id != IWL_INVALID_STATION)
1714 return sta_id;
3832ec9d 1715 return priv->hw_params.bcast_sta_id;
b481de9c 1716
6440adb5
BC
1717 /* If this frame is going out to an IBSS network, find the station,
1718 * or create a new station table entry */
05c914fe 1719 case NL80211_IFTYPE_ADHOC: {
6440adb5 1720 /* Create new station table entry */
bb8c093b 1721 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
1722 if (sta_id != IWL_INVALID_STATION)
1723 return sta_id;
1724
bb8c093b 1725 sta_id = iwl3945_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
b481de9c
ZY
1726
1727 if (sta_id != IWL_INVALID_STATION)
1728 return sta_id;
1729
e174961c 1730 IWL_DEBUG_DROP("Station %pM not in station map. "
b481de9c 1731 "Defaulting to broadcast...\n",
e174961c 1732 hdr->addr1);
40b8ec0b 1733 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
3832ec9d 1734 return priv->hw_params.bcast_sta_id;
0795af57 1735 }
914233d6
SG
1736 /* If we are in monitor mode, use BCAST. This is required for
1737 * packet injection. */
05c914fe 1738 case NL80211_IFTYPE_MONITOR:
3832ec9d 1739 return priv->hw_params.bcast_sta_id;
914233d6 1740
b481de9c 1741 default:
39aadf8c
WT
1742 IWL_WARN(priv, "Unknown mode of operation: %d\n",
1743 priv->iw_mode);
3832ec9d 1744 return priv->hw_params.bcast_sta_id;
b481de9c
ZY
1745 }
1746}
1747
1748/*
1749 * start REPLY_TX command process
1750 */
4a8a4322 1751static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
b481de9c
ZY
1752{
1753 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e039fa4a 1754 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
e52119c5 1755 struct iwl3945_tx_cmd *tx;
188cf6c7 1756 struct iwl_tx_queue *txq = NULL;
d20b3c65 1757 struct iwl_queue *q = NULL;
e52119c5 1758 struct iwl_cmd *out_cmd = NULL;
b481de9c
ZY
1759 dma_addr_t phys_addr;
1760 dma_addr_t txcmd_phys;
e52119c5 1761 int txq_id = skb_get_queue_mapping(skb);
54dbb525
TW
1762 u16 len, idx, len_org, hdr_len;
1763 u8 id;
1764 u8 unicast;
b481de9c 1765 u8 sta_id;
54dbb525 1766 u8 tid = 0;
b481de9c 1767 u16 seq_number = 0;
fd7c8a40 1768 __le16 fc;
b481de9c 1769 u8 wait_write_ptr = 0;
54dbb525 1770 u8 *qc = NULL;
b481de9c
ZY
1771 unsigned long flags;
1772 int rc;
1773
1774 spin_lock_irqsave(&priv->lock, flags);
775a6e27 1775 if (iwl_is_rfkill(priv)) {
b481de9c
ZY
1776 IWL_DEBUG_DROP("Dropping - RF KILL\n");
1777 goto drop_unlock;
1778 }
1779
e039fa4a 1780 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
15b1687c 1781 IWL_ERR(priv, "ERROR: No TX rate available.\n");
b481de9c
ZY
1782 goto drop_unlock;
1783 }
1784
1785 unicast = !is_multicast_ether_addr(hdr->addr1);
1786 id = 0;
1787
fd7c8a40 1788 fc = hdr->frame_control;
b481de9c 1789
c8b0e6e1 1790#ifdef CONFIG_IWL3945_DEBUG
b481de9c
ZY
1791 if (ieee80211_is_auth(fc))
1792 IWL_DEBUG_TX("Sending AUTH frame\n");
fd7c8a40 1793 else if (ieee80211_is_assoc_req(fc))
b481de9c 1794 IWL_DEBUG_TX("Sending ASSOC frame\n");
fd7c8a40 1795 else if (ieee80211_is_reassoc_req(fc))
b481de9c
ZY
1796 IWL_DEBUG_TX("Sending REASSOC frame\n");
1797#endif
1798
7878a5a4 1799 /* drop all data frames if we are not associated */
914233d6 1800 if (ieee80211_is_data(fc) &&
05c914fe 1801 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
914233d6 1802 (!iwl3945_is_associated(priv) ||
05c914fe 1803 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
bb8c093b 1804 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
b481de9c
ZY
1805 goto drop_unlock;
1806 }
1807
1808 spin_unlock_irqrestore(&priv->lock, flags);
1809
7294ec95 1810 hdr_len = ieee80211_hdrlen(fc);
6440adb5
BC
1811
1812 /* Find (or create) index into station table for destination station */
bb8c093b 1813 sta_id = iwl3945_get_sta_id(priv, hdr);
b481de9c 1814 if (sta_id == IWL_INVALID_STATION) {
e174961c
JB
1815 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
1816 hdr->addr1);
b481de9c
ZY
1817 goto drop;
1818 }
1819
1820 IWL_DEBUG_RATE("station Id %d\n", sta_id);
1821
fd7c8a40
HH
1822 if (ieee80211_is_data_qos(fc)) {
1823 qc = ieee80211_get_qos_ctl(hdr);
7294ec95 1824 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
f2c7e521 1825 seq_number = priv->stations_39[sta_id].tid[tid].seq_number &
b481de9c
ZY
1826 IEEE80211_SCTL_SEQ;
1827 hdr->seq_ctrl = cpu_to_le16(seq_number) |
1828 (hdr->seq_ctrl &
1829 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
1830 seq_number += 0x10;
1831 }
6440adb5
BC
1832
1833 /* Descriptor for chosen Tx queue */
188cf6c7 1834 txq = &priv->txq[txq_id];
b481de9c
ZY
1835 q = &txq->q;
1836
1837 spin_lock_irqsave(&priv->lock, flags);
1838
fc4b6853 1839 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 1840
6440adb5 1841 /* Set up driver data for this TFD */
dbb6654c 1842 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
fc4b6853 1843 txq->txb[q->write_ptr].skb[0] = skb;
6440adb5
BC
1844
1845 /* Init first empty entry in queue's array of Tx/cmd buffers */
188cf6c7 1846 out_cmd = txq->cmd[idx];
e52119c5 1847 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
b481de9c 1848 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
e52119c5 1849 memset(tx, 0, sizeof(*tx));
6440adb5
BC
1850
1851 /*
1852 * Set up the Tx-command (not MAC!) header.
1853 * Store the chosen Tx queue and TFD index within the sequence field;
 1854 * after Tx, uCode's Tx response will return this value so the driver
 1855 * can locate the frame within the tx queue and do post-tx processing.
1856 */
b481de9c
ZY
1857 out_cmd->hdr.cmd = REPLY_TX;
1858 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 1859 INDEX_TO_SEQ(q->write_ptr)));
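	/*
	 * Editor's note (illustrative, not from the original source): the
	 * completion path recovers these two values from the returned
	 * sequence with SEQ_TO_QUEUE() and SEQ_TO_INDEX(); see
	 * iwl3945_tx_cmd_complete() later in this file.
	 */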
6440adb5
BC
1860
1861 /* Copy MAC header from skb into command buffer */
e52119c5 1862 memcpy(tx->hdr, hdr, hdr_len);
b481de9c 1863
6440adb5
BC
1864 /*
1865 * Use the first empty entry in this queue's command buffer array
1866 * to contain the Tx command and MAC header concatenated together
1867 * (payload data will be in another buffer).
1868 * Size of this varies, due to varying MAC header length.
1869 * If end is not dword aligned, we'll have 2 extra bytes at the end
1870 * of the MAC header (device reads on dword boundaries).
1871 * We'll tell device about this padding later.
1872 */
3832ec9d 1873 len = sizeof(struct iwl3945_tx_cmd) +
4c897253 1874 sizeof(struct iwl_cmd_header) + hdr_len;
b481de9c
ZY
1875
1876 len_org = len;
1877 len = (len + 3) & ~3;
1878
1879 if (len_org != len)
1880 len_org = 1;
1881 else
1882 len_org = 0;
1883
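	/*
	 * Editor's illustration (not in the original source): "(len + 3) & ~3"
	 * rounds len up to the next multiple of 4, e.g. a 58-byte total
	 * becomes 60; len_org is then reused as a flag recording whether any
	 * such round-up took place.
	 */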
6440adb5
BC
1884 /* Physical address of this Tx command's header (not MAC header!),
1885 * within command buffer array. */
188cf6c7
SO
1886 txcmd_phys = pci_map_single(priv->pci_dev,
1887 out_cmd, sizeof(struct iwl_cmd),
1888 PCI_DMA_TODEVICE);
1889 pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
1890 pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
1891 /* Add buffer containing Tx command and MAC(!) header to TFD's
1892 * first entry */
1893 txcmd_phys += offsetof(struct iwl_cmd, hdr);
b481de9c 1894
6440adb5
BC
1895 /* Add buffer containing Tx command and MAC(!) header to TFD's
1896 * first entry */
7aaa1d79
SO
1897 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1898 txcmd_phys, len, 1, 0);
b481de9c 1899
d0f09804 1900 if (info->control.hw_key)
e039fa4a 1901 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
b481de9c 1902
6440adb5
BC
1903 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1904 * if any (802.11 null frames have no payload). */
b481de9c
ZY
1905 len = skb->len - hdr_len;
1906 if (len) {
1907 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
1908 len, PCI_DMA_TODEVICE);
7aaa1d79
SO
1909 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1910 phys_addr, len,
1911 0, U32_PAD(len));
b481de9c
ZY
1912 }
1913
6440adb5 1914 /* Total # bytes to be transmitted */
b481de9c 1915 len = (u16)skb->len;
e52119c5 1916 tx->len = cpu_to_le16(len);
b481de9c
ZY
1917
1918 /* TODO need this for burst mode later on */
e52119c5 1919 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
b481de9c
ZY
1920
1921 /* set is_hcca to 0; it probably will never be implemented */
e039fa4a 1922 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
b481de9c 1923
e52119c5
WT
1924 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
1925 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
b481de9c 1926
8b7b1e05 1927 if (!ieee80211_has_morefrags(hdr->frame_control)) {
b481de9c 1928 txq->need_update = 1;
3ac7f146 1929 if (qc)
f2c7e521 1930 priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
b481de9c
ZY
1931 } else {
1932 wait_write_ptr = 1;
1933 txq->need_update = 0;
1934 }
1935
e52119c5 1936 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
b481de9c 1937
e52119c5 1938 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
7294ec95 1939 ieee80211_hdrlen(fc));
b481de9c 1940
6440adb5 1941 /* Tell device the write index *just past* this latest filled TFD */
c54b679d 1942 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
4f3602c8 1943 rc = iwl_txq_update_write_ptr(priv, txq);
b481de9c
ZY
1944 spin_unlock_irqrestore(&priv->lock, flags);
1945
1946 if (rc)
1947 return rc;
1948
d20b3c65 1949 if ((iwl_queue_space(q) < q->high_mark)
b481de9c
ZY
1950 && priv->mac80211_registered) {
1951 if (wait_write_ptr) {
1952 spin_lock_irqsave(&priv->lock, flags);
1953 txq->need_update = 1;
4f3602c8 1954 iwl_txq_update_write_ptr(priv, txq);
b481de9c
ZY
1955 spin_unlock_irqrestore(&priv->lock, flags);
1956 }
1957
e2530083 1958 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
b481de9c
ZY
1959 }
1960
1961 return 0;
1962
1963drop_unlock:
1964 spin_unlock_irqrestore(&priv->lock, flags);
1965drop:
1966 return -1;
1967}
1968
4a8a4322 1969static void iwl3945_set_rate(struct iwl_priv *priv)
b481de9c 1970{
8318d78a 1971 const struct ieee80211_supported_band *sband = NULL;
b481de9c
ZY
1972 struct ieee80211_rate *rate;
1973 int i;
1974
cbba18c6 1975 sband = iwl_get_hw_mode(priv, priv->band);
8318d78a 1976 if (!sband) {
15b1687c 1977 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
c4ba9621
SA
1978 return;
1979 }
b481de9c
ZY
1980
1981 priv->active_rate = 0;
1982 priv->active_rate_basic = 0;
1983
8318d78a
JB
1984 IWL_DEBUG_RATE("Setting rates for %s GHz\n",
1985 sband->band == IEEE80211_BAND_2GHZ ? "2.4" : "5");
1986
1987 for (i = 0; i < sband->n_bitrates; i++) {
1988 rate = &sband->bitrates[i];
1989 if ((rate->hw_value < IWL_RATE_COUNT) &&
1990 !(rate->flags & IEEE80211_CHAN_DISABLED)) {
1991 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)\n",
1992 rate->hw_value, iwl3945_rates[rate->hw_value].plcp);
1993 priv->active_rate |= (1 << rate->hw_value);
1994 }
b481de9c
ZY
1995 }
1996
1997 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
1998 priv->active_rate, priv->active_rate_basic);
1999
2000 /*
2001 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
2002 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
2003 * OFDM
2004 */
2005 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
f2c7e521 2006 priv->staging39_rxon.cck_basic_rates =
b481de9c
ZY
2007 ((priv->active_rate_basic &
2008 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2009 else
f2c7e521 2010 priv->staging39_rxon.cck_basic_rates =
b481de9c
ZY
2011 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2012
2013 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
f2c7e521 2014 priv->staging39_rxon.ofdm_basic_rates =
b481de9c
ZY
2015 ((priv->active_rate_basic &
2016 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2017 IWL_FIRST_OFDM_RATE) & 0xFF;
2018 else
f2c7e521 2019 priv->staging39_rxon.ofdm_basic_rates =
b481de9c
ZY
2020 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2021}
2022
4a8a4322 2023static void iwl3945_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
b481de9c
ZY
2024{
2025 unsigned long flags;
2026
2027 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2028 return;
2029
2030 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2031 disable_radio ? "OFF" : "ON");
2032
2033 if (disable_radio) {
af0053d6 2034 iwl_scan_cancel(priv);
b481de9c 2035 /* FIXME: This is a workaround for AP */
05c914fe 2036 if (priv->iw_mode != NL80211_IFTYPE_AP) {
b481de9c 2037 spin_lock_irqsave(&priv->lock, flags);
5d49f498 2038 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
2039 CSR_UCODE_SW_BIT_RFKILL);
2040 spin_unlock_irqrestore(&priv->lock, flags);
c496294e 2041 iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
b481de9c
ZY
2042 set_bit(STATUS_RF_KILL_SW, &priv->status);
2043 }
2044 return;
2045 }
2046
2047 spin_lock_irqsave(&priv->lock, flags);
5d49f498 2048 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
2049
2050 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2051 spin_unlock_irqrestore(&priv->lock, flags);
2052
2053 /* wake up ucode */
2054 msleep(10);
2055
2056 spin_lock_irqsave(&priv->lock, flags);
5d49f498
AK
2057 iwl_read32(priv, CSR_UCODE_DRV_GP1);
2058 if (!iwl_grab_nic_access(priv))
2059 iwl_release_nic_access(priv);
b481de9c
ZY
2060 spin_unlock_irqrestore(&priv->lock, flags);
2061
2062 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2063 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2064 "disabled by HW switch\n");
2065 return;
2066 }
2067
808e72a0
ZY
2068 if (priv->is_open)
2069 queue_work(priv->workqueue, &priv->restart);
b481de9c
ZY
2070 return;
2071}
2072
4a8a4322 2073void iwl3945_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
b481de9c
ZY
2074 u32 decrypt_res, struct ieee80211_rx_status *stats)
2075{
2076 u16 fc =
2077 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2078
f2c7e521 2079 if (priv->active39_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
b481de9c
ZY
2080 return;
2081
2082 if (!(fc & IEEE80211_FCTL_PROTECTED))
2083 return;
2084
2085 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2086 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2087 case RX_RES_STATUS_SEC_TYPE_TKIP:
2088 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2089 RX_RES_STATUS_BAD_ICV_MIC)
2090 stats->flag |= RX_FLAG_MMIC_ERROR;
2091 case RX_RES_STATUS_SEC_TYPE_WEP:
2092 case RX_RES_STATUS_SEC_TYPE_CCMP:
2093 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2094 RX_RES_STATUS_DECRYPT_OK) {
2095 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2096 stats->flag |= RX_FLAG_DECRYPTED;
2097 }
2098 break;
2099
2100 default:
2101 break;
2102 }
2103}
2104
c8b0e6e1 2105#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
b481de9c
ZY
2106
2107#include "iwl-spectrum.h"
2108
2109#define BEACON_TIME_MASK_LOW 0x00FFFFFF
2110#define BEACON_TIME_MASK_HIGH 0xFF000000
2111#define TIME_UNIT 1024
2112
2113/*
2114 * extended beacon time format
 2115 * a time in usec is converted into a 32-bit value in 8:24 format:
 2116 * the high byte is the beacon count,
 2117 * the low 3 bytes are the time in usec within one beacon interval
2118 */
2119
bb8c093b 2120static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
2121{
2122 u32 quot;
2123 u32 rem;
2124 u32 interval = beacon_interval * 1024;
2125
2126 if (!interval || !usec)
2127 return 0;
2128
2129 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
2130 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
2131
2132 return (quot << 24) + rem;
2133}
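
/*
 * Editor's illustration (not part of the original driver): a minimal sketch
 * of how a value produced by iwl3945_usecs_to_beacons() splits back into its
 * 8:24 parts. The helper name is hypothetical and exists only as an example.
 */
static inline void iwl3945_example_split_beacon_time(u32 packed,
						     u32 *beacon_count,
						     u32 *usec_in_interval)
{
	/* high byte: number of whole beacon intervals */
	*beacon_count = (packed & BEACON_TIME_MASK_HIGH) >> 24;
	/* low 3 bytes: time in usec within the current beacon interval */
	*usec_in_interval = packed & BEACON_TIME_MASK_LOW;
}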
2134
 2135/* base is usually what we get from ucode with each received frame,
 2136 * i.e. the same as the HW timer counter, which counts down
2137 */
2138
bb8c093b 2139static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
2140{
2141 u32 base_low = base & BEACON_TIME_MASK_LOW;
2142 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
2143 u32 interval = beacon_interval * TIME_UNIT;
2144 u32 res = (base & BEACON_TIME_MASK_HIGH) +
2145 (addon & BEACON_TIME_MASK_HIGH);
2146
2147 if (base_low > addon_low)
2148 res += base_low - addon_low;
2149 else if (base_low < addon_low) {
2150 res += interval + base_low - addon_low;
2151 res += (1 << 24);
2152 } else
2153 res += (1 << 24);
2154
2155 return cpu_to_le32(res);
2156}
2157
4a8a4322 2158static int iwl3945_get_measurement(struct iwl_priv *priv,
b481de9c
ZY
2159 struct ieee80211_measurement_params *params,
2160 u8 type)
2161{
600c0e11 2162 struct iwl_spectrum_cmd spectrum;
3d24a9f7 2163 struct iwl_rx_packet *res;
c2d79b48 2164 struct iwl_host_cmd cmd = {
b481de9c
ZY
2165 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2166 .data = (void *)&spectrum,
2167 .meta.flags = CMD_WANT_SKB,
2168 };
2169 u32 add_time = le64_to_cpu(params->start_time);
2170 int rc;
2171 int spectrum_resp_status;
2172 int duration = le16_to_cpu(params->duration);
2173
bb8c093b 2174 if (iwl3945_is_associated(priv))
b481de9c 2175 add_time =
bb8c093b 2176 iwl3945_usecs_to_beacons(
b481de9c
ZY
2177 le64_to_cpu(params->start_time) - priv->last_tsf,
2178 le16_to_cpu(priv->rxon_timing.beacon_interval));
2179
2180 memset(&spectrum, 0, sizeof(spectrum));
2181
2182 spectrum.channel_count = cpu_to_le16(1);
2183 spectrum.flags =
2184 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
2185 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
2186 cmd.len = sizeof(spectrum);
2187 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
2188
bb8c093b 2189 if (iwl3945_is_associated(priv))
b481de9c 2190 spectrum.start_time =
bb8c093b 2191 iwl3945_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
2192 add_time,
2193 le16_to_cpu(priv->rxon_timing.beacon_interval));
2194 else
2195 spectrum.start_time = 0;
2196
2197 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
2198 spectrum.channels[0].channel = params->channel;
2199 spectrum.channels[0].type = type;
f2c7e521 2200 if (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK)
b481de9c
ZY
2201 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
2202 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
2203
518099a8 2204 rc = iwl_send_cmd_sync(priv, &cmd);
b481de9c
ZY
2205 if (rc)
2206 return rc;
2207
3d24a9f7 2208 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
b481de9c 2209 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
15b1687c 2210 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
b481de9c
ZY
2211 rc = -EIO;
2212 }
2213
2214 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
2215 switch (spectrum_resp_status) {
2216 case 0: /* Command will be handled */
2217 if (res->u.spectrum.id != 0xff) {
bc434dd2
IS
2218 IWL_DEBUG_INFO("Replaced existing measurement: %d\n",
2219 res->u.spectrum.id);
b481de9c
ZY
2220 priv->measurement_status &= ~MEASUREMENT_READY;
2221 }
2222 priv->measurement_status |= MEASUREMENT_ACTIVE;
2223 rc = 0;
2224 break;
2225
2226 case 1: /* Command will not be handled */
2227 rc = -EAGAIN;
2228 break;
2229 }
2230
2231 dev_kfree_skb_any(cmd.meta.u.skb);
2232
2233 return rc;
2234}
2235#endif
2236
4a8a4322 2237static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
6100b588 2238 struct iwl_rx_mem_buffer *rxb)
b481de9c 2239{
3d24a9f7
TW
2240 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2241 struct iwl_alive_resp *palive;
b481de9c
ZY
2242 struct delayed_work *pwork;
2243
2244 palive = &pkt->u.alive_frame;
2245
2246 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
2247 "0x%01X 0x%01X\n",
2248 palive->is_valid, palive->ver_type,
2249 palive->ver_subtype);
2250
2251 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
2252 IWL_DEBUG_INFO("Initialization Alive received.\n");
3d24a9f7
TW
2253 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
2254 sizeof(struct iwl_alive_resp));
b481de9c
ZY
2255 pwork = &priv->init_alive_start;
2256 } else {
2257 IWL_DEBUG_INFO("Runtime Alive received.\n");
2258 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3d24a9f7 2259 sizeof(struct iwl_alive_resp));
b481de9c 2260 pwork = &priv->alive_start;
bb8c093b 2261 iwl3945_disable_events(priv);
b481de9c
ZY
2262 }
2263
2264 /* We delay the ALIVE response by 5ms to
2265 * give the HW RF Kill time to activate... */
2266 if (palive->is_valid == UCODE_VALID_OK)
2267 queue_delayed_work(priv->workqueue, pwork,
2268 msecs_to_jiffies(5));
2269 else
39aadf8c 2270 IWL_WARN(priv, "uCode did not respond OK.\n");
b481de9c
ZY
2271}
2272
4a8a4322 2273static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
6100b588 2274 struct iwl_rx_mem_buffer *rxb)
b481de9c 2275{
c7e035a9 2276#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2277 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
c7e035a9 2278#endif
b481de9c
ZY
2279
2280 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
2281 return;
2282}
2283
4a8a4322 2284static void iwl3945_rx_reply_error(struct iwl_priv *priv,
6100b588 2285 struct iwl_rx_mem_buffer *rxb)
b481de9c 2286{
3d24a9f7 2287 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c 2288
15b1687c 2289 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
b481de9c
ZY
2290 "seq 0x%04X ser 0x%08X\n",
2291 le32_to_cpu(pkt->u.err_resp.error_type),
2292 get_cmd_string(pkt->u.err_resp.cmd_id),
2293 pkt->u.err_resp.cmd_id,
2294 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
2295 le32_to_cpu(pkt->u.err_resp.error_info));
2296}
2297
2298#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2299
4a8a4322 2300static void iwl3945_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
b481de9c 2301{
3d24a9f7 2302 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
f2c7e521 2303 struct iwl3945_rxon_cmd *rxon = (void *)&priv->active39_rxon;
600c0e11 2304 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
2305 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
2306 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
2307 rxon->channel = csa->channel;
f2c7e521 2308 priv->staging39_rxon.channel = csa->channel;
b481de9c
ZY
2309}
2310
4a8a4322 2311static void iwl3945_rx_spectrum_measure_notif(struct iwl_priv *priv,
6100b588 2312 struct iwl_rx_mem_buffer *rxb)
b481de9c 2313{
c8b0e6e1 2314#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
3d24a9f7 2315 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
600c0e11 2316 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
2317
2318 if (!report->state) {
2319 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
2320 "Spectrum Measure Notification: Start\n");
2321 return;
2322 }
2323
2324 memcpy(&priv->measure_report, report, sizeof(*report));
2325 priv->measurement_status |= MEASUREMENT_READY;
2326#endif
2327}
2328
4a8a4322 2329static void iwl3945_rx_pm_sleep_notif(struct iwl_priv *priv,
6100b588 2330 struct iwl_rx_mem_buffer *rxb)
b481de9c 2331{
c8b0e6e1 2332#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2333 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
600c0e11 2334 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
2335 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
2336 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
2337#endif
2338}
2339
4a8a4322 2340static void iwl3945_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
6100b588 2341 struct iwl_rx_mem_buffer *rxb)
b481de9c 2342{
3d24a9f7 2343 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
2344 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
2345 "notification for %s:\n",
2346 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
40b8ec0b
SO
2347 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw,
2348 le32_to_cpu(pkt->len));
b481de9c
ZY
2349}
2350
bb8c093b 2351static void iwl3945_bg_beacon_update(struct work_struct *work)
b481de9c 2352{
4a8a4322
AK
2353 struct iwl_priv *priv =
2354 container_of(work, struct iwl_priv, beacon_update);
b481de9c
ZY
2355 struct sk_buff *beacon;
2356
 2357 /* Pull updated AP beacon from mac80211; this will fail if not in AP mode */
e039fa4a 2358 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
b481de9c
ZY
2359
2360 if (!beacon) {
15b1687c 2361 IWL_ERR(priv, "update beacon failed\n");
b481de9c
ZY
2362 return;
2363 }
2364
2365 mutex_lock(&priv->mutex);
 2366 /* new beacon skb is allocated every time; dispose of the previous one. */
2367 if (priv->ibss_beacon)
2368 dev_kfree_skb(priv->ibss_beacon);
2369
2370 priv->ibss_beacon = beacon;
2371 mutex_unlock(&priv->mutex);
2372
bb8c093b 2373 iwl3945_send_beacon_cmd(priv);
b481de9c
ZY
2374}
2375
4a8a4322 2376static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
6100b588 2377 struct iwl_rx_mem_buffer *rxb)
b481de9c 2378{
c8b0e6e1 2379#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2380 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
bb8c093b 2381 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
b481de9c
ZY
2382 u8 rate = beacon->beacon_notify_hdr.rate;
2383
2384 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
2385 "tsf %d %d rate %d\n",
2386 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
2387 beacon->beacon_notify_hdr.failure_frame,
2388 le32_to_cpu(beacon->ibss_mgr_status),
2389 le32_to_cpu(beacon->high_tsf),
2390 le32_to_cpu(beacon->low_tsf), rate);
2391#endif
2392
05c914fe 2393 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
b481de9c
ZY
2394 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
2395 queue_work(priv->workqueue, &priv->beacon_update);
2396}
2397
2398/* Service response to REPLY_SCAN_CMD (0x80) */
4a8a4322 2399static void iwl3945_rx_reply_scan(struct iwl_priv *priv,
6100b588 2400 struct iwl_rx_mem_buffer *rxb)
b481de9c 2401{
c8b0e6e1 2402#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2403 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2404 struct iwl_scanreq_notification *notif =
2405 (struct iwl_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
2406
2407 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
2408#endif
2409}
2410
2411/* Service SCAN_START_NOTIFICATION (0x82) */
4a8a4322 2412static void iwl3945_rx_scan_start_notif(struct iwl_priv *priv,
6100b588 2413 struct iwl_rx_mem_buffer *rxb)
b481de9c 2414{
3d24a9f7 2415 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2416 struct iwl_scanstart_notification *notif =
2417 (struct iwl_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
2418 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
2419 IWL_DEBUG_SCAN("Scan start: "
2420 "%d [802.11%s] "
2421 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
2422 notif->channel,
2423 notif->band ? "bg" : "a",
2424 notif->tsf_high,
2425 notif->tsf_low, notif->status, notif->beacon_timer);
2426}
2427
2428/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
4a8a4322 2429static void iwl3945_rx_scan_results_notif(struct iwl_priv *priv,
6100b588 2430 struct iwl_rx_mem_buffer *rxb)
b481de9c 2431{
c7e035a9 2432#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2433 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2434 struct iwl_scanresults_notification *notif =
2435 (struct iwl_scanresults_notification *)pkt->u.raw;
c7e035a9 2436#endif
b481de9c
ZY
2437
2438 IWL_DEBUG_SCAN("Scan ch.res: "
2439 "%d [802.11%s] "
2440 "(TSF: 0x%08X:%08X) - %d "
2441 "elapsed=%lu usec (%dms since last)\n",
2442 notif->channel,
2443 notif->band ? "bg" : "a",
2444 le32_to_cpu(notif->tsf_high),
2445 le32_to_cpu(notif->tsf_low),
2446 le32_to_cpu(notif->statistics[0]),
2447 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
2448 jiffies_to_msecs(elapsed_jiffies
2449 (priv->last_scan_jiffies, jiffies)));
2450
2451 priv->last_scan_jiffies = jiffies;
7878a5a4 2452 priv->next_scan_jiffies = 0;
b481de9c
ZY
2453}
2454
2455/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
4a8a4322 2456static void iwl3945_rx_scan_complete_notif(struct iwl_priv *priv,
6100b588 2457 struct iwl_rx_mem_buffer *rxb)
b481de9c 2458{
c7e035a9 2459#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2460 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253 2461 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
c7e035a9 2462#endif
b481de9c
ZY
2463
2464 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
2465 scan_notif->scanned_channels,
2466 scan_notif->tsf_low,
2467 scan_notif->tsf_high, scan_notif->status);
2468
2469 /* The HW is no longer scanning */
2470 clear_bit(STATUS_SCAN_HW, &priv->status);
2471
2472 /* The scan completion notification came in, so kill that timer... */
2473 cancel_delayed_work(&priv->scan_check);
2474
2475 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
66b5004d
RR
2476 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
2477 "2.4" : "5.2",
b481de9c
ZY
2478 jiffies_to_msecs(elapsed_jiffies
2479 (priv->scan_pass_start, jiffies)));
2480
66b5004d
RR
2481 /* Remove this scanned band from the list of pending
2482 * bands to scan, band G precedes A in order of scanning
2483 * as seen in iwl3945_bg_request_scan */
2484 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
2485 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
2486 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
2487 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
b481de9c
ZY
2488
2489 /* If a request to abort was given, or the scan did not succeed
2490 * then we reset the scan state machine and terminate,
2491 * re-queuing another scan if one has been requested */
2492 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2493 IWL_DEBUG_INFO("Aborted scan completed.\n");
2494 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
2495 } else {
2496 /* If there are more bands on this scan pass reschedule */
2497 if (priv->scan_bands > 0)
2498 goto reschedule;
2499 }
2500
2501 priv->last_scan_jiffies = jiffies;
7878a5a4 2502 priv->next_scan_jiffies = 0;
b481de9c
ZY
2503 IWL_DEBUG_INFO("Setting scan to off\n");
2504
2505 clear_bit(STATUS_SCANNING, &priv->status);
2506
2507 IWL_DEBUG_INFO("Scan took %dms\n",
2508 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
2509
2510 queue_work(priv->workqueue, &priv->scan_completed);
2511
2512 return;
2513
2514reschedule:
2515 priv->scan_pass_start = jiffies;
2516 queue_work(priv->workqueue, &priv->request_scan);
2517}
2518
2519/* Handle notification from uCode that card's power state is changing
2520 * due to software, hardware, or critical temperature RFKILL */
4a8a4322 2521static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
6100b588 2522 struct iwl_rx_mem_buffer *rxb)
b481de9c 2523{
3d24a9f7 2524 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
2525 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
2526 unsigned long status = priv->status;
2527
2528 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
2529 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
2530 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
2531
5d49f498 2532 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
2533 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2534
2535 if (flags & HW_CARD_DISABLED)
2536 set_bit(STATUS_RF_KILL_HW, &priv->status);
2537 else
2538 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2539
2540
2541 if (flags & SW_CARD_DISABLED)
2542 set_bit(STATUS_RF_KILL_SW, &priv->status);
2543 else
2544 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2545
af0053d6 2546 iwl_scan_cancel(priv);
b481de9c
ZY
2547
2548 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
2549 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
2550 (test_bit(STATUS_RF_KILL_SW, &status) !=
2551 test_bit(STATUS_RF_KILL_SW, &priv->status)))
2552 queue_work(priv->workqueue, &priv->rf_kill);
2553 else
2554 wake_up_interruptible(&priv->wait_command_queue);
2555}
2556
2557/**
bb8c093b 2558 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
2559 *
2560 * Setup the RX handlers for each of the reply types sent from the uCode
2561 * to the host.
2562 *
2563 * This function chains into the hardware specific files for them to setup
2564 * any hardware specific handlers as well.
2565 */
4a8a4322 2566static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
b481de9c 2567{
bb8c093b
CH
2568 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
2569 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
2570 priv->rx_handlers[REPLY_ERROR] = iwl3945_rx_reply_error;
2571 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl3945_rx_csa;
b481de9c 2572 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
2573 iwl3945_rx_spectrum_measure_notif;
2574 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl3945_rx_pm_sleep_notif;
b481de9c 2575 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
2576 iwl3945_rx_pm_debug_statistics_notif;
2577 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
b481de9c 2578
9fbab516
BC
2579 /*
 2580 * The same handler is used both for the REPLY to a discrete
 2581 * statistics request from the host and for the periodic
 2582 * statistics notifications (after received beacons) from the uCode.
b481de9c 2583 */
bb8c093b
CH
2584 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
2585 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
b481de9c 2586
bb8c093b
CH
2587 priv->rx_handlers[REPLY_SCAN_CMD] = iwl3945_rx_reply_scan;
2588 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl3945_rx_scan_start_notif;
b481de9c 2589 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 2590 iwl3945_rx_scan_results_notif;
b481de9c 2591 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
2592 iwl3945_rx_scan_complete_notif;
2593 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
b481de9c 2594
9fbab516 2595 /* Set up hardware specific Rx handlers */
bb8c093b 2596 iwl3945_hw_rx_handler_setup(priv);
b481de9c
ZY
2597}
2598
91c066f2
TW
2599/**
2600 * iwl3945_cmd_queue_reclaim - Reclaim CMD queue entries
2601 * When FW advances 'R' index, all entries between old and new 'R' index
2602 * need to be reclaimed.
2603 */
4a8a4322 2604static void iwl3945_cmd_queue_reclaim(struct iwl_priv *priv,
91c066f2
TW
2605 int txq_id, int index)
2606{
188cf6c7 2607 struct iwl_tx_queue *txq = &priv->txq[txq_id];
d20b3c65 2608 struct iwl_queue *q = &txq->q;
91c066f2
TW
2609 int nfreed = 0;
2610
625a381a 2611 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
15b1687c 2612 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
91c066f2
TW
2613 "is out of range [0-%d] %d %d.\n", txq_id,
2614 index, q->n_bd, q->write_ptr, q->read_ptr);
2615 return;
2616 }
2617
2618 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
2619 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2620 if (nfreed > 1) {
15b1687c 2621 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", index,
91c066f2
TW
2622 q->write_ptr, q->read_ptr);
2623 queue_work(priv->workqueue, &priv->restart);
2624 break;
2625 }
2626 nfreed++;
2627 }
2628}
2629
2630
b481de9c 2631/**
bb8c093b 2632 * iwl3945_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
2633 * @rxb: Rx buffer to reclaim
2634 *
2635 * If an Rx buffer has an async callback associated with it the callback
2636 * will be executed. The attached skb (if present) will only be freed
2637 * if the callback returns 1
2638 */
4a8a4322 2639static void iwl3945_tx_cmd_complete(struct iwl_priv *priv,
6100b588 2640 struct iwl_rx_mem_buffer *rxb)
b481de9c 2641{
3d24a9f7 2642 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
b481de9c
ZY
2643 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2644 int txq_id = SEQ_TO_QUEUE(sequence);
2645 int index = SEQ_TO_INDEX(sequence);
600c0e11 2646 int huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
b481de9c 2647 int cmd_index;
c2d79b48 2648 struct iwl_cmd *cmd;
b481de9c 2649
638d0eb9
CR
2650 if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
2651 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
2652 txq_id, sequence,
2653 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
2654 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
2655 iwl_print_hex_dump(priv, IWL_DL_INFO , rxb, 32);
2656 return;
2657 }
b481de9c 2658
188cf6c7
SO
2659 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
2660 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
b481de9c
ZY
2661
2662 /* Input error checking is done when commands are added to queue. */
2663 if (cmd->meta.flags & CMD_WANT_SKB) {
2664 cmd->meta.source->u.skb = rxb->skb;
2665 rxb->skb = NULL;
2666 } else if (cmd->meta.u.callback &&
2667 !cmd->meta.u.callback(priv, cmd, rxb->skb))
2668 rxb->skb = NULL;
2669
91c066f2 2670 iwl3945_cmd_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
2671
2672 if (!(cmd->meta.flags & CMD_ASYNC)) {
2673 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
2674 wake_up_interruptible(&priv->wait_command_queue);
2675 }
2676}
2677
2678/************************** RX-FUNCTIONS ****************************/
2679/*
2680 * Rx theory of operation
2681 *
2682 * The host allocates 32 DMA target addresses and passes the host address
2683 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
2684 * 0 to 31
2685 *
2686 * Rx Queue Indexes
2687 * The host/firmware share two index registers for managing the Rx buffers.
2688 *
2689 * The READ index maps to the first position that the firmware may be writing
2690 * to -- the driver can read up to (but not including) this position and get
2691 * good data.
2692 * The READ index is managed by the firmware once the card is enabled.
2693 *
2694 * The WRITE index maps to the last position the driver has read from -- the
2695 * position preceding WRITE is the last slot the firmware can place a packet.
2696 *
2697 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2698 * WRITE = READ.
2699 *
9fbab516 2700 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
2701 * INDEX position, and WRITE to the last (READ - 1 wrapped)
2702 *
9fbab516 2703 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
2704 * and fire the RX interrupt. The driver can then query the READ index and
2705 * process as many packets as possible, moving the WRITE index forward as it
2706 * resets the Rx queue buffers with new memory.
2707 *
2708 * The management in the driver is as follows:
2709 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2710 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 2711 * to replenish the iwl->rxq->rx_free.
bb8c093b 2712 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
2713 * iwl->rxq is replenished and the READ INDEX is updated (updating the
2714 * 'processed' and 'read' driver indexes as well)
2715 * + A received packet is processed and handed to the kernel network stack,
2716 * detached from the iwl->rxq. The driver 'processed' index is updated.
2717 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2718 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2719 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
2720 * were enough free buffers and RX_STALLED is set it is cleared.
2721 *
2722 *
2723 * Driver sequence:
2724 *
9fbab516 2725 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 2726 * iwl3945_rx_queue_restock
9fbab516 2727 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
2728 * queue, updates firmware pointers, and updates
2729 * the WRITE index. If insufficient rx_free buffers
bb8c093b 2730 * are available, schedules iwl3945_rx_replenish
b481de9c
ZY
2731 *
2732 * -- enable interrupts --
6100b588 2733 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
b481de9c
ZY
2734 * READ INDEX, detaching the SKB from the pool.
2735 * Moves the packet buffer from queue to rx_used.
bb8c093b 2736 * Calls iwl3945_rx_queue_restock to refill any empty
b481de9c
ZY
2737 * slots.
2738 * ...
2739 *
2740 */
2741
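/*
 * Editor's illustration (not part of the original driver): the circular
 * index arithmetic described above, in minimal form. The Rx queue size is a
 * power of two, so advancing an index is a masked increment; the same
 * pattern appears in iwl3945_rx_queue_restock() and iwl3945_rx_handle().
 * This helper is hypothetical and only demonstrates the wrap-around.
 */
static inline u32 iwl3945_example_next_rx_index(u32 index)
{
	return (index + 1) & RX_QUEUE_MASK;
}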
b481de9c 2742/**
9fbab516 2743 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 2744 */
4a8a4322 2745static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
b481de9c
ZY
2746 dma_addr_t dma_addr)
2747{
2748 return cpu_to_le32((u32)dma_addr);
2749}
2750
2751/**
bb8c093b 2752 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 2753 *
9fbab516 2754 * If there are slots in the RX queue that need to be restocked,
b481de9c 2755 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 2756 * as we can, pulling from rx_free.
b481de9c
ZY
2757 *
2758 * This moves the 'write' index forward to catch up with 'processed', and
2759 * also updates the memory address in the firmware to reference the new
2760 * target buffer.
2761 */
4a8a4322 2762static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
b481de9c 2763{
cc2f362c 2764 struct iwl_rx_queue *rxq = &priv->rxq;
b481de9c 2765 struct list_head *element;
6100b588 2766 struct iwl_rx_mem_buffer *rxb;
b481de9c
ZY
2767 unsigned long flags;
2768 int write, rc;
2769
2770 spin_lock_irqsave(&rxq->lock, flags);
2771 write = rxq->write & ~0x7;
37d68317 2772 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 2773 /* Get next free Rx buffer, remove from free list */
b481de9c 2774 element = rxq->rx_free.next;
6100b588 2775 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
b481de9c 2776 list_del(element);
6440adb5
BC
2777
2778 /* Point to Rx buffer via next RBD in circular buffer */
6100b588 2779 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
b481de9c
ZY
2780 rxq->queue[rxq->write] = rxb;
2781 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
2782 rxq->free_count--;
2783 }
2784 spin_unlock_irqrestore(&rxq->lock, flags);
2785 /* If the pre-allocated buffer pool is dropping low, schedule to
2786 * refill it */
2787 if (rxq->free_count <= RX_LOW_WATERMARK)
2788 queue_work(priv->workqueue, &priv->rx_replenish);
2789
2790
6440adb5
BC
2791 /* If we've added more space for the firmware to place data, tell it.
2792 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
2793 if ((write != (rxq->write & ~0x7))
2794 || (abs(rxq->write - rxq->read) > 7)) {
2795 spin_lock_irqsave(&rxq->lock, flags);
2796 rxq->need_update = 1;
2797 spin_unlock_irqrestore(&rxq->lock, flags);
141c43a3 2798 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
2799 if (rc)
2800 return rc;
2801 }
2802
2803 return 0;
2804}
2805
2806/**
bb8c093b 2807 * iwl3945_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
2808 *
2809 * When moving to rx_free an SKB is allocated for the slot.
2810 *
bb8c093b 2811 * Also restock the Rx queue via iwl3945_rx_queue_restock.
01ebd063 2812 * This is called as a scheduled work item (except during initialization)
b481de9c 2813 */
4a8a4322 2814static void iwl3945_rx_allocate(struct iwl_priv *priv)
b481de9c 2815{
cc2f362c 2816 struct iwl_rx_queue *rxq = &priv->rxq;
b481de9c 2817 struct list_head *element;
6100b588 2818 struct iwl_rx_mem_buffer *rxb;
b481de9c
ZY
2819 unsigned long flags;
2820 spin_lock_irqsave(&rxq->lock, flags);
2821 while (!list_empty(&rxq->rx_used)) {
2822 element = rxq->rx_used.next;
6100b588 2823 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
6440adb5
BC
2824
2825 /* Alloc a new receive buffer */
b481de9c 2826 rxb->skb =
1e33dc64
WT
2827 alloc_skb(priv->hw_params.rx_buf_size,
2828 __GFP_NOWARN | GFP_ATOMIC);
b481de9c
ZY
2829 if (!rxb->skb) {
2830 if (net_ratelimit())
978785a3 2831 IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
b481de9c
ZY
2832 /* We don't reschedule replenish work here -- we will
2833 * call the restock method and if it still needs
2834 * more buffers it will schedule replenish */
2835 break;
2836 }
12342c47
ZY
2837
 2838 /* If a radiotap header is required, reserve some headroom here.
 2839 * The physical header count is a variable, rx_stats->phy_count.
 2840 * We reserve 4 bytes here. Together with these extra bytes, the
 2841 * headroom of the physical header should be enough for the
 2842 * radiotap header that iwl3945 supports. See iwl3945_rt.
2843 */
2844 skb_reserve(rxb->skb, 4);
2845
b481de9c
ZY
2846 priv->alloc_rxb_skb++;
2847 list_del(element);
6440adb5
BC
2848
2849 /* Get physical address of RB/SKB */
1e33dc64
WT
2850 rxb->real_dma_addr = pci_map_single(priv->pci_dev,
2851 rxb->skb->data,
2852 priv->hw_params.rx_buf_size,
2853 PCI_DMA_FROMDEVICE);
b481de9c
ZY
2854 list_add_tail(&rxb->list, &rxq->rx_free);
2855 rxq->free_count++;
2856 }
2857 spin_unlock_irqrestore(&rxq->lock, flags);
5c0eef96
MA
2858}
2859
2860/*
2861 * this should be called while priv->lock is locked
2862 */
4fd1f841 2863static void __iwl3945_rx_replenish(void *data)
5c0eef96 2864{
4a8a4322 2865 struct iwl_priv *priv = data;
5c0eef96
MA
2866
2867 iwl3945_rx_allocate(priv);
2868 iwl3945_rx_queue_restock(priv);
2869}
2870
2871
2872void iwl3945_rx_replenish(void *data)
2873{
4a8a4322 2874 struct iwl_priv *priv = data;
5c0eef96
MA
2875 unsigned long flags;
2876
2877 iwl3945_rx_allocate(priv);
b481de9c
ZY
2878
2879 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 2880 iwl3945_rx_queue_restock(priv);
b481de9c
ZY
2881 spin_unlock_irqrestore(&priv->lock, flags);
2882}
2883
b481de9c
ZY
2884/* Convert linear signal-to-noise ratio into dB */
2885static u8 ratio2dB[100] = {
2886/* 0 1 2 3 4 5 6 7 8 9 */
2887 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
2888 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
2889 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
2890 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
2891 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
2892 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
2893 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
2894 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
2895 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
2896 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
2897};
2898
2899/* Calculates a relative dB value from a ratio of linear
2900 * (i.e. not dB) signal levels.
2901 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 2902int iwl3945_calc_db_from_ratio(int sig_ratio)
b481de9c 2903{
221c80cf
AB
2904 /* 1000:1 or higher just report as 60 dB */
2905 if (sig_ratio >= 1000)
b481de9c
ZY
2906 return 60;
2907
221c80cf 2908 /* 100:1 or higher, divide by 10 and use table,
b481de9c 2909 * add 20 dB to make up for divide by 10 */
221c80cf 2910 if (sig_ratio >= 100)
3ac7f146 2911 return 20 + (int)ratio2dB[sig_ratio/10];
b481de9c
ZY
2912
2913 /* We shouldn't see this */
2914 if (sig_ratio < 1)
2915 return 0;
2916
2917 /* Use table for ratios 1:1 - 99:1 */
2918 return (int)ratio2dB[sig_ratio];
2919}
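
/*
 * Editor's illustration (not part of the original driver): behavior of the
 * ratio-to-dB helper above at a few representative inputs, derived from the
 * ratio2dB table.
 *
 *   iwl3945_calc_db_from_ratio(1)    ->  0 dB  (direct table lookup)
 *   iwl3945_calc_db_from_ratio(10)   -> 20 dB  (direct table lookup)
 *   iwl3945_calc_db_from_ratio(250)  -> 20 + ratio2dB[25] = 48 dB
 *   iwl3945_calc_db_from_ratio(2000) -> clamped to 60 dB
 */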
2920
2921#define PERFECT_RSSI (-20) /* dBm */
2922#define WORST_RSSI (-95) /* dBm */
2923#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
2924
2925/* Calculate an indication of rx signal quality (a percentage, not dBm!).
2926 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
2927 * about formulas used below. */
bb8c093b 2928int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
2929{
2930 int sig_qual;
2931 int degradation = PERFECT_RSSI - rssi_dbm;
2932
2933 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
2934 * as indicator; formula is (signal dbm - noise dbm).
2935 * SNR at or above 40 is a great signal (100%).
2936 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
2937 * Weakest usable signal is usually 10 - 15 dB SNR. */
2938 if (noise_dbm) {
2939 if (rssi_dbm - noise_dbm >= 40)
2940 return 100;
2941 else if (rssi_dbm < noise_dbm)
2942 return 0;
2943 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
2944
2945 /* Else use just the signal level.
2946 * This formula is a least squares fit of data points collected and
2947 * compared with a reference system that had a percentage (%) display
2948 * for signal quality. */
2949 } else
2950 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
2951 (15 * RSSI_RANGE + 62 * degradation)) /
2952 (RSSI_RANGE * RSSI_RANGE);
2953
2954 if (sig_qual > 100)
2955 sig_qual = 100;
2956 else if (sig_qual < 1)
2957 sig_qual = 0;
2958
2959 return sig_qual;
2960}
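
/*
 * Editor's illustration (not part of the original driver): two worked
 * examples of the quality formula above.
 *
 *   With a noise measurement: rssi = -60 dBm, noise = -90 dBm gives
 *   SNR = 30 dB, so sig_qual = (30 * 5) / 2 = 75%.
 *
 *   Without one: rssi = -57 dBm gives degradation = 37 and RSSI_RANGE = 75,
 *   so sig_qual = (100 * 75 * 75 - 37 * (15 * 75 + 62 * 37)) / (75 * 75)
 *               = 435997 / 5625 = 77% (integer division).
 */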
2961
2962/**
9fbab516 2963 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
2964 *
2965 * Uses the priv->rx_handlers callback function array to invoke
2966 * the appropriate handlers, including command responses,
2967 * frame-received notifications, and other notifications.
2968 */
4a8a4322 2969static void iwl3945_rx_handle(struct iwl_priv *priv)
b481de9c 2970{
6100b588 2971 struct iwl_rx_mem_buffer *rxb;
3d24a9f7 2972 struct iwl_rx_packet *pkt;
cc2f362c 2973 struct iwl_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
2974 u32 r, i;
2975 int reclaim;
2976 unsigned long flags;
5c0eef96 2977 u8 fill_rx = 0;
d68ab680 2978 u32 count = 8;
b481de9c 2979
6440adb5
BC
2980 /* uCode's read index (stored in shared DRAM) indicates the last Rx
2981 * buffer that the driver may process (last buffer filled by ucode). */
8cd812bc 2982 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
b481de9c
ZY
2983 i = rxq->read;
2984
37d68317 2985 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
5c0eef96 2986 fill_rx = 1;
b481de9c
ZY
2987 /* Rx interrupt, but nothing sent from uCode */
2988 if (i == r)
2989 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
2990
2991 while (i != r) {
2992 rxb = rxq->queue[i];
2993
9fbab516 2994 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
2995 * then a bug has been introduced in the queue refilling
2996 * routines -- catch it here */
2997 BUG_ON(rxb == NULL);
2998
2999 rxq->queue[i] = NULL;
3000
6100b588 3001 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
1e33dc64 3002 priv->hw_params.rx_buf_size,
b481de9c 3003 PCI_DMA_FROMDEVICE);
3d24a9f7 3004 pkt = (struct iwl_rx_packet *)rxb->skb->data;
b481de9c
ZY
3005
3006 /* Reclaim a command buffer only if this packet is a response
3007 * to a (driver-originated) command.
3008 * If the packet (e.g. Rx frame) originated from uCode,
3009 * there is no command buffer to reclaim.
3010 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
3011 * but apparently a few don't get set; catch them here. */
3012 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
3013 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
3014 (pkt->hdr.cmd != REPLY_TX);
3015
3016 /* Based on type of command response or notification,
3017 * handle those that need handling via function in
bb8c093b 3018 * rx_handlers table. See iwl3945_setup_rx_handlers() */
b481de9c 3019 if (priv->rx_handlers[pkt->hdr.cmd]) {
40b8ec0b 3020 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
b481de9c
ZY
3021 "r = %d, i = %d, %s, 0x%02x\n", r, i,
3022 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3023 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3024 } else {
3025 /* No handling needed */
40b8ec0b 3026 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
b481de9c
ZY
3027 "r %d i %d No handler needed for %s, 0x%02x\n",
3028 r, i, get_cmd_string(pkt->hdr.cmd),
3029 pkt->hdr.cmd);
3030 }
3031
3032 if (reclaim) {
9fbab516 3033 /* Invoke any callbacks, transfer the skb to caller, and
518099a8 3034 * fire off the (possibly) blocking iwl_send_cmd()
b481de9c
ZY
3035 * as we reclaim the driver command queue */
3036 if (rxb && rxb->skb)
bb8c093b 3037 iwl3945_tx_cmd_complete(priv, rxb);
b481de9c 3038 else
39aadf8c 3039 IWL_WARN(priv, "Claim null rxb?\n");
b481de9c
ZY
3040 }
3041
3042 /* For now we just don't re-use anything. We can tweak this
3043 * later to try and re-use notification packets and SKBs that
3044 * fail to Rx correctly */
3045 if (rxb->skb != NULL) {
3046 priv->alloc_rxb_skb--;
3047 dev_kfree_skb_any(rxb->skb);
3048 rxb->skb = NULL;
3049 }
3050
6100b588 3051 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
1e33dc64
WT
3052 priv->hw_params.rx_buf_size,
3053 PCI_DMA_FROMDEVICE);
b481de9c
ZY
3054 spin_lock_irqsave(&rxq->lock, flags);
3055 list_add_tail(&rxb->list, &priv->rxq.rx_used);
3056 spin_unlock_irqrestore(&rxq->lock, flags);
3057 i = (i + 1) & RX_QUEUE_MASK;
5c0eef96
MA
3058 /* If there are a lot of unused frames,
3059 * restock the Rx queue so ucode won't assert. */
3060 if (fill_rx) {
3061 count++;
3062 if (count >= 8) {
3063 priv->rxq.read = i;
3064 __iwl3945_rx_replenish(priv);
3065 count = 0;
3066 }
3067 }
b481de9c
ZY
3068 }
3069
3070 /* Backtrack one entry */
3071 priv->rxq.read = i;
bb8c093b 3072 iwl3945_rx_queue_restock(priv);
b481de9c
ZY
3073}
3074
c8b0e6e1 3075#ifdef CONFIG_IWL3945_DEBUG
4a8a4322 3076static void iwl3945_print_rx_config_cmd(struct iwl_priv *priv,
40b8ec0b 3077 struct iwl3945_rxon_cmd *rxon)
b481de9c
ZY
3078{
3079 IWL_DEBUG_RADIO("RX CONFIG:\n");
40b8ec0b 3080 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
3081 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
3082 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
3083 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
3084 le32_to_cpu(rxon->filter_flags));
3085 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
3086 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
3087 rxon->ofdm_basic_rates);
3088 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
e174961c
JB
3089 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
3090 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
b481de9c
ZY
3091 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
3092}
3093#endif
3094
4a8a4322 3095static void iwl3945_enable_interrupts(struct iwl_priv *priv)
b481de9c
ZY
3096{
3097 IWL_DEBUG_ISR("Enabling interrupts\n");
3098 set_bit(STATUS_INT_ENABLED, &priv->status);
5d49f498 3099 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
3100}
3101
0359facc
MA
3102
3103/* call this function to flush any scheduled tasklet */
4a8a4322 3104static inline void iwl_synchronize_irq(struct iwl_priv *priv)
0359facc 3105{
a96a27f9 3106	/* wait to make sure we flush any pending tasklet */
0359facc
MA
3107 synchronize_irq(priv->pci_dev->irq);
3108 tasklet_kill(&priv->irq_tasklet);
3109}
3110
3111
4a8a4322 3112static inline void iwl3945_disable_interrupts(struct iwl_priv *priv)
b481de9c
ZY
3113{
3114 clear_bit(STATUS_INT_ENABLED, &priv->status);
3115
3116 /* disable interrupts from uCode/NIC to host */
5d49f498 3117 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
3118
3119 /* acknowledge/clear/reset any interrupts still pending
3120 * from uCode or flow handler (Rx/Tx DMA) */
5d49f498
AK
3121 iwl_write32(priv, CSR_INT, 0xffffffff);
3122 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
3123 IWL_DEBUG_ISR("Disabled interrupts\n");
3124}
3125
3126static const char *desc_lookup(int i)
3127{
3128 switch (i) {
3129 case 1:
3130 return "FAIL";
3131 case 2:
3132 return "BAD_PARAM";
3133 case 3:
3134 return "BAD_CHECKSUM";
3135 case 4:
3136 return "NMI_INTERRUPT";
3137 case 5:
3138 return "SYSASSERT";
3139 case 6:
3140 return "FATAL_ERROR";
3141 }
3142
3143 return "UNKNOWN";
3144}
3145
3146#define ERROR_START_OFFSET (1 * sizeof(u32))
3147#define ERROR_ELEM_SIZE (7 * sizeof(u32))
3148
4a8a4322 3149static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
b481de9c
ZY
3150{
3151 u32 i;
3152 u32 desc, time, count, base, data1;
3153 u32 blink1, blink2, ilink1, ilink2;
3154 int rc;
3155
3156 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
3157
bb8c093b 3158 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
15b1687c 3159		IWL_ERR(priv, "Invalid error log pointer 0x%08X\n", base);
b481de9c
ZY
3160 return;
3161 }
3162
5d49f498 3163 rc = iwl_grab_nic_access(priv);
b481de9c 3164 if (rc) {
39aadf8c 3165 IWL_WARN(priv, "Can not read from adapter at this time.\n");
b481de9c
ZY
3166 return;
3167 }
3168
5d49f498 3169 count = iwl_read_targ_mem(priv, base);
b481de9c
ZY
3170
3171 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
15b1687c
WT
3172 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
3173 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
3174 priv->status, count);
b481de9c
ZY
3175 }
3176
15b1687c 3177 IWL_ERR(priv, "Desc Time asrtPC blink2 "
b481de9c
ZY
3178 "ilink1 nmiPC Line\n");
3179 for (i = ERROR_START_OFFSET;
3180 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
3181 i += ERROR_ELEM_SIZE) {
5d49f498 3182 desc = iwl_read_targ_mem(priv, base + i);
b481de9c 3183 time =
5d49f498 3184 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
b481de9c 3185 blink1 =
5d49f498 3186 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
b481de9c 3187 blink2 =
5d49f498 3188 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
b481de9c 3189 ilink1 =
5d49f498 3190 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
b481de9c 3191 ilink2 =
5d49f498 3192 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
b481de9c 3193 data1 =
5d49f498 3194 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
b481de9c 3195
15b1687c
WT
3196 IWL_ERR(priv,
3197 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
3198 desc_lookup(desc), desc, time, blink1, blink2,
3199 ilink1, ilink2, data1);
b481de9c
ZY
3200 }
3201
5d49f498 3202 iwl_release_nic_access(priv);
b481de9c
ZY
3203
3204}
3205
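/*
 * Editor's sketch (illustrative only): the SRAM layout implied by
 * iwl3945_dump_nic_error_log() above.  The u32 at 'base' is the entry
 * count; records start at base + ERROR_START_OFFSET and each spans
 * ERROR_ELEM_SIZE (7 u32s), read in the order shown below.  This struct
 * is an editor-invented mirror of those reads, not a driver type.
 */
#if 0
struct iwl3945_error_elem_sketch {
	u32 desc;	/* error type, decoded by desc_lookup() */
	u32 time;
	u32 blink1;
	u32 blink2;
	u32 ilink1;
	u32 ilink2;
	u32 data1;
};
/* byte address of record n: base + ERROR_START_OFFSET + n * ERROR_ELEM_SIZE */
#endif
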
f58177b9 3206#define EVENT_START_OFFSET (6 * sizeof(u32))
b481de9c
ZY
3207
3208/**
bb8c093b 3209 * iwl3945_print_event_log - Dump error event log to syslog
b481de9c 3210 *
5d49f498 3211 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
b481de9c 3212 */
4a8a4322 3213static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
b481de9c
ZY
3214 u32 num_events, u32 mode)
3215{
3216 u32 i;
3217 u32 base; /* SRAM byte address of event log header */
3218 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
3219 u32 ptr; /* SRAM byte address of log data */
3220 u32 ev, time, data; /* event log data */
3221
3222 if (num_events == 0)
3223 return;
3224
3225 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
3226
3227 if (mode == 0)
3228 event_size = 2 * sizeof(u32);
3229 else
3230 event_size = 3 * sizeof(u32);
3231
3232 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
3233
3234 /* "time" is actually "data" for mode 0 (no timestamp).
3235 * place event id # at far right for easier visual parsing. */
3236 for (i = 0; i < num_events; i++) {
5d49f498 3237 ev = iwl_read_targ_mem(priv, ptr);
b481de9c 3238 ptr += sizeof(u32);
5d49f498 3239 time = iwl_read_targ_mem(priv, ptr);
b481de9c 3240 ptr += sizeof(u32);
15b1687c
WT
3241 if (mode == 0) {
3242 /* data, ev */
3243 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
3244 } else {
5d49f498 3245 data = iwl_read_targ_mem(priv, ptr);
b481de9c 3246 ptr += sizeof(u32);
15b1687c 3247 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
b481de9c
ZY
3248 }
3249 }
3250}
3251
4a8a4322 3252static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
b481de9c
ZY
3253{
3254 int rc;
3255 u32 base; /* SRAM byte address of event log header */
3256 u32 capacity; /* event log capacity in # entries */
3257 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
3258 u32 num_wraps; /* # times uCode wrapped to top of log */
3259 u32 next_entry; /* index of next entry to be written by uCode */
3260 u32 size; /* # entries that we'll print */
3261
3262 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 3263 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
15b1687c 3264 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
b481de9c
ZY
3265 return;
3266 }
3267
5d49f498 3268 rc = iwl_grab_nic_access(priv);
b481de9c 3269 if (rc) {
39aadf8c 3270 IWL_WARN(priv, "Can not read from adapter at this time.\n");
b481de9c
ZY
3271 return;
3272 }
3273
3274 /* event log header */
5d49f498
AK
3275 capacity = iwl_read_targ_mem(priv, base);
3276 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
3277 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
3278 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
3279
3280 size = num_wraps ? capacity : next_entry;
3281
3282 /* bail out if nothing in log */
3283 if (size == 0) {
15b1687c 3284 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
5d49f498 3285 iwl_release_nic_access(priv);
b481de9c
ZY
3286 return;
3287 }
3288
15b1687c 3289 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
3290 size, num_wraps);
3291
3292 /* if uCode has wrapped back to top of log, start at the oldest entry,
 3293	 * i.e. the next one that uCode would fill. */
3294 if (num_wraps)
bb8c093b 3295 iwl3945_print_event_log(priv, next_entry,
b481de9c
ZY
3296 capacity - next_entry, mode);
3297
3298 /* (then/else) start at top of log */
bb8c093b 3299 iwl3945_print_event_log(priv, 0, next_entry, mode);
b481de9c 3300
5d49f498 3301 iwl_release_nic_access(priv);
b481de9c
ZY
3302}
3303
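/*
 * Editor's sketch (illustrative only): the event-log layout implied by the
 * two functions above.  The header at 'base' holds four u32s -- capacity,
 * mode, num_wraps, next_entry -- and event records start at
 * base + EVENT_START_OFFSET.  Each record is {ev, data} when mode == 0 and
 * {ev, time, data} when mode == 1, matching the pointer math in
 * iwl3945_print_event_log().
 */
#if 0
static u32 iwl3945_event_addr_sketch(u32 base, u32 idx, u32 mode)
{
	u32 event_size = (mode == 0) ? 2 * sizeof(u32) : 3 * sizeof(u32);

	return base + EVENT_START_OFFSET + (idx * event_size);
}
#endif
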
3304/**
bb8c093b 3305 * iwl3945_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 3306 */
4a8a4322 3307static void iwl3945_irq_handle_error(struct iwl_priv *priv)
b481de9c 3308{
bb8c093b 3309 /* Set the FW error flag -- cleared on iwl3945_down */
b481de9c
ZY
3310 set_bit(STATUS_FW_ERROR, &priv->status);
3311
3312 /* Cancel currently queued command. */
3313 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3314
c8b0e6e1 3315#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3316 if (priv->debug_level & IWL_DL_FW_ERRORS) {
bb8c093b
CH
3317 iwl3945_dump_nic_error_log(priv);
3318 iwl3945_dump_nic_event_log(priv);
f2c7e521 3319 iwl3945_print_rx_config_cmd(priv, &priv->staging39_rxon);
b481de9c
ZY
3320 }
3321#endif
3322
3323 wake_up_interruptible(&priv->wait_command_queue);
3324
3325 /* Keep the restart process from trying to send host
3326 * commands by clearing the INIT status bit */
3327 clear_bit(STATUS_READY, &priv->status);
3328
3329 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3330 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
3331 "Restarting adapter due to uCode error.\n");
3332
bb8c093b 3333 if (iwl3945_is_associated(priv)) {
f2c7e521
AK
3334 memcpy(&priv->recovery39_rxon, &priv->active39_rxon,
3335 sizeof(priv->recovery39_rxon));
b481de9c
ZY
3336 priv->error_recovering = 1;
3337 }
3338 queue_work(priv->workqueue, &priv->restart);
3339 }
3340}
3341
4a8a4322 3342static void iwl3945_error_recovery(struct iwl_priv *priv)
b481de9c
ZY
3343{
3344 unsigned long flags;
3345
f2c7e521
AK
3346 memcpy(&priv->staging39_rxon, &priv->recovery39_rxon,
3347 sizeof(priv->staging39_rxon));
3348 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 3349 iwl3945_commit_rxon(priv);
b481de9c 3350
bb8c093b 3351 iwl3945_add_station(priv, priv->bssid, 1, 0);
b481de9c
ZY
3352
3353 spin_lock_irqsave(&priv->lock, flags);
f2c7e521 3354 priv->assoc_id = le16_to_cpu(priv->staging39_rxon.assoc_id);
b481de9c
ZY
3355 priv->error_recovering = 0;
3356 spin_unlock_irqrestore(&priv->lock, flags);
3357}
3358
4a8a4322 3359static void iwl3945_irq_tasklet(struct iwl_priv *priv)
b481de9c
ZY
3360{
3361 u32 inta, handled = 0;
3362 u32 inta_fh;
3363 unsigned long flags;
c8b0e6e1 3364#ifdef CONFIG_IWL3945_DEBUG
b481de9c
ZY
3365 u32 inta_mask;
3366#endif
3367
3368 spin_lock_irqsave(&priv->lock, flags);
3369
3370 /* Ack/clear/reset pending uCode interrupts.
3371 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
3372 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
5d49f498
AK
3373 inta = iwl_read32(priv, CSR_INT);
3374 iwl_write32(priv, CSR_INT, inta);
b481de9c
ZY
3375
3376 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
3377 * Any new interrupts that happen after this, either while we're
3378 * in this tasklet, or later, will show up in next ISR/tasklet. */
5d49f498
AK
3379 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
3380 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 3381
c8b0e6e1 3382#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3383 if (priv->debug_level & IWL_DL_ISR) {
9fbab516 3384 /* just for debug */
5d49f498 3385 inta_mask = iwl_read32(priv, CSR_INT_MASK);
b481de9c
ZY
3386 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
3387 inta, inta_mask, inta_fh);
3388 }
3389#endif
3390
3391 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
3392 * atomic, make sure that inta covers all the interrupts that
3393 * we've discovered, even if FH interrupt came in just after
3394 * reading CSR_INT. */
6f83eaa1 3395 if (inta_fh & CSR39_FH_INT_RX_MASK)
b481de9c 3396 inta |= CSR_INT_BIT_FH_RX;
6f83eaa1 3397 if (inta_fh & CSR39_FH_INT_TX_MASK)
b481de9c
ZY
3398 inta |= CSR_INT_BIT_FH_TX;
3399
3400 /* Now service all interrupt bits discovered above. */
3401 if (inta & CSR_INT_BIT_HW_ERR) {
15b1687c 3402 IWL_ERR(priv, "Microcode HW error detected. Restarting.\n");
b481de9c
ZY
3403
3404 /* Tell the device to stop sending interrupts */
bb8c093b 3405 iwl3945_disable_interrupts(priv);
b481de9c 3406
bb8c093b 3407 iwl3945_irq_handle_error(priv);
b481de9c
ZY
3408
3409 handled |= CSR_INT_BIT_HW_ERR;
3410
3411 spin_unlock_irqrestore(&priv->lock, flags);
3412
3413 return;
3414 }
3415
c8b0e6e1 3416#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3417 if (priv->debug_level & (IWL_DL_ISR)) {
b481de9c 3418 /* NIC fires this, but we don't use it, redundant with WAKEUP */
25c03d8e
JP
3419 if (inta & CSR_INT_BIT_SCD)
 3420			IWL_DEBUG_ISR("Scheduler finished transmitting "
 3421				      "the frame(s).\n");
b481de9c
ZY
3422
3423 /* Alive notification via Rx interrupt will do the real work */
3424 if (inta & CSR_INT_BIT_ALIVE)
3425 IWL_DEBUG_ISR("Alive interrupt\n");
3426 }
3427#endif
3428 /* Safely ignore these bits for debug checks below */
25c03d8e 3429 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
b481de9c 3430
b481de9c
ZY
3431 /* Error detected by uCode */
3432 if (inta & CSR_INT_BIT_SW_ERR) {
15b1687c
WT
3433 IWL_ERR(priv, "Microcode SW error detected. "
3434 "Restarting 0x%X.\n", inta);
bb8c093b 3435 iwl3945_irq_handle_error(priv);
b481de9c
ZY
3436 handled |= CSR_INT_BIT_SW_ERR;
3437 }
3438
3439 /* uCode wakes up after power-down sleep */
3440 if (inta & CSR_INT_BIT_WAKEUP) {
3441 IWL_DEBUG_ISR("Wakeup interrupt\n");
141c43a3 3442 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
4f3602c8
SO
3443 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
3444 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
3445 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
3446 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
3447 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
3448 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
3449
3450 handled |= CSR_INT_BIT_WAKEUP;
3451 }
3452
3453 /* All uCode command responses, including Tx command responses,
3454 * Rx "responses" (frame-received notification), and other
 3455	 * notifications from uCode come through here */
3456 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 3457 iwl3945_rx_handle(priv);
b481de9c
ZY
3458 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
3459 }
3460
3461 if (inta & CSR_INT_BIT_FH_TX) {
3462 IWL_DEBUG_ISR("Tx interrupt\n");
3463
5d49f498
AK
3464 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
3465 if (!iwl_grab_nic_access(priv)) {
3466 iwl_write_direct32(priv, FH39_TCSR_CREDIT
bddadf86 3467 (FH39_SRVC_CHNL), 0x0);
5d49f498 3468 iwl_release_nic_access(priv);
b481de9c
ZY
3469 }
3470 handled |= CSR_INT_BIT_FH_TX;
3471 }
3472
3473 if (inta & ~handled)
15b1687c 3474 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
b481de9c
ZY
3475
3476 if (inta & ~CSR_INI_SET_MASK) {
39aadf8c 3477 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
b481de9c 3478 inta & ~CSR_INI_SET_MASK);
39aadf8c 3479 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
b481de9c
ZY
3480 }
3481
3482 /* Re-enable all interrupts */
0359facc
MA
3483 /* only Re-enable if disabled by irq */
3484 if (test_bit(STATUS_INT_ENABLED, &priv->status))
3485 iwl3945_enable_interrupts(priv);
b481de9c 3486
c8b0e6e1 3487#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3488 if (priv->debug_level & (IWL_DL_ISR)) {
5d49f498
AK
3489 inta = iwl_read32(priv, CSR_INT);
3490 inta_mask = iwl_read32(priv, CSR_INT_MASK);
3491 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
3492 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
3493 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
3494 }
3495#endif
3496 spin_unlock_irqrestore(&priv->lock, flags);
3497}
3498
bb8c093b 3499static irqreturn_t iwl3945_isr(int irq, void *data)
b481de9c 3500{
4a8a4322 3501 struct iwl_priv *priv = data;
b481de9c
ZY
3502 u32 inta, inta_mask;
3503 u32 inta_fh;
3504 if (!priv)
3505 return IRQ_NONE;
3506
3507 spin_lock(&priv->lock);
3508
3509 /* Disable (but don't clear!) interrupts here to avoid
3510 * back-to-back ISRs and sporadic interrupts from our NIC.
3511 * If we have something to service, the tasklet will re-enable ints.
3512 * If we *don't* have something, we'll re-enable before leaving here. */
5d49f498
AK
3513 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
3514 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
3515
3516 /* Discover which interrupts are active/pending */
5d49f498
AK
3517 inta = iwl_read32(priv, CSR_INT);
3518 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
3519
3520 /* Ignore interrupt if there's nothing in NIC to service.
3521 * This may be due to IRQ shared with another device,
3522 * or due to sporadic interrupts thrown from our NIC. */
3523 if (!inta && !inta_fh) {
3524 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
3525 goto none;
3526 }
3527
3528 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
3529 /* Hardware disappeared */
39aadf8c 3530 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
cb4da1a3 3531 goto unplugged;
b481de9c
ZY
3532 }
3533
3534 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
3535 inta, inta_mask, inta_fh);
3536
25c03d8e
JP
3537 inta &= ~CSR_INT_BIT_SCD;
3538
bb8c093b 3539 /* iwl3945_irq_tasklet() will service interrupts and re-enable them */
25c03d8e
JP
3540 if (likely(inta || inta_fh))
3541 tasklet_schedule(&priv->irq_tasklet);
cb4da1a3 3542unplugged:
b481de9c
ZY
3543 spin_unlock(&priv->lock);
3544
3545 return IRQ_HANDLED;
3546
3547 none:
3548 /* re-enable interrupts here since we don't have anything to service. */
0359facc
MA
3549 /* only Re-enable if disabled by irq */
3550 if (test_bit(STATUS_INT_ENABLED, &priv->status))
3551 iwl3945_enable_interrupts(priv);
b481de9c
ZY
3552 spin_unlock(&priv->lock);
3553 return IRQ_NONE;
3554}
3555
3556/************************** EEPROM BANDS ****************************
3557 *
bb8c093b 3558 * The iwl3945_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
3559 * EEPROM contents to the specific channel number supported for each
3560 * band.
3561 *
f2c7e521 3562 * For example, priv->eeprom39.band_3_channels[4] from the band_3
b481de9c
ZY
3563 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
3564 * The specific geography and calibration information for that channel
3565 * is contained in the eeprom map itself.
3566 *
3567 * During init, we copy the eeprom information and channel map
3568 * information into priv->channel_info_24/52 and priv->channel_map_24/52
3569 *
3570 * channel_map_24/52 provides the index in the channel_info array for a
 3571 * given channel. We need two separate maps because channel numbers
 3572 * overlap between the 2.4GHz and 5.2GHz spectrum, as seen in band_1
 3573 * and band_2.
3574 *
3575 * A value of 0xff stored in the channel_map indicates that the channel
3576 * is not supported by the hardware at all.
3577 *
3578 * A value of 0xfe in the channel_map indicates that the channel is not
3579 * valid for Tx with the current hardware. This means that
3580 * while the system can tune and receive on a given channel, it may not
3581 * be able to associate or transmit any frames on that
3582 * channel. There is no corresponding channel information for that
3583 * entry.
3584 *
3585 *********************************************************************/
3586
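/*
 * Editor's sketch (illustrative only): the worked example from the comment
 * above.  Index 4 of the band_3 table below is physical channel 42, so the
 * EEPROM entry eeprom39.band_3_channels[4] carries the regulatory data for
 * channel 42 in the 5.2GHz band.  The helper name is invented for this
 * illustration.
 */
#if 0
static const struct iwl_eeprom_channel *
iwl3945_band3_example(const struct iwl_priv *priv)
{
	/* iwl3945_eeprom_band_3[4] == 42 (see the tables below) */
	return &priv->eeprom39.band_3_channels[4];
}
#endif
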
3587/* 2.4 GHz */
bb8c093b 3588static const u8 iwl3945_eeprom_band_1[14] = {
b481de9c
ZY
3589 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
3590};
3591
3592/* 5.2 GHz bands */
9fbab516 3593static const u8 iwl3945_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
3594 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
3595};
3596
9fbab516 3597static const u8 iwl3945_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
3598 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
3599};
3600
bb8c093b 3601static const u8 iwl3945_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
3602 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
3603};
3604
bb8c093b 3605static const u8 iwl3945_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
3606 145, 149, 153, 157, 161, 165
3607};
3608
4a8a4322 3609static void iwl3945_init_band_reference(const struct iwl_priv *priv, int band,
b481de9c 3610 int *eeprom_ch_count,
0f741d99 3611 const struct iwl_eeprom_channel
b481de9c
ZY
3612 **eeprom_ch_info,
3613 const u8 **eeprom_ch_index)
3614{
3615 switch (band) {
3616 case 1: /* 2.4GHz band */
bb8c093b 3617 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_1);
f2c7e521 3618 *eeprom_ch_info = priv->eeprom39.band_1_channels;
bb8c093b 3619 *eeprom_ch_index = iwl3945_eeprom_band_1;
b481de9c 3620 break;
9fbab516 3621 case 2: /* 4.9GHz band */
bb8c093b 3622 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_2);
f2c7e521 3623 *eeprom_ch_info = priv->eeprom39.band_2_channels;
bb8c093b 3624 *eeprom_ch_index = iwl3945_eeprom_band_2;
b481de9c
ZY
3625 break;
3626 case 3: /* 5.2GHz band */
bb8c093b 3627 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_3);
f2c7e521 3628 *eeprom_ch_info = priv->eeprom39.band_3_channels;
bb8c093b 3629 *eeprom_ch_index = iwl3945_eeprom_band_3;
b481de9c 3630 break;
9fbab516 3631 case 4: /* 5.5GHz band */
bb8c093b 3632 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_4);
f2c7e521 3633 *eeprom_ch_info = priv->eeprom39.band_4_channels;
bb8c093b 3634 *eeprom_ch_index = iwl3945_eeprom_band_4;
b481de9c 3635 break;
9fbab516 3636 case 5: /* 5.7GHz band */
bb8c093b 3637 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_5);
f2c7e521 3638 *eeprom_ch_info = priv->eeprom39.band_5_channels;
bb8c093b 3639 *eeprom_ch_index = iwl3945_eeprom_band_5;
b481de9c
ZY
3640 break;
3641 default:
3642 BUG();
3643 return;
3644 }
3645}
3646
6440adb5
BC
3647/**
3648 * iwl3945_get_channel_info - Find driver's private channel info
3649 *
3650 * Based on band and channel number.
3651 */
d20b3c65 3652const struct iwl_channel_info *
4a8a4322 3653iwl3945_get_channel_info(const struct iwl_priv *priv,
d20b3c65 3654 enum ieee80211_band band, u16 channel)
b481de9c
ZY
3655{
3656 int i;
3657
8318d78a
JB
3658 switch (band) {
3659 case IEEE80211_BAND_5GHZ:
b481de9c
ZY
3660 for (i = 14; i < priv->channel_count; i++) {
3661 if (priv->channel_info[i].channel == channel)
3662 return &priv->channel_info[i];
3663 }
3664 break;
3665
8318d78a 3666 case IEEE80211_BAND_2GHZ:
b481de9c
ZY
3667 if (channel >= 1 && channel <= 14)
3668 return &priv->channel_info[channel - 1];
3669 break;
8318d78a
JB
3670 case IEEE80211_NUM_BANDS:
3671 WARN_ON(1);
b481de9c
ZY
3672 }
3673
3674 return NULL;
3675}
3676
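/*
 * Editor's sketch (illustrative only): typical use of the lookup above,
 * mirroring how iwl3945_get_channels_for_scan() consumes it later in this
 * file.  A NULL return or a failed is_channel_valid() check means the
 * channel cannot be used for traffic.  The helper name is invented.
 */
#if 0
static int iwl3945_channel_usable_sketch(const struct iwl_priv *priv,
					 u16 channel)
{
	const struct iwl_channel_info *ch_info =
		iwl3945_get_channel_info(priv, IEEE80211_BAND_2GHZ, channel);

	return ch_info && is_channel_valid(ch_info);
}
#endif
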
3677#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
3678 ? # x " " : "")
3679
6440adb5
BC
3680/**
3681 * iwl3945_init_channel_map - Set up driver's info for all possible channels
3682 */
4a8a4322 3683static int iwl3945_init_channel_map(struct iwl_priv *priv)
b481de9c
ZY
3684{
3685 int eeprom_ch_count = 0;
3686 const u8 *eeprom_ch_index = NULL;
0f741d99 3687 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 3688 int band, ch;
d20b3c65 3689 struct iwl_channel_info *ch_info;
b481de9c
ZY
3690
3691 if (priv->channel_count) {
3692 IWL_DEBUG_INFO("Channel map already initialized.\n");
3693 return 0;
3694 }
3695
f2c7e521 3696 if (priv->eeprom39.version < 0x2f) {
39aadf8c 3697 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
f2c7e521 3698 priv->eeprom39.version);
b481de9c
ZY
3699 return -EINVAL;
3700 }
3701
3702 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
3703
3704 priv->channel_count =
bb8c093b
CH
3705 ARRAY_SIZE(iwl3945_eeprom_band_1) +
3706 ARRAY_SIZE(iwl3945_eeprom_band_2) +
3707 ARRAY_SIZE(iwl3945_eeprom_band_3) +
3708 ARRAY_SIZE(iwl3945_eeprom_band_4) +
3709 ARRAY_SIZE(iwl3945_eeprom_band_5);
b481de9c
ZY
3710
3711 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
3712
d20b3c65 3713 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
b481de9c
ZY
3714 priv->channel_count, GFP_KERNEL);
3715 if (!priv->channel_info) {
15b1687c 3716 IWL_ERR(priv, "Could not allocate channel_info\n");
b481de9c
ZY
3717 priv->channel_count = 0;
3718 return -ENOMEM;
3719 }
3720
3721 ch_info = priv->channel_info;
3722
 3723	/* Loop through the 5 EEPROM bands, adding them in order to the
 3724	 * channel map we maintain (which contains additional information
 3725	 * beyond what is in the EEPROM) */
3726 for (band = 1; band <= 5; band++) {
3727
bb8c093b 3728 iwl3945_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
3729 &eeprom_ch_info, &eeprom_ch_index);
3730
3731 /* Loop through each band adding each of the channels */
3732 for (ch = 0; ch < eeprom_ch_count; ch++) {
3733 ch_info->channel = eeprom_ch_index[ch];
8318d78a
JB
3734 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
3735 IEEE80211_BAND_5GHZ;
b481de9c
ZY
3736
3737 /* permanently store EEPROM's channel regulatory flags
3738 * and max power in channel info database. */
3739 ch_info->eeprom = eeprom_ch_info[ch];
3740
3741 /* Copy the run-time flags so they are there even on
3742 * invalid channels */
3743 ch_info->flags = eeprom_ch_info[ch].flags;
3744
3745 if (!(is_channel_valid(ch_info))) {
3746 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
3747 "No traffic\n",
3748 ch_info->channel,
3749 ch_info->flags,
3750 is_channel_a_band(ch_info) ?
3751 "5.2" : "2.4");
3752 ch_info++;
3753 continue;
3754 }
3755
3756 /* Initialize regulatory-based run-time data */
3757 ch_info->max_power_avg = ch_info->curr_txpow =
3758 eeprom_ch_info[ch].max_power_avg;
3759 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
3760 ch_info->min_power = 0;
3761
fe7c4040 3762 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
b481de9c
ZY
3763 " %ddBm): Ad-Hoc %ssupported\n",
3764 ch_info->channel,
3765 is_channel_a_band(ch_info) ?
3766 "5.2" : "2.4",
8211ef78 3767 CHECK_AND_PRINT(VALID),
b481de9c
ZY
3768 CHECK_AND_PRINT(IBSS),
3769 CHECK_AND_PRINT(ACTIVE),
3770 CHECK_AND_PRINT(RADAR),
3771 CHECK_AND_PRINT(WIDE),
b481de9c
ZY
3772 CHECK_AND_PRINT(DFS),
3773 eeprom_ch_info[ch].flags,
3774 eeprom_ch_info[ch].max_power_avg,
3775 ((eeprom_ch_info[ch].
3776 flags & EEPROM_CHANNEL_IBSS)
3777 && !(eeprom_ch_info[ch].
3778 flags & EEPROM_CHANNEL_RADAR))
3779 ? "" : "not ");
3780
62ea9c5b 3781 /* Set the tx_power_user_lmt to the highest power
b481de9c
ZY
3782 * supported by any channel */
3783 if (eeprom_ch_info[ch].max_power_avg >
62ea9c5b
WT
3784 priv->tx_power_user_lmt)
3785 priv->tx_power_user_lmt =
b481de9c
ZY
3786 eeprom_ch_info[ch].max_power_avg;
3787
3788 ch_info++;
3789 }
3790 }
3791
6440adb5 3792 /* Set up txpower settings in driver for all channels */
b481de9c
ZY
3793 if (iwl3945_txpower_set_from_eeprom(priv))
3794 return -EIO;
3795
3796 return 0;
3797}
3798
849e0dce
RC
3799/*
3800 * iwl3945_free_channel_map - undo allocations in iwl3945_init_channel_map
3801 */
4a8a4322 3802static void iwl3945_free_channel_map(struct iwl_priv *priv)
849e0dce
RC
3803{
3804 kfree(priv->channel_info);
3805 priv->channel_count = 0;
3806}
3807
4a8a4322 3808static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
8318d78a 3809 enum ieee80211_band band,
f9340520 3810 u8 is_active, u8 n_probes,
bb8c093b 3811 struct iwl3945_scan_channel *scan_ch)
b481de9c
ZY
3812{
3813 const struct ieee80211_channel *channels = NULL;
8318d78a 3814 const struct ieee80211_supported_band *sband;
d20b3c65 3815 const struct iwl_channel_info *ch_info;
b481de9c
ZY
3816 u16 passive_dwell = 0;
3817 u16 active_dwell = 0;
3818 int added, i;
3819
cbba18c6 3820 sband = iwl_get_hw_mode(priv, band);
8318d78a 3821 if (!sband)
b481de9c
ZY
3822 return 0;
3823
8318d78a 3824 channels = sband->channels;
b481de9c 3825
77fecfb8
SO
3826 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
3827 passive_dwell = iwl_get_passive_dwell_time(priv, band);
b481de9c 3828
8f4807a1
AK
3829 if (passive_dwell <= active_dwell)
3830 passive_dwell = active_dwell + 1;
3831
8318d78a 3832 for (i = 0, added = 0; i < sband->n_channels; i++) {
182e2e66
JB
3833 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
3834 continue;
3835
8318d78a 3836 scan_ch->channel = channels[i].hw_value;
b481de9c 3837
8318d78a 3838 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
b481de9c 3839 if (!is_channel_valid(ch_info)) {
66b5004d 3840 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
b481de9c
ZY
3841 scan_ch->channel);
3842 continue;
3843 }
3844
011a0330
AK
3845 scan_ch->active_dwell = cpu_to_le16(active_dwell);
3846 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
 3847		/* If passive, set up for auto-switch
3848 * and use long active_dwell time.
3849 */
b481de9c 3850 if (!is_active || is_channel_passive(ch_info) ||
011a0330 3851 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
b481de9c 3852 scan_ch->type = 0; /* passive */
011a0330
AK
3853 if (IWL_UCODE_API(priv->ucode_ver) == 1)
3854 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
3855 } else {
b481de9c 3856 scan_ch->type = 1; /* active */
011a0330 3857 }
b481de9c 3858
011a0330
AK
3859 /* Set direct probe bits. These may be used both for active
3860 * scan channels (probes gets sent right away),
3861 * or for passive channels (probes get se sent only after
3862 * hearing clear Rx packet).*/
3863 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
3864 if (n_probes)
3865 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
3866 } else {
3867 /* uCode v1 does not allow setting direct probe bits on
3868 * passive channel. */
3869 if ((scan_ch->type & 1) && n_probes)
3870 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
3871 }
b481de9c 3872
9fbab516 3873 /* Set txpower levels to defaults */
b481de9c
ZY
3874 scan_ch->tpc.dsp_atten = 110;
3875 /* scan_pwr_info->tpc.dsp_atten; */
3876
3877 /*scan_pwr_info->tpc.tx_gain; */
8318d78a 3878 if (band == IEEE80211_BAND_5GHZ)
b481de9c
ZY
3879 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
3880 else {
3881 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
3882 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516 3883 * power level:
8a1b0245 3884 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
b481de9c
ZY
3885 */
3886 }
3887
3888 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
3889 scan_ch->channel,
3890 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
3891 (scan_ch->type & 1) ?
3892 active_dwell : passive_dwell);
3893
3894 scan_ch++;
3895 added++;
3896 }
3897
3898 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
3899 return added;
3900}
3901
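/*
 * Editor's sketch (illustrative only): the dwell-time rule applied near the
 * top of iwl3945_get_channels_for_scan() above -- the passive dwell must
 * always exceed the active dwell, so it is bumped by one when it does not.
 */
#if 0
static u16 iwl3945_fix_passive_dwell_sketch(u16 active_dwell, u16 passive_dwell)
{
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;
	return passive_dwell;
}
#endif
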
4a8a4322 3902static void iwl3945_init_hw_rates(struct iwl_priv *priv,
b481de9c
ZY
3903 struct ieee80211_rate *rates)
3904{
3905 int i;
3906
3907 for (i = 0; i < IWL_RATE_COUNT; i++) {
8318d78a
JB
3908 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
3909 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3910 rates[i].hw_value_short = i;
3911 rates[i].flags = 0;
d9829a67 3912 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
b481de9c 3913 /*
8318d78a 3914 * If CCK != 1M then set short preamble rate flag.
b481de9c 3915 */
bb8c093b 3916 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
8318d78a 3917 0 : IEEE80211_RATE_SHORT_PREAMBLE;
b481de9c 3918 }
b481de9c
ZY
3919 }
3920}
3921
3922/**
bb8c093b 3923 * iwl3945_init_geos - Initialize mac80211's geo/channel info based on EEPROM
b481de9c 3924 */
4a8a4322 3925static int iwl3945_init_geos(struct iwl_priv *priv)
b481de9c 3926{
d20b3c65 3927 struct iwl_channel_info *ch;
8211ef78 3928 struct ieee80211_supported_band *sband;
b481de9c
ZY
3929 struct ieee80211_channel *channels;
3930 struct ieee80211_channel *geo_ch;
3931 struct ieee80211_rate *rates;
3932 int i = 0;
b481de9c 3933
8318d78a
JB
3934 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3935 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
b481de9c
ZY
3936 IWL_DEBUG_INFO("Geography modes already initialized.\n");
3937 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
3938 return 0;
3939 }
3940
b481de9c
ZY
3941 channels = kzalloc(sizeof(struct ieee80211_channel) *
3942 priv->channel_count, GFP_KERNEL);
8318d78a 3943 if (!channels)
b481de9c 3944 return -ENOMEM;
b481de9c 3945
8211ef78 3946 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
b481de9c
ZY
3947 GFP_KERNEL);
3948 if (!rates) {
b481de9c
ZY
3949 kfree(channels);
3950 return -ENOMEM;
3951 }
3952
b481de9c 3953 /* 5.2GHz channels start after the 2.4GHz channels */
8211ef78
TW
3954 sband = &priv->bands[IEEE80211_BAND_5GHZ];
3955 sband->channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)];
3956 /* just OFDM */
3957 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
3958 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
3959
3960 sband = &priv->bands[IEEE80211_BAND_2GHZ];
3961 sband->channels = channels;
3962 /* OFDM & CCK */
3963 sband->bitrates = rates;
3964 sband->n_bitrates = IWL_RATE_COUNT;
b481de9c
ZY
3965
3966 priv->ieee_channels = channels;
3967 priv->ieee_rates = rates;
3968
bb8c093b 3969 iwl3945_init_hw_rates(priv, rates);
b481de9c 3970
8211ef78 3971 for (i = 0; i < priv->channel_count; i++) {
b481de9c
ZY
3972 ch = &priv->channel_info[i];
3973
8211ef78
TW
 3974		/* FIXME: might be removed if scan is OK */
3975 if (!is_channel_valid(ch))
b481de9c 3976 continue;
b481de9c
ZY
3977
3978 if (is_channel_a_band(ch))
8211ef78 3979 sband = &priv->bands[IEEE80211_BAND_5GHZ];
8318d78a 3980 else
8211ef78 3981 sband = &priv->bands[IEEE80211_BAND_2GHZ];
b481de9c 3982
8211ef78
TW
3983 geo_ch = &sband->channels[sband->n_channels++];
3984
3985 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
8318d78a
JB
3986 geo_ch->max_power = ch->max_power_avg;
3987 geo_ch->max_antenna_gain = 0xff;
7b72304d 3988 geo_ch->hw_value = ch->channel;
b481de9c
ZY
3989
3990 if (is_channel_valid(ch)) {
8318d78a
JB
3991 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3992 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
b481de9c 3993
8318d78a
JB
3994 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3995 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
b481de9c
ZY
3996
3997 if (ch->flags & EEPROM_CHANNEL_RADAR)
8318d78a 3998 geo_ch->flags |= IEEE80211_CHAN_RADAR;
b481de9c 3999
62ea9c5b
WT
4000 if (ch->max_power_avg > priv->tx_power_channel_lmt)
4001 priv->tx_power_channel_lmt =
b481de9c 4002 ch->max_power_avg;
8211ef78 4003 } else {
8318d78a 4004 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
8211ef78
TW
4005 }
4006
4007 /* Save flags for reg domain usage */
4008 geo_ch->orig_flags = geo_ch->flags;
4009
4010 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4011 ch->channel, geo_ch->center_freq,
4012 is_channel_a_band(ch) ? "5.2" : "2.4",
4013 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4014 "restricted" : "valid",
4015 geo_ch->flags);
b481de9c
ZY
4016 }
4017
82b9a121
TW
4018 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4019 priv->cfg->sku & IWL_SKU_A) {
978785a3
TW
4020 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
4021 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
4022 priv->pci_dev->device, priv->pci_dev->subsystem_device);
82b9a121 4023 priv->cfg->sku &= ~IWL_SKU_A;
b481de9c
ZY
4024 }
4025
978785a3 4026 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
8318d78a
JB
4027 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4028 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
b481de9c 4029
e0e0a67e
JL
4030 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4031 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4032 &priv->bands[IEEE80211_BAND_2GHZ];
4033 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4034 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4035 &priv->bands[IEEE80211_BAND_5GHZ];
b481de9c 4036
b481de9c
ZY
4037 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4038
4039 return 0;
4040}
4041
849e0dce
RC
4042/*
4043 * iwl3945_free_geos - undo allocations in iwl3945_init_geos
4044 */
4a8a4322 4045static void iwl3945_free_geos(struct iwl_priv *priv)
849e0dce 4046{
849e0dce
RC
4047 kfree(priv->ieee_channels);
4048 kfree(priv->ieee_rates);
4049 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4050}
4051
b481de9c
ZY
4052/******************************************************************************
4053 *
4054 * uCode download functions
4055 *
4056 ******************************************************************************/
4057
4a8a4322 4058static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
b481de9c 4059{
98c92211
TW
4060 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
4061 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
4062 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
4063 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
4064 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
4065 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c
ZY
4066}
4067
4068/**
bb8c093b 4069 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
4070 * looking at all data.
4071 */
4a8a4322 4072static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
4073{
4074 u32 val;
4075 u32 save_len = len;
4076 int rc = 0;
4077 u32 errcnt;
4078
4079 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4080
5d49f498 4081 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4082 if (rc)
4083 return rc;
4084
5d49f498 4085 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
250bdd21 4086 IWL39_RTC_INST_LOWER_BOUND);
b481de9c
ZY
4087
4088 errcnt = 0;
4089 for (; len > 0; len -= sizeof(u32), image++) {
4090 /* read data comes through single port, auto-incr addr */
4091 /* NOTE: Use the debugless read so we don't flood kernel log
4092 * if IWL_DL_IO is set */
5d49f498 4093 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c 4094 if (val != le32_to_cpu(*image)) {
15b1687c 4095 IWL_ERR(priv, "uCode INST section is invalid at "
b481de9c
ZY
4096 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4097 save_len - len, val, le32_to_cpu(*image));
4098 rc = -EIO;
4099 errcnt++;
4100 if (errcnt >= 20)
4101 break;
4102 }
4103 }
4104
5d49f498 4105 iwl_release_nic_access(priv);
b481de9c
ZY
4106
4107 if (!errcnt)
bc434dd2 4108 IWL_DEBUG_INFO("ucode image in INSTRUCTION memory is good\n");
b481de9c
ZY
4109
4110 return rc;
4111}
4112
4113
4114/**
bb8c093b 4115 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
4116 * using sample data 100 bytes apart. If these sample points are good,
4117 * it's a pretty good bet that everything between them is good, too.
4118 */
4a8a4322 4119static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
4120{
4121 u32 val;
4122 int rc = 0;
4123 u32 errcnt = 0;
4124 u32 i;
4125
4126 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4127
5d49f498 4128 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4129 if (rc)
4130 return rc;
4131
4132 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
4133 /* read data comes through single port, auto-incr addr */
4134 /* NOTE: Use the debugless read so we don't flood kernel log
4135 * if IWL_DL_IO is set */
5d49f498 4136 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
250bdd21 4137 i + IWL39_RTC_INST_LOWER_BOUND);
5d49f498 4138 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
4139 if (val != le32_to_cpu(*image)) {
4140#if 0 /* Enable this if you want to see details */
15b1687c 4141 IWL_ERR(priv, "uCode INST section is invalid at "
b481de9c
ZY
4142 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4143 i, val, *image);
4144#endif
4145 rc = -EIO;
4146 errcnt++;
4147 if (errcnt >= 3)
4148 break;
4149 }
4150 }
4151
5d49f498 4152 iwl_release_nic_access(priv);
b481de9c
ZY
4153
4154 return rc;
4155}
4156
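/*
 * Editor's sketch (illustrative only): the spot-check pattern used by
 * iwl3945_verify_inst_sparse() above, expressed over plain host buffers.
 * One 32-bit word is compared every 100 bytes (i.e. every 25 words), which
 * keeps verification cheap while still catching gross corruption.
 */
#if 0
static int iwl3945_sparse_compare_sketch(const u32 *a, const u32 *b,
					 size_t len_bytes)
{
	size_t i;

	for (i = 0; i < len_bytes; i += 100)
		if (a[i / sizeof(u32)] != b[i / sizeof(u32)])
			return -EIO;
	return 0;
}
#endif
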
4157
4158/**
bb8c093b 4159 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
4160 * and verify its contents
4161 */
4a8a4322 4162static int iwl3945_verify_ucode(struct iwl_priv *priv)
b481de9c
ZY
4163{
4164 __le32 *image;
4165 u32 len;
4166 int rc = 0;
4167
4168 /* Try bootstrap */
4169 image = (__le32 *)priv->ucode_boot.v_addr;
4170 len = priv->ucode_boot.len;
bb8c093b 4171 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4172 if (rc == 0) {
4173 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
4174 return 0;
4175 }
4176
4177 /* Try initialize */
4178 image = (__le32 *)priv->ucode_init.v_addr;
4179 len = priv->ucode_init.len;
bb8c093b 4180 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4181 if (rc == 0) {
4182 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
4183 return 0;
4184 }
4185
4186 /* Try runtime/protocol */
4187 image = (__le32 *)priv->ucode_code.v_addr;
4188 len = priv->ucode_code.len;
bb8c093b 4189 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4190 if (rc == 0) {
4191 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
4192 return 0;
4193 }
4194
15b1687c 4195 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
b481de9c 4196
9fbab516
BC
4197 /* Since nothing seems to match, show first several data entries in
4198 * instruction SRAM, so maybe visual inspection will give a clue.
4199 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
4200 image = (__le32 *)priv->ucode_boot.v_addr;
4201 len = priv->ucode_boot.len;
bb8c093b 4202 rc = iwl3945_verify_inst_full(priv, image, len);
b481de9c
ZY
4203
4204 return rc;
4205}
4206
4a8a4322 4207static void iwl3945_nic_start(struct iwl_priv *priv)
b481de9c
ZY
4208{
4209 /* Remove all resets to allow NIC to operate */
5d49f498 4210 iwl_write32(priv, CSR_RESET, 0);
b481de9c
ZY
4211}
4212
4213/**
bb8c093b 4214 * iwl3945_read_ucode - Read uCode images from disk file.
b481de9c
ZY
4215 *
4216 * Copy into buffers for card to fetch via bus-mastering
4217 */
4a8a4322 4218static int iwl3945_read_ucode(struct iwl_priv *priv)
b481de9c 4219{
a78fe754 4220 struct iwl_ucode *ucode;
a0987a8d 4221 int ret = -EINVAL, index;
b481de9c
ZY
4222 const struct firmware *ucode_raw;
4223 /* firmware file name contains uCode/driver compatibility version */
a0987a8d
RC
4224 const char *name_pre = priv->cfg->fw_name_pre;
4225 const unsigned int api_max = priv->cfg->ucode_api_max;
4226 const unsigned int api_min = priv->cfg->ucode_api_min;
4227 char buf[25];
b481de9c
ZY
4228 u8 *src;
4229 size_t len;
a0987a8d 4230 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
b481de9c
ZY
4231
4232 /* Ask kernel firmware_class module to get the boot firmware off disk.
4233 * request_firmware() is synchronous, file is in memory on return. */
a0987a8d
RC
4234 for (index = api_max; index >= api_min; index--) {
4235 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
4236 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
4237 if (ret < 0) {
15b1687c 4238 IWL_ERR(priv, "%s firmware file req failed: %d\n",
a0987a8d
RC
4239 buf, ret);
4240 if (ret == -ENOENT)
4241 continue;
4242 else
4243 goto error;
4244 } else {
4245 if (index < api_max)
15b1687c
WT
4246 IWL_ERR(priv, "Loaded firmware %s, "
4247 "which is deprecated. "
 4248				"Please use API v%u instead.\n",
a0987a8d
RC
4249 buf, api_max);
4250 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
4251 buf, ucode_raw->size);
4252 break;
4253 }
b481de9c
ZY
4254 }
4255
a0987a8d
RC
4256 if (ret < 0)
4257 goto error;
b481de9c
ZY
4258
4259 /* Make sure that we got at least our header! */
4260 if (ucode_raw->size < sizeof(*ucode)) {
15b1687c 4261 IWL_ERR(priv, "File size way too small!\n");
90e759d1 4262 ret = -EINVAL;
b481de9c
ZY
4263 goto err_release;
4264 }
4265
4266 /* Data from ucode file: header followed by uCode images */
4267 ucode = (void *)ucode_raw->data;
4268
c02b3acd 4269 priv->ucode_ver = le32_to_cpu(ucode->ver);
a0987a8d 4270 api_ver = IWL_UCODE_API(priv->ucode_ver);
b481de9c
ZY
4271 inst_size = le32_to_cpu(ucode->inst_size);
4272 data_size = le32_to_cpu(ucode->data_size);
4273 init_size = le32_to_cpu(ucode->init_size);
4274 init_data_size = le32_to_cpu(ucode->init_data_size);
4275 boot_size = le32_to_cpu(ucode->boot_size);
4276
a0987a8d
RC
4277 /* api_ver should match the api version forming part of the
4278 * firmware filename ... but we don't check for that and only rely
 4279	 * on the API version read from the firmware header from here on forward */
4280
4281 if (api_ver < api_min || api_ver > api_max) {
15b1687c 4282 IWL_ERR(priv, "Driver unable to support your firmware API. "
a0987a8d
RC
4283 "Driver supports v%u, firmware is v%u.\n",
4284 api_max, api_ver);
4285 priv->ucode_ver = 0;
4286 ret = -EINVAL;
4287 goto err_release;
4288 }
4289 if (api_ver != api_max)
15b1687c 4290 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
a0987a8d
RC
4291 "got %u. New firmware can be obtained "
4292 "from http://www.intellinuxwireless.org.\n",
4293 api_max, api_ver);
4294
978785a3
TW
4295 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
4296 IWL_UCODE_MAJOR(priv->ucode_ver),
4297 IWL_UCODE_MINOR(priv->ucode_ver),
4298 IWL_UCODE_API(priv->ucode_ver),
4299 IWL_UCODE_SERIAL(priv->ucode_ver));
4300
a0987a8d
RC
4301 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n",
4302 priv->ucode_ver);
bc434dd2
IS
4303 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
4304 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size);
4305 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size);
4306 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size);
4307 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size);
b481de9c 4308
a0987a8d 4309
b481de9c
ZY
4310 /* Verify size of file vs. image size info in file's header */
4311 if (ucode_raw->size < sizeof(*ucode) +
4312 inst_size + data_size + init_size +
4313 init_data_size + boot_size) {
4314
4315 IWL_DEBUG_INFO("uCode file size %d too small\n",
4316 (int)ucode_raw->size);
90e759d1 4317 ret = -EINVAL;
b481de9c
ZY
4318 goto err_release;
4319 }
4320
4321 /* Verify that uCode images will fit in card's SRAM */
250bdd21 4322 if (inst_size > IWL39_MAX_INST_SIZE) {
90e759d1
TW
4323 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
4324 inst_size);
4325 ret = -EINVAL;
b481de9c
ZY
4326 goto err_release;
4327 }
4328
250bdd21 4329 if (data_size > IWL39_MAX_DATA_SIZE) {
90e759d1
TW
4330 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
4331 data_size);
4332 ret = -EINVAL;
b481de9c
ZY
4333 goto err_release;
4334 }
250bdd21 4335 if (init_size > IWL39_MAX_INST_SIZE) {
90e759d1
TW
4336 IWL_DEBUG_INFO("uCode init instr len %d too large to fit in\n",
4337 init_size);
4338 ret = -EINVAL;
b481de9c
ZY
4339 goto err_release;
4340 }
250bdd21 4341 if (init_data_size > IWL39_MAX_DATA_SIZE) {
90e759d1
TW
4342 IWL_DEBUG_INFO("uCode init data len %d too large to fit in\n",
4343 init_data_size);
4344 ret = -EINVAL;
b481de9c
ZY
4345 goto err_release;
4346 }
250bdd21 4347 if (boot_size > IWL39_MAX_BSM_SIZE) {
90e759d1
TW
4348 IWL_DEBUG_INFO("uCode boot instr len %d too large to fit in\n",
4349 boot_size);
4350 ret = -EINVAL;
b481de9c
ZY
4351 goto err_release;
4352 }
4353
4354 /* Allocate ucode buffers for card's bus-master loading ... */
4355
4356 /* Runtime instructions and 2 copies of data:
4357 * 1) unmodified from disk
4358 * 2) backup cache for save/restore during power-downs */
4359 priv->ucode_code.len = inst_size;
98c92211 4360 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
4361
4362 priv->ucode_data.len = data_size;
98c92211 4363 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
4364
4365 priv->ucode_data_backup.len = data_size;
98c92211 4366 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c 4367
90e759d1
TW
4368 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
4369 !priv->ucode_data_backup.v_addr)
4370 goto err_pci_alloc;
b481de9c
ZY
4371
4372 /* Initialization instructions and data */
90e759d1
TW
4373 if (init_size && init_data_size) {
4374 priv->ucode_init.len = init_size;
98c92211 4375 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
90e759d1
TW
4376
4377 priv->ucode_init_data.len = init_data_size;
98c92211 4378 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
90e759d1
TW
4379
4380 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
4381 goto err_pci_alloc;
4382 }
b481de9c
ZY
4383
4384 /* Bootstrap (instructions only, no data) */
90e759d1
TW
4385 if (boot_size) {
4386 priv->ucode_boot.len = boot_size;
98c92211 4387 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 4388
90e759d1
TW
4389 if (!priv->ucode_boot.v_addr)
4390 goto err_pci_alloc;
4391 }
b481de9c
ZY
4392
4393 /* Copy images into buffers for card's bus-master reads ... */
4394
4395 /* Runtime instructions (first block of data in file) */
4396 src = &ucode->data[0];
4397 len = priv->ucode_code.len;
90e759d1 4398 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
4399 memcpy(priv->ucode_code.v_addr, src, len);
4400 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
4401 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
4402
4403 /* Runtime data (2nd block)
bb8c093b 4404 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
b481de9c
ZY
4405 src = &ucode->data[inst_size];
4406 len = priv->ucode_data.len;
90e759d1 4407 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
4408 memcpy(priv->ucode_data.v_addr, src, len);
4409 memcpy(priv->ucode_data_backup.v_addr, src, len);
4410
4411 /* Initialization instructions (3rd block) */
4412 if (init_size) {
4413 src = &ucode->data[inst_size + data_size];
4414 len = priv->ucode_init.len;
90e759d1
TW
4415 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
4416 len);
b481de9c
ZY
4417 memcpy(priv->ucode_init.v_addr, src, len);
4418 }
4419
4420 /* Initialization data (4th block) */
4421 if (init_data_size) {
4422 src = &ucode->data[inst_size + data_size + init_size];
4423 len = priv->ucode_init_data.len;
4424 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
4425 (int)len);
4426 memcpy(priv->ucode_init_data.v_addr, src, len);
4427 }
4428
4429 /* Bootstrap instructions (5th block) */
4430 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
4431 len = priv->ucode_boot.len;
4432 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
4433 (int)len);
4434 memcpy(priv->ucode_boot.v_addr, src, len);
4435
 4436	/* We have our copies now, allow the OS to release its copy */
4437 release_firmware(ucode_raw);
4438 return 0;
4439
4440 err_pci_alloc:
15b1687c 4441 IWL_ERR(priv, "failed to allocate pci memory\n");
90e759d1 4442 ret = -ENOMEM;
bb8c093b 4443 iwl3945_dealloc_ucode_pci(priv);
b481de9c
ZY
4444
4445 err_release:
4446 release_firmware(ucode_raw);
4447
4448 error:
90e759d1 4449 return ret;
b481de9c
ZY
4450}
4451
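/*
 * Editor's sketch (illustrative only): the firmware file layout consumed by
 * iwl3945_read_ucode() above -- a struct iwl_ucode header followed by five
 * contiguous images in a fixed order.  The offsets below mirror the
 * &ucode->data[...] arithmetic in that function; the helper is invented.
 */
#if 0
static u32 iwl3945_boot_image_offset_sketch(u32 inst_size, u32 data_size,
					    u32 init_size, u32 init_data_size)
{
	/* offsets are relative to ucode->data[], i.e. just past the header */
	u32 data_off      = inst_size;			/* runtime data      */
	u32 init_off      = data_off + data_size;	/* init instructions */
	u32 init_data_off = init_off + init_size;	/* init data         */

	return init_data_off + init_data_size;		/* bootstrap image   */
}
#endif
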
4452
4453/**
bb8c093b 4454 * iwl3945_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
4455 *
4456 * Tell initialization uCode where to find runtime uCode.
4457 *
4458 * BSM registers initially contain pointers to initialization uCode.
4459 * We need to replace them to load runtime uCode inst and data,
4460 * and to save runtime data when powering down.
4461 */
4a8a4322 4462static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
b481de9c
ZY
4463{
4464 dma_addr_t pinst;
4465 dma_addr_t pdata;
4466 int rc = 0;
4467 unsigned long flags;
4468
4469 /* bits 31:0 for 3945 */
4470 pinst = priv->ucode_code.p_addr;
4471 pdata = priv->ucode_data_backup.p_addr;
4472
4473 spin_lock_irqsave(&priv->lock, flags);
5d49f498 4474 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4475 if (rc) {
4476 spin_unlock_irqrestore(&priv->lock, flags);
4477 return rc;
4478 }
4479
4480 /* Tell bootstrap uCode where to find image to load */
5d49f498
AK
4481 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
4482 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
4483 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
4484 priv->ucode_data.len);
4485
a96a27f9 4486	/* Inst byte count must be set up last; bit 31 signals uCode
b481de9c 4487 * that all new ptr/size info is in place */
5d49f498 4488 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
4489 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
4490
5d49f498 4491 iwl_release_nic_access(priv);
b481de9c
ZY
4492
4493 spin_unlock_irqrestore(&priv->lock, flags);
4494
4495 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
4496
4497 return rc;
4498}
4499
4500/**
bb8c093b 4501 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
4502 *
4503 * Called after REPLY_ALIVE notification received from "initialize" uCode.
4504 *
b481de9c 4505 * Tell "initialize" uCode to go ahead and load the runtime uCode.
9fbab516 4506 */
4a8a4322 4507static void iwl3945_init_alive_start(struct iwl_priv *priv)
b481de9c
ZY
4508{
4509 /* Check alive response for "valid" sign from uCode */
4510 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
4511 /* We had an error bringing up the hardware, so take it
4512 * all the way back down so we can try again */
4513 IWL_DEBUG_INFO("Initialize Alive failed.\n");
4514 goto restart;
4515 }
4516
4517 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
4518 * This is a paranoid check, because we would not have gotten the
4519 * "initialize" alive if code weren't properly loaded. */
bb8c093b 4520 if (iwl3945_verify_ucode(priv)) {
b481de9c
ZY
4521 /* Runtime instruction load was bad;
4522 * take it all the way back down so we can try again */
4523 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
4524 goto restart;
4525 }
4526
4527 /* Send pointers to protocol/runtime uCode image ... init code will
4528 * load and launch runtime uCode, which will send us another "Alive"
4529 * notification. */
4530 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 4531 if (iwl3945_set_ucode_ptrs(priv)) {
b481de9c
ZY
4532 /* Runtime instruction load won't happen;
4533 * take it all the way back down so we can try again */
4534 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
4535 goto restart;
4536 }
4537 return;
4538
4539 restart:
4540 queue_work(priv->workqueue, &priv->restart);
4541}
4542
4543
9bdf5eca
MA
4544/* temporary */
4545static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
4546 struct sk_buff *skb);
4547
b481de9c 4548/**
bb8c093b 4549 * iwl3945_alive_start - called after REPLY_ALIVE notification received
b481de9c 4550 * from protocol/runtime uCode (initialization uCode's
bb8c093b 4551 * Alive gets handled by iwl3945_init_alive_start()).
b481de9c 4552 */
static void iwl3945_alive_start(struct iwl_priv *priv)
{
	int rc = 0;
	int thermal_spin = 0;
	u32 rfkill;

	IWL_DEBUG_INFO("Runtime Alive received.\n");

	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded.  */
	if (iwl3945_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	iwl3945_clear_stations_table(priv);

	rc = iwl_grab_nic_access(priv);
	if (rc) {
		IWL_WARN(priv, "Can not read RFKILL status from adapter\n");
		return;
	}

	rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
	IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill);
	iwl_release_nic_access(priv);

	if (rfkill & 0x1) {
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
		/* if RFKILL is not on, then wait for thermal
		 * sensor in adapter to kick in */
		while (iwl3945_hw_get_temperature(priv) == 0) {
			thermal_spin++;
			udelay(10);
		}

		if (thermal_spin)
			IWL_DEBUG_INFO("Thermal calibration took %dus\n",
				       thermal_spin * 10);
	} else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	/* After the ALIVE response, we can send commands to 3945 uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	/* Clear out the uCode error bit if it is set */
	clear_bit(STATUS_FW_ERROR, &priv->status);

	if (iwl_is_rfkill(priv))
		return;

	ieee80211_wake_queues(priv->hw);

	priv->active_rate = priv->rates_mask;
	priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;

	iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));

	if (iwl3945_is_associated(priv)) {
		struct iwl3945_rxon_cmd *active_rxon =
			(struct iwl3945_rxon_cmd *)(&priv->active39_rxon);

		memcpy(&priv->staging39_rxon, &priv->active39_rxon,
		       sizeof(priv->staging39_rxon));
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		iwl3945_connection_init_rx_config(priv, priv->iw_mode);
		memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
	}

	/* Configure Bluetooth device coexistence support */
	iwl3945_send_bt_config(priv);

	/* Configure the adapter for unassociated operation */
	iwl3945_commit_rxon(priv);

	iwl3945_reg_txpower_periodic(priv);

	iwl3945_led_register(priv);

	IWL_DEBUG_INFO("ALIVE processing complete.\n");
	set_bit(STATUS_READY, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

	if (priv->error_recovering)
		iwl3945_error_recovery(priv);

	/* reassociate for ADHOC mode */
	if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
		struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
							      priv->vif);
		if (beacon)
			iwl3945_mac_beacon_update(priv->hw, beacon);
	}

	return;

 restart:
	queue_work(priv->workqueue, &priv->restart);
}

static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);

static void __iwl3945_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
	struct ieee80211_conf *conf = NULL;

	IWL_DEBUG_INFO(DRV_NAME " is going down\n");

	conf = ieee80211_get_hw_conf(priv->hw);

	if (!exit_pending)
		set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl3945_led_unregister(priv);
	iwl3945_clear_stations_table(priv);

	/* Unblock any waiting calls */
	wake_up_interruptible_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl3945_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl3945_init() then
	 * clear all bits but the RF Kill and SUSPEND bits and return */
	if (!iwl_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_RF_KILL_SW, &priv->status) <<
					STATUS_RF_KILL_SW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_IN_SUSPEND, &priv->status) <<
					STATUS_IN_SUSPEND |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill and
	 * SUSPEND bits and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_RF_KILL_SW, &priv->status) <<
				STATUS_RF_KILL_SW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_IN_SUSPEND, &priv->status) <<
				STATUS_IN_SUSPEND |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;
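	/*
	 * Worked example (illustrative only, not from the original sources):
	 * test_bit() returns 0 or 1, so shifting that result back by the same
	 * bit index and OR-ing the terms rebuilds a mask holding exactly the
	 * bits we want to preserve.  If, hypothetically, STATUS_RF_KILL_HW
	 * were bit 3 and it was the only bit set, the expression above would
	 * evaluate to (1 << 3) and the "&=" would clear every other status
	 * bit while leaving that one alone.
	 */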

	priv->cfg->ops->lib->apm_ops.reset(priv);
	spin_lock_irqsave(&priv->lock, flags);
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl3945_hw_txq_ctx_stop(priv);
	iwl3945_hw_rxq_stop(priv);

	spin_lock_irqsave(&priv->lock, flags);
	if (!iwl_grab_nic_access(priv)) {
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	udelay(5);

	if (exit_pending || test_bit(STATUS_IN_SUSPEND, &priv->status))
		priv->cfg->ops->lib->apm_ops.stop(priv);
	else
		priv->cfg->ops->lib->apm_ops.reset(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);
	priv->ibss_beacon = NULL;

	/* clear out any free frames */
	iwl3945_clear_free_frames(priv);
}

static void iwl3945_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl3945_down(priv);
	mutex_unlock(&priv->mutex);

	iwl3945_cancel_deferred_work(priv);
}

#define MAX_HW_RESTARTS 5

static int __iwl3945_up(struct iwl_priv *priv)
{
	int rc, i;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
		IWL_WARN(priv, "Radio disabled by SW RF kill (module "
			    "parameter)\n");
		return -ENODEV;
	}

	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bring up\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
	    CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else {
		set_bit(STATUS_RF_KILL_HW, &priv->status);
		if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
			IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
			return -ENODEV;
		}
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	rc = iwl3945_hw_nic_init(priv);
	if (rc) {
		IWL_ERR(priv, "Unable to init nic\n");
		return rc;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl3945_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
	       priv->ucode_data.len);

	/* We return success when we resume from suspend and rf_kill is on. */
	if (test_bit(STATUS_RF_KILL_HW, &priv->status))
		return 0;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		iwl3945_clear_stations_table(priv);

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		rc = priv->cfg->ops->lib->load_ucode(priv);

		if (rc) {
			IWL_ERR(priv,
				"Unable to set up bootstrap uCode: %d\n", rc);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl3945_nic_start(priv);

		IWL_DEBUG_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl3945_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}

/*****************************************************************************
 *
 * Workqueue callbacks
 *
 *****************************************************************************/

static void iwl3945_bg_init_alive_start(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, init_alive_start.work);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwl3945_init_alive_start(priv);
	mutex_unlock(&priv->mutex);
}

static void iwl3945_bg_alive_start(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, alive_start.work);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwl3945_alive_start(priv);
	mutex_unlock(&priv->mutex);
}

static void iwl3945_rfkill_poll(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, rfkill_poll.work);
	unsigned long status = priv->status;

	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	if (test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status))
		queue_work(priv->workqueue, &priv->rf_kill);

	queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
			   round_jiffies_relative(2 * HZ));
}
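/*
 * Note (added for clarity, not in the original sources): the poll above
 * re-arms itself roughly every two seconds; round_jiffies_relative() aligns
 * the expiry to a whole-jiffy boundary so that unrelated timers firing on
 * the same cadence can be batched and the CPU woken less often.
 */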

#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
static void iwl3945_bg_request_scan(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, request_scan);
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl3945_scan_cmd),
		.meta.flags = CMD_SIZE_HUGE,
	};
	int rc = 0;
	struct iwl3945_scan_cmd *scan;
	struct ieee80211_conf *conf = NULL;
	u8 n_probes = 2;
	enum ieee80211_band band;
	DECLARE_SSID_BUF(ssid);

	conf = ieee80211_get_hw_conf(priv->hw);

	mutex_lock(&priv->mutex);

	if (!iwl_is_ready(priv)) {
		IWL_WARN(priv, "request scan called when driver not ready.\n");
		goto done;
	}

	/* Make sure the scan wasn't canceled before this queued work
	 * was given the chance to run... */
	if (!test_bit(STATUS_SCANNING, &priv->status))
		goto done;

	/* This should never be called or scheduled if there is currently
	 * a scan active in the hardware. */
	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
			       "Ignoring second request.\n");
		rc = -EIO;
		goto done;
	}

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
		goto done;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
		goto done;
	}

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
		goto done;
	}

	if (!test_bit(STATUS_READY, &priv->status)) {
		IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
		goto done;
	}

	if (!priv->scan_bands) {
		IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
		goto done;
	}

	if (!priv->scan) {
		priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
				     IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan) {
			rc = -ENOMEM;
			goto done;
		}
	}
	scan = priv->scan;
	memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl3945_is_associated(priv)) {
		u16 interval = 0;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;
		unsigned long flags;

		IWL_DEBUG_INFO("Scanning while associated...\n");

		spin_lock_irqsave(&priv->lock, flags);
		interval = priv->beacon_int;
		spin_unlock_irqrestore(&priv->lock, flags);

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;
		/*
		 * suspend time format:
		 *  0-19: beacon interval in usec (time before exec.)
		 * 20-23: 0
		 * 24-31: number of beacons (suspend between channels)
		 */

		extra = (suspend_time / interval) << 24;
		scan_suspend_time = 0xFF0FFFFF &
			(extra | ((suspend_time % interval) * 1024));

		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}
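	/*
	 * Worked example (illustrative, not from the original sources): with
	 * suspend_time = 100 and a beacon interval of 100, the top byte gets
	 * 100 / 100 = 1 beacon (extra = 1 << 24) and the low bits get
	 * (100 % 100) * 1024 = 0, so scan_suspend_time ends up 0x01000000,
	 * i.e. "suspend for one beacon, with no extra microseconds" between
	 * scanned channels.
	 */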

	/* We should add the ability for user to lock to PASSIVE ONLY */
	if (priv->one_direct_scan) {
		IWL_DEBUG_SCAN
		    ("Kicking off one direct scan for '%s'\n",
		     print_ssid(ssid, priv->direct_ssid,
				priv->direct_ssid_len));
		scan->direct_scan[0].id = WLAN_EID_SSID;
		scan->direct_scan[0].len = priv->direct_ssid_len;
		memcpy(scan->direct_scan[0].ssid,
		       priv->direct_ssid, priv->direct_ssid_len);
		n_probes++;
	} else
		IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");

	/* We don't build a direct scan probe request; the uCode will do
	 * that based on the direct_mask added to each channel entry */
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* flags + rate selection */

	if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
		scan->good_CRC_th = 0;
		band = IEEE80211_BAND_2GHZ;
	} else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
		scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
		scan->good_CRC_th = IWL_GOOD_CRC_TH;
		band = IEEE80211_BAND_5GHZ;
	} else {
		IWL_WARN(priv, "Invalid scan band count\n");
		goto done;
	}

	scan->tx_cmd.len = cpu_to_le16(
			iwl_fill_probe_req(priv, band,
					   (struct ieee80211_mgmt *)scan->data,
					   IWL_MAX_SCAN_SIZE - sizeof(*scan)));

	/* select Rx antennas */
	scan->flags |= iwl3945_get_antenna_flags(priv);

	if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
		scan->filter_flags = RXON_FILTER_PROMISC_MSK;

	scan->channel_count =
		iwl3945_get_channels_for_scan(priv, band, 1, /* active */
					      n_probes,
					      (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);

	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
		goto done;
	}

	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl3945_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);
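	/*
	 * Layout note (illustrative, derived from the sizing math above): the
	 * command handed to the uCode is the fixed iwl3945_scan_cmd header,
	 * followed in scan->data[] by the probe request frame (tx_cmd.len
	 * bytes) and then one iwl3945_scan_channel entry per channel, which
	 * is why cmd.len grows by both terms.
	 */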

	set_bit(STATUS_SCAN_HW, &priv->status);
	rc = iwl_send_cmd_sync(priv, &cmd);
	if (rc)
		goto done;

	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	mutex_unlock(&priv->mutex);
	return;

 done:
	/* Cannot perform scan.  Make sure we clear the scanning bits from
	 * status so the next scan request can be performed; if we don't
	 * clear them here, every subsequent scan will fail. */
	clear_bit(STATUS_SCAN_HW, &priv->status);
	clear_bit(STATUS_SCANNING, &priv->status);

	/* inform mac80211 scan aborted */
	queue_work(priv->workqueue, &priv->scan_completed);
	mutex_unlock(&priv->mutex);
}

static void iwl3945_bg_up(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, up);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	__iwl3945_up(priv);
	mutex_unlock(&priv->mutex);
	iwl_rfkill_set_hw_state(priv);
}

static void iwl3945_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	iwl3945_down(priv);
	queue_work(priv->workqueue, &priv->up);
}

static void iwl3945_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, rx_replenish);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwl3945_rx_replenish(priv);
	mutex_unlock(&priv->mutex);
}

#define IWL_DELAY_NEXT_SCAN (HZ*2)

static void iwl3945_post_associate(struct iwl_priv *priv)
{
	int rc = 0;
	struct ieee80211_conf *conf = NULL;

	if (priv->iw_mode == NL80211_IFTYPE_AP) {
		IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
		return;
	}

	IWL_DEBUG_ASSOC("Associated as %d to: %pM\n",
			priv->assoc_id, priv->active39_rxon.bssid_addr);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!priv->vif || !priv->is_open)
		return;

	iwl_scan_cancel_timeout(priv, 200);

	conf = ieee80211_get_hw_conf(priv->hw);

	priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl3945_commit_rxon(priv);

	memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
	iwl3945_setup_rxon_timing(priv);
	rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
			      sizeof(priv->rxon_timing), &priv->rxon_timing);
	if (rc)
		IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
			    "Attempting to continue.\n");

	priv->staging39_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;

	priv->staging39_rxon.assoc_id = cpu_to_le16(priv->assoc_id);

	IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
			priv->assoc_id, priv->beacon_int);

	if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) {
		if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
			priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
			priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	iwl3945_commit_rxon(priv);

	switch (priv->iw_mode) {
	case NL80211_IFTYPE_STATION:
		iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
		break;

	case NL80211_IFTYPE_ADHOC:

		priv->assoc_id = 1;
		iwl3945_add_station(priv, priv->bssid, 0, 0);
		iwl3945_sync_sta(priv, IWL_STA_ID,
				 (priv->band == IEEE80211_BAND_5GHZ) ?
				 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
				 CMD_ASYNC);
		iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
		iwl3945_send_beacon_cmd(priv);

		break;

	default:
		IWL_ERR(priv, "%s Should not be called in %d mode\n",
			__func__, priv->iw_mode);
		break;
	}

	iwl3945_activate_qos(priv, 0);

	/* we have just associated, don't start scan too early */
	priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
}

static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed);

/*****************************************************************************
 *
 * mac80211 entry point functions
 *
 *****************************************************************************/

#define UCODE_READY_TIMEOUT	(2 * HZ)

static int iwl3945_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);

	memset(&priv->staging39_rxon, 0, sizeof(struct iwl3945_rxon_cmd));
	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
	 * ucode filename and max sizes are card-specific. */

	if (!priv->ucode_code.len) {
		ret = iwl3945_read_ucode(priv);
		if (ret) {
			IWL_ERR(priv, "Could not read microcode: %d\n", ret);
			mutex_unlock(&priv->mutex);
			goto out_release_irq;
		}
	}

	ret = __iwl3945_up(priv);

	mutex_unlock(&priv->mutex);

	iwl_rfkill_set_hw_state(priv);

	if (ret)
		goto out_release_irq;

	IWL_DEBUG_INFO("Start UP work.\n");

	if (test_bit(STATUS_IN_SUSPEND, &priv->status))
		return 0;

	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv,
				"Wait for START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			ret = -ETIMEDOUT;
			goto out_release_irq;
		}
	}

	/* ucode is running and will send rfkill notifications,
	 * no need to poll the killswitch state anymore */
	cancel_delayed_work(&priv->rfkill_poll);

	priv->is_open = 1;
	IWL_DEBUG_MAC80211("leave\n");
	return 0;

out_release_irq:
	priv->is_open = 0;
	IWL_DEBUG_MAC80211("leave - failed\n");
	return ret;
}

static void iwl3945_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211("enter\n");

	if (!priv->is_open) {
		IWL_DEBUG_MAC80211("leave - skip\n");
		return;
	}

	priv->is_open = 0;

	if (iwl_is_ready_rf(priv)) {
		/* stop mac, cancel any scan request and clear
		 * RXON_FILTER_ASSOC_MSK BIT
		 */
		mutex_lock(&priv->mutex);
		iwl_scan_cancel_timeout(priv, 100);
		mutex_unlock(&priv->mutex);
	}

	iwl3945_down(priv);

	flush_workqueue(priv->workqueue);

	/* start polling the killswitch state again */
	queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
			   round_jiffies_relative(2 * HZ));

	IWL_DEBUG_MAC80211("leave\n");
}

static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211("enter\n");

	IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwl3945_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MAC80211("leave\n");
	return NETDEV_TX_OK;
}

static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_if_init_conf *conf)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;

	IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);

	if (priv->vif) {
		IWL_DEBUG_MAC80211("leave - vif != NULL\n");
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&priv->lock, flags);
	priv->vif = conf->vif;
	priv->iw_mode = conf->type;

	spin_unlock_irqrestore(&priv->lock, flags);

	mutex_lock(&priv->mutex);

	if (conf->mac_addr) {
		IWL_DEBUG_MAC80211("Set: %pM\n", conf->mac_addr);
		memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
	}

	if (iwl_is_ready(priv))
		iwl3945_set_mode(priv, conf->type);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211("leave\n");
	return 0;
}

/**
 * iwl3945_mac_config - mac80211 config callback
 *
 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
 * be set inappropriately and the driver currently sets the hardware up to
 * use it whenever needed.
 */
static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	unsigned long flags;
	int ret = 0;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_MAC80211("leave - not ready\n");
		ret = -EIO;
		goto out;
	}

	if (unlikely(!iwl3945_mod_params.disable_hw_scan &&
		     test_bit(STATUS_SCANNING, &priv->status))) {
		IWL_DEBUG_MAC80211("leave - scanning\n");
		set_bit(STATUS_CONF_PENDING, &priv->status);
		mutex_unlock(&priv->mutex);
		return 0;
	}

	spin_lock_irqsave(&priv->lock, flags);

	ch_info = iwl3945_get_channel_info(priv, conf->channel->band,
					   conf->channel->hw_value);
	if (!is_channel_valid(ch_info)) {
		IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n",
			       conf->channel->hw_value, conf->channel->band);
		IWL_DEBUG_MAC80211("leave - invalid channel\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	iwl3945_set_rxon_channel(priv, conf->channel->band, conf->channel->hw_value);

	iwl3945_set_flags_for_phymode(priv, conf->channel->band);

	/* The list of supported rates and rate mask can be different
	 * for each phymode; since the phymode may have changed, reset
	 * the rate mask to what mac80211 lists */
	iwl3945_set_rate(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

#ifdef IEEE80211_CONF_CHANNEL_SWITCH
	if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
		iwl3945_hw_channel_switch(priv, conf->channel);
		goto out;
	}
#endif

	iwl3945_radio_kill_sw(priv, !conf->radio_enabled);

	if (!conf->radio_enabled) {
		IWL_DEBUG_MAC80211("leave - radio disabled\n");
		goto out;
	}

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_MAC80211("leave - RF kill\n");
		ret = -EIO;
		goto out;
	}

	iwl3945_set_rate(priv);

	if (memcmp(&priv->active39_rxon,
		   &priv->staging39_rxon, sizeof(priv->staging39_rxon)))
		iwl3945_commit_rxon(priv);
	else
		IWL_DEBUG_INFO("Not re-sending same RXON configuration.\n");

	IWL_DEBUG_MAC80211("leave\n");

out:
	clear_bit(STATUS_CONF_PENDING, &priv->status);
	mutex_unlock(&priv->mutex);
	return ret;
}

static void iwl3945_config_ap(struct iwl_priv *priv)
{
	int rc = 0;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* The following should be done only at AP bring up */
	if (!(iwl3945_is_associated(priv))) {

		/* RXON - unassoc (to set timing command) */
		priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv);

		/* RXON Timing */
		memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
		iwl3945_setup_rxon_timing(priv);
		rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
				      sizeof(priv->rxon_timing),
				      &priv->rxon_timing);
		if (rc)
			IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
				    "Attempting to continue.\n");

		/* FIXME: what should be the assoc_id for AP? */
		priv->staging39_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
		if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
			priv->staging39_rxon.flags |=
				RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			priv->staging39_rxon.flags &=
				~RXON_FLG_SHORT_PREAMBLE_MSK;

		if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) {
			if (priv->assoc_capability &
				WLAN_CAPABILITY_SHORT_SLOT_TIME)
				priv->staging39_rxon.flags |=
					RXON_FLG_SHORT_SLOT_MSK;
			else
				priv->staging39_rxon.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;

			if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
				priv->staging39_rxon.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		priv->staging39_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv);
		iwl3945_add_station(priv, iwl_bcast_addr, 0, 0);
	}
	iwl3945_send_beacon_cmd(priv);

	/* FIXME - we need to add code here to detect a totally new
	 * configuration, reset the AP, unassoc, rxon timing, assoc,
	 * clear sta table, add BCAST sta... */
}

static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_if_conf *conf)
{
	struct iwl_priv *priv = hw->priv;
	int rc;

	if (conf == NULL)
		return -EIO;

	if (priv->vif != vif) {
		IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
		return 0;
	}

	/* handle this temporarily here */
	if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
	    conf->changed & IEEE80211_IFCC_BEACON) {
		struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
		if (!beacon)
			return -ENOMEM;
		mutex_lock(&priv->mutex);
		rc = iwl3945_mac_beacon_update(hw, beacon);
		mutex_unlock(&priv->mutex);
		if (rc)
			return rc;
	}

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	mutex_lock(&priv->mutex);

	if (conf->bssid)
		IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid);

/*
 * very dubious code was here; the probe filtering flag is never set:
 *
	if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
	    !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
 */

	if (priv->iw_mode == NL80211_IFTYPE_AP) {
		if (!conf->bssid) {
			conf->bssid = priv->mac_addr;
			memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
			IWL_DEBUG_MAC80211("bssid was set to: %pM\n",
					   conf->bssid);
		}
		if (priv->ibss_beacon)
			dev_kfree_skb(priv->ibss_beacon);

		priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
	}

	if (iwl_is_rfkill(priv))
		goto done;

	if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
	    !is_multicast_ether_addr(conf->bssid)) {
		/* If there is currently a HW scan going on in the background
		 * then we need to cancel it else the RXON below will fail. */
		if (iwl_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv, "Aborted scan still in progress "
				    "after 100ms\n");
			IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return -EAGAIN;
		}
		memcpy(priv->staging39_rxon.bssid_addr, conf->bssid, ETH_ALEN);

		/* TODO: Audit driver for usage of these members and see
		 * if mac80211 deprecates them (priv->bssid looks like it
		 * shouldn't be there, but I haven't scanned the IBSS code
		 * to verify) - jpk */
		memcpy(priv->bssid, conf->bssid, ETH_ALEN);

		if (priv->iw_mode == NL80211_IFTYPE_AP)
			iwl3945_config_ap(priv);
		else {
			rc = iwl3945_commit_rxon(priv);
			if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
				iwl3945_add_station(priv,
					priv->active39_rxon.bssid_addr, 1, 0);
		}

	} else {
		iwl_scan_cancel_timeout(priv, 100);
		priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv);
	}

 done:
	IWL_DEBUG_MAC80211("leave\n");
	mutex_unlock(&priv->mutex);

	return 0;
}

static void iwl3945_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     int mc_count, struct dev_addr_list *mc_list)
{
	struct iwl_priv *priv = hw->priv;
	__le32 *filter_flags = &priv->staging39_rxon.filter_flags;

	IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
			   changed_flags, *total_flags);

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
			*filter_flags |= RXON_FILTER_PROMISC_MSK;
		else
			*filter_flags &= ~RXON_FILTER_PROMISC_MSK;
	}
	if (changed_flags & FIF_ALLMULTI) {
		if (*total_flags & FIF_ALLMULTI)
			*filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
		else
			*filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
	}
	if (changed_flags & FIF_CONTROL) {
		if (*total_flags & FIF_CONTROL)
			*filter_flags |= RXON_FILTER_CTL2HOST_MSK;
		else
			*filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
	}
	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
		if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
			*filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			*filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
	}

	/* We avoid iwl_commit_rxon here to commit the new filter flags
	 * since mac80211 will call ieee80211_hw_config immediately.
	 * (mc_list is not supported at this time). Otherwise, we need to
	 * queue a background iwl_commit_rxon work.
	 */

	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
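/*
 * Note (added for clarity, not in the original sources): masking
 * *total_flags before returning is how this callback reports back to
 * mac80211 which receive-filter flags the hardware will actually honor;
 * anything cleared here has to be handled in software by the stack.
 */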

static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_if_init_conf *conf)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211("enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_is_ready_rf(priv)) {
		iwl_scan_cancel_timeout(priv, 100);
		priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv);
	}
	if (priv->vif == conf->vif) {
		priv->vif = NULL;
		memset(priv->bssid, 0, ETH_ALEN);
	}
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211("leave\n");
}

#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)

static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
			priv->staging39_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			priv->staging39_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
		/* This should never happen as this function should
		 * never be called from interrupt context. */
		if (WARN_ON_ONCE(in_interrupt()))
			return;
		if (bss_conf->assoc) {
			priv->assoc_id = bss_conf->aid;
			priv->beacon_int = bss_conf->beacon_int;
			priv->timestamp = bss_conf->timestamp;
			priv->assoc_capability = bss_conf->assoc_capability;
			priv->power_data.dtim_period = bss_conf->dtim_period;
			priv->next_scan_jiffies = jiffies +
					IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
			mutex_lock(&priv->mutex);
			iwl3945_post_associate(priv);
			mutex_unlock(&priv->mutex);
		} else {
			priv->assoc_id = 0;
			IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
		}
	} else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) {
		IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
		iwl3945_send_rxon_assoc(priv);
	}

}

static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
{
	int rc = 0;
	unsigned long flags;
	struct iwl_priv *priv = hw->priv;
	DECLARE_SSID_BUF(ssid_buf);

	IWL_DEBUG_MAC80211("enter\n");

	mutex_lock(&priv->mutex);
	spin_lock_irqsave(&priv->lock, flags);

	if (!iwl_is_ready_rf(priv)) {
		rc = -EIO;
		IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
		goto out_unlock;
	}

	/* we don't schedule scan within next_scan_jiffies period */
	if (priv->next_scan_jiffies &&
	    time_after(priv->next_scan_jiffies, jiffies)) {
		rc = -EAGAIN;
		goto out_unlock;
	}
	/* if we just finished scan ask for delay for a broadcast scan */
	if ((len == 0) && priv->last_scan_jiffies &&
	    time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
		       jiffies)) {
		rc = -EAGAIN;
		goto out_unlock;
	}
	if (len) {
		IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
			       print_ssid(ssid_buf, ssid, len), (int)len);

		priv->one_direct_scan = 1;
		priv->direct_ssid_len = (u8)
			min((u8) len, (u8) IW_ESSID_MAX_SIZE);
		memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
	} else
		priv->one_direct_scan = 0;

	rc = iwl3945_scan_initiate(priv);

	IWL_DEBUG_MAC80211("leave\n");

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	mutex_unlock(&priv->mutex);

	return rc;
}

static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = hw->priv;
	const u8 *addr;
	int ret;
	u8 sta_id;

	IWL_DEBUG_MAC80211("enter\n");

	if (iwl3945_mod_params.sw_crypto) {
		IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	addr = sta ? sta->addr : iwl_bcast_addr;
	sta_id = iwl3945_hw_find_station(priv, addr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_MAC80211("leave - %pM not in station map.\n",
				   addr);
		return -EINVAL;
	}

	mutex_lock(&priv->mutex);

	iwl_scan_cancel_timeout(priv, 100);

	switch (cmd) {
	case SET_KEY:
		ret = iwl3945_update_sta_key_info(priv, key, sta_id);
		if (!ret) {
			iwl3945_set_rxon_hwcrypto(priv, 1);
			iwl3945_commit_rxon(priv);
			key->hw_key_idx = sta_id;
			IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		}
		break;
	case DISABLE_KEY:
		ret = iwl3945_clear_sta_key_info(priv, sta_id);
		if (!ret) {
			iwl3945_set_rxon_hwcrypto(priv, 0);
			iwl3945_commit_rxon(priv);
			IWL_DEBUG_MAC80211("disable hwcrypto key\n");
		}
		break;
	default:
		ret = -EINVAL;
	}

	IWL_DEBUG_MAC80211("leave\n");
	mutex_unlock(&priv->mutex);

	return ret;
}

static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
			       const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	int q;

	IWL_DEBUG_MAC80211("enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	q = AC_NUM - 1 - queue;
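	/*
	 * Note (added for clarity, not in the original sources): mac80211
	 * numbers its queues from highest priority downward, while the
	 * def_qos_parm.ac[] table appears to be indexed in the opposite
	 * order, so the index is mirrored here; e.g. with AC_NUM == 4,
	 * mac80211 queue 0 ends up in ac[3].
	 */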

	spin_lock_irqsave(&priv->lock, flags);

	priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
	priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
	priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	priv->qos_data.def_qos_parm.ac[q].edca_txop =
			cpu_to_le16((params->txop * 32));
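	/*
	 * Unit note (illustrative, not from the original sources): mac80211
	 * hands over txop in units of 32 usec, so multiplying by 32 converts
	 * it to plain microseconds for the uCode; e.g. a txop value of 94
	 * is sent as 3008 usec (about 3 ms).
	 */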

	priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	priv->qos_data.qos_active = 1;

	spin_unlock_irqrestore(&priv->lock, flags);

	mutex_lock(&priv->mutex);
	if (priv->iw_mode == NL80211_IFTYPE_AP)
		iwl3945_activate_qos(priv, 1);
	else if (priv->assoc_id && iwl3945_is_associated(priv))
		iwl3945_activate_qos(priv, 0);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211("leave\n");
	return 0;
}

static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
				    struct ieee80211_tx_queue_stats *stats)
{
	struct iwl_priv *priv = hw->priv;
	int i, avail;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	unsigned long flags;

	IWL_DEBUG_MAC80211("enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	spin_lock_irqsave(&priv->lock, flags);

	for (i = 0; i < AC_NUM; i++) {
		txq = &priv->txq[i];
		q = &txq->q;
		avail = iwl_queue_space(q);

		stats[i].len = q->n_window - avail;
		stats[i].limit = q->n_window - q->high_mark;
		stats[i].count = q->n_window;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211("leave\n");

	return 0;
}

static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211("enter\n");

	iwl_reset_qos(priv);

	spin_lock_irqsave(&priv->lock, flags);
	priv->assoc_id = 0;
	priv->assoc_capability = 0;

	/* new association get rid of ibss beacon skb */
	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = NULL;

	priv->beacon_int = priv->hw->conf.beacon_int;
	priv->timestamp = 0;
	if ((priv->iw_mode == NL80211_IFTYPE_STATION))
		priv->beacon_int = 0;

	spin_unlock_irqrestore(&priv->lock, flags);

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211("leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	if (priv->iw_mode != NL80211_IFTYPE_AP) {
		iwl_scan_cancel_timeout(priv, 100);
		priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv);
	}

	/* Per mac80211.h: This is only used in IBSS mode... */
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {

		IWL_DEBUG_MAC80211("leave - not in IBSS\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	iwl3945_set_rate(priv);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211("leave\n");

}

static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;

	IWL_DEBUG_MAC80211("enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		IWL_DEBUG_MAC80211("leave - not IBSS\n");
		return -EIO;
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = skb;

	priv->assoc_id = 0;

	IWL_DEBUG_MAC80211("leave\n");
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_reset_qos(priv);

	iwl3945_post_associate(priv);

	return 0;
}

/*****************************************************************************
 *
 * sysfs attributes
 *
 *****************************************************************************/

#ifdef CONFIG_IWL3945_DEBUG

/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
 * used for controlling the debug level.
 *
 * See the level definitions in iwl for details.
 */
static ssize_t show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = d->driver_data;

	return sprintf(buf, "0x%08X\n", priv->debug_level);
}
static ssize_t store_debug_level(struct device *d,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = d->driver_data;
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
	else
		priv->debug_level = val;

	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
		   show_debug_level, store_debug_level);
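
/*
 * Usage sketch (illustrative only; the exact sysfs location depends on how
 * the device attribute is registered and on the kernel version):
 *
 *   cat  <sysfs-path-of-device>/debug_level
 *   echo 0x43fff > <sysfs-path-of-device>/debug_level
 *
 * Values are accepted in hex or decimal, as parsed by strict_strtoul()
 * above; 0x43fff is just a hypothetical debug bitmask.
 */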

#endif /* CONFIG_IWL3945_DEBUG */

static ssize_t show_temperature(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
}

static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);

static ssize_t show_tx_power(struct device *d,
			     struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
	return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
}

static ssize_t store_tx_power(struct device *d,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
	char *p = (char *)buf;
	u32 val;

	val = simple_strtoul(p, &p, 10);
	if (p == buf)
		IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
	else
		iwl3945_hw_reg_set_txpower(priv, val);

	return count;
}

static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);

static ssize_t show_flags(struct device *d,
			  struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;

	return sprintf(buf, "0x%04X\n", priv->active39_rxon.flags);
}

static ssize_t store_flags(struct device *d,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
	u32 flags = simple_strtoul(buf, NULL, 0);

	mutex_lock(&priv->mutex);
	if (le32_to_cpu(priv->staging39_rxon.flags) != flags) {
		/* Cancel any currently running scans... */
		if (iwl_scan_cancel_timeout(priv, 100))
			IWL_WARN(priv, "Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
				       flags);
			priv->staging39_rxon.flags = cpu_to_le32(flags);
			iwl3945_commit_rxon(priv);
		}
	}
	mutex_unlock(&priv->mutex);

	return count;
}

static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);

static ssize_t show_filter_flags(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;

	return sprintf(buf, "0x%04X\n",
		       le32_to_cpu(priv->active39_rxon.filter_flags));
}

static ssize_t store_filter_flags(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
	u32 filter_flags = simple_strtoul(buf, NULL, 0);

	mutex_lock(&priv->mutex);
	if (le32_to_cpu(priv->staging39_rxon.filter_flags) != filter_flags) {
		/* Cancel any currently running scans... */
		if (iwl_scan_cancel_timeout(priv, 100))
			IWL_WARN(priv, "Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO("Committing rxon.filter_flags = "
				       "0x%04X\n", filter_flags);
			priv->staging39_rxon.filter_flags =
				cpu_to_le32(filter_flags);
			iwl3945_commit_rxon(priv);
		}
	}
	mutex_unlock(&priv->mutex);

	return count;
}

static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
		   store_filter_flags);

#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT

static ssize_t show_measurement(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	struct iwl_spectrum_notification measure_report;
	u32 size = sizeof(measure_report), len = 0, ofs = 0;
	u8 *data = (u8 *)&measure_report;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (!(priv->measurement_status & MEASUREMENT_READY)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return 0;
	}
	memcpy(&measure_report, &priv->measure_report, size);
	priv->measurement_status = 0;
	spin_unlock_irqrestore(&priv->lock, flags);

	while (size && (PAGE_SIZE - len)) {
		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
				   PAGE_SIZE - len, 1);
		len = strlen(buf);
		if (PAGE_SIZE - len)
			buf[len++] = '\n';

		ofs += 16;
		size -= min(size, 16U);
	}

	return len;
}

static ssize_t store_measurement(struct device *d,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	struct ieee80211_measurement_params params = {
		.channel = le16_to_cpu(priv->active39_rxon.channel),
		.start_time = cpu_to_le64(priv->last_tsf),
		.duration = cpu_to_le16(1),
	};
	u8 type = IWL_MEASURE_BASIC;
	u8 buffer[32];
	u8 channel;

	if (count) {
		char *p = buffer;
		strncpy(buffer, buf, min(sizeof(buffer), count));
		channel = simple_strtoul(p, NULL, 0);
		if (channel)
			params.channel = channel;

		p = buffer;
		while (*p && *p != ' ')
			p++;
		if (*p)
			type = simple_strtoul(p + 1, NULL, 0);
	}

	IWL_DEBUG_INFO("Invoking measurement of type %d on "
		       "channel %d (for '%s')\n", type, params.channel, buf);
	iwl3945_get_measurement(priv, &params, type);

	return count;
}

static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
		   show_measurement, store_measurement);
#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */

static ssize_t store_retry_rate(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	priv->retry_rate = simple_strtoul(buf, NULL, 0);
	if (priv->retry_rate <= 0)
		priv->retry_rate = 1;

	return count;
}

static ssize_t show_retry_rate(struct device *d,
			       struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%d", priv->retry_rate);
}

static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
		   store_retry_rate);

static ssize_t store_power_level(struct device *d,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	int rc;
	int mode;

	mode = simple_strtoul(buf, NULL, 0);
	mutex_lock(&priv->mutex);

	if (!iwl_is_ready(priv)) {
		rc = -EAGAIN;
		goto out;
	}

	if ((mode < 1) || (mode > IWL39_POWER_LIMIT) ||
	    (mode == IWL39_POWER_AC))
		mode = IWL39_POWER_AC;
	else
		mode |= IWL_POWER_ENABLED;

	if (mode != priv->power_mode) {
		rc = iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(mode));
		if (rc) {
			IWL_DEBUG_MAC80211("failed setting power mode.\n");
			goto out;
		}
		priv->power_mode = mode;
	}

	rc = count;

 out:
	mutex_unlock(&priv->mutex);
	return rc;
}

#define MAX_WX_STRING 80

/* Values are in microsecond */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};
static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};
6382
6383static ssize_t show_power_level(struct device *d,
6384 struct device_attribute *attr, char *buf)
6385{
4a8a4322 6386 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6387 int level = IWL_POWER_LEVEL(priv->power_mode);
6388 char *p = buf;
6389
6390 p += sprintf(p, "%d ", level);
6391 switch (level) {
6392 case IWL_POWER_MODE_CAM:
1125eff3 6393 case IWL39_POWER_AC:
b481de9c
ZY
6394 p += sprintf(p, "(AC)");
6395 break;
1125eff3 6396 case IWL39_POWER_BATTERY:
b481de9c
ZY
6397 p += sprintf(p, "(BATTERY)");
6398 break;
6399 default:
6400 p += sprintf(p,
6401 "(Timeout %dms, Period %dms)",
6402 timeout_duration[level - 1] / 1000,
6403 period_duration[level - 1] / 1000);
6404 }
6405
6406 if (!(priv->power_mode & IWL_POWER_ENABLED))
6407 p += sprintf(p, " OFF\n");
6408 else
6409		p += sprintf(p, "\n");
6410
3ac7f146 6411 return p - buf + 1;
b481de9c
ZY
6412
6413}
6414
6415static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
6416 store_power_level);
6417
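/*
 * Illustrative sketch (standalone, values copied from the tables above):
 * the level -> (timeout, period) mapping used by show_power_level().
 * Levels 1..5 index timeout_duration[]/period_duration[] (microseconds),
 * while IWL39_POWER_AC and IWL39_POWER_BATTERY print symbolic names.
 */
#include <stdio.h>

void example_print_power_level(int level)
{
	static const int timeout_us[] = { 350000, 250000, 75000, 37000, 25000 };
	static const int period_us[]  = { 400000, 700000, 1000000, 1000000, 1000000 };

	if (level >= 1 && level <= 5)
		printf("level %d: timeout %dms, period %dms\n", level,
		       timeout_us[level - 1] / 1000, period_us[level - 1] / 1000);
}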
6418static ssize_t show_channels(struct device *d,
6419 struct device_attribute *attr, char *buf)
6420{
8318d78a
JB
6421	/* channel information does not belong in sysfs anyway */
6422 return 0;
b481de9c
ZY
6423}
6424
6425static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
6426
6427static ssize_t show_statistics(struct device *d,
6428 struct device_attribute *attr, char *buf)
6429{
4a8a4322 6430 struct iwl_priv *priv = dev_get_drvdata(d);
bb8c093b 6431 u32 size = sizeof(struct iwl3945_notif_statistics);
b481de9c 6432 u32 len = 0, ofs = 0;
f2c7e521 6433 u8 *data = (u8 *)&priv->statistics_39;
b481de9c
ZY
6434 int rc = 0;
6435
775a6e27 6436 if (!iwl_is_alive(priv))
b481de9c
ZY
6437 return -EAGAIN;
6438
6439 mutex_lock(&priv->mutex);
bb8c093b 6440 rc = iwl3945_send_statistics_request(priv);
b481de9c
ZY
6441 mutex_unlock(&priv->mutex);
6442
6443 if (rc) {
6444 len = sprintf(buf,
6445 "Error sending statistics request: 0x%08X\n", rc);
6446 return len;
6447 }
6448
6449 while (size && (PAGE_SIZE - len)) {
6450 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
6451 PAGE_SIZE - len, 1);
6452 len = strlen(buf);
6453 if (PAGE_SIZE - len)
6454 buf[len++] = '\n';
6455
6456 ofs += 16;
6457 size -= min(size, 16U);
6458 }
6459
6460 return len;
6461}
6462
6463static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
6464
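/*
 * Illustrative userspace sketch (assumed names, not kernel code): the same
 * 16-bytes-per-line dump pattern show_statistics() and show_measurement()
 * build with the kernel's hex_dump_to_buffer(), bounded by the output
 * buffer the way the loops above are bounded by PAGE_SIZE.
 */
#include <stdio.h>

size_t example_hex_dump(const unsigned char *data, size_t size,
			char *out, size_t out_size)
{
	size_t len = 0, ofs = 0;

	while (size && len + 1 < out_size) {
		size_t chunk = size < 16 ? size : 16;
		size_t i;

		for (i = 0; i < chunk && len + 4 < out_size; i++)
			len += snprintf(out + len, out_size - len, "%02x ",
					data[ofs + i]);
		if (len + 1 < out_size)
			out[len++] = '\n';	/* one line per 16-byte row */
		ofs += chunk;
		size -= chunk;
	}
	if (out_size)
		out[len] = '\0';
	return len;
}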
6465static ssize_t show_antenna(struct device *d,
6466 struct device_attribute *attr, char *buf)
6467{
4a8a4322 6468 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c 6469
775a6e27 6470 if (!iwl_is_alive(priv))
b481de9c
ZY
6471 return -EAGAIN;
6472
6473 return sprintf(buf, "%d\n", priv->antenna);
6474}
6475
6476static ssize_t store_antenna(struct device *d,
6477 struct device_attribute *attr,
6478 const char *buf, size_t count)
6479{
6480 int ant;
4a8a4322 6481 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6482
6483 if (count == 0)
6484 return 0;
6485
6486 if (sscanf(buf, "%1i", &ant) != 1) {
6487 IWL_DEBUG_INFO("not in hex or decimal form.\n");
6488 return count;
6489 }
6490
6491 if ((ant >= 0) && (ant <= 2)) {
6492 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 6493 priv->antenna = (enum iwl3945_antenna)ant;
b481de9c
ZY
6494 } else
6495 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
6496
6497
6498 return count;
6499}
6500
6501static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
6502
6503static ssize_t show_status(struct device *d,
6504 struct device_attribute *attr, char *buf)
6505{
4a8a4322 6506 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
775a6e27 6507 if (!iwl_is_alive(priv))
b481de9c
ZY
6508 return -EAGAIN;
6509 return sprintf(buf, "0x%08x\n", (int)priv->status);
6510}
6511
6512static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
6513
6514static ssize_t dump_error_log(struct device *d,
6515 struct device_attribute *attr,
6516 const char *buf, size_t count)
6517{
6518 char *p = (char *)buf;
6519
6520 if (p[0] == '1')
4a8a4322 6521 iwl3945_dump_nic_error_log((struct iwl_priv *)d->driver_data);
b481de9c
ZY
6522
6523 return strnlen(buf, count);
6524}
6525
6526static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
6527
6528static ssize_t dump_event_log(struct device *d,
6529 struct device_attribute *attr,
6530 const char *buf, size_t count)
6531{
6532 char *p = (char *)buf;
6533
6534 if (p[0] == '1')
4a8a4322 6535 iwl3945_dump_nic_event_log((struct iwl_priv *)d->driver_data);
b481de9c
ZY
6536
6537 return strnlen(buf, count);
6538}
6539
6540static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
6541
6542/*****************************************************************************
6543 *
a96a27f9 6544 * driver setup and tear down
b481de9c
ZY
6545 *
6546 *****************************************************************************/
6547
4a8a4322 6548static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
b481de9c
ZY
6549{
6550 priv->workqueue = create_workqueue(DRV_NAME);
6551
6552 init_waitqueue_head(&priv->wait_command_queue);
6553
bb8c093b
CH
6554 INIT_WORK(&priv->up, iwl3945_bg_up);
6555 INIT_WORK(&priv->restart, iwl3945_bg_restart);
6556 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
c0af96a6 6557 INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
bb8c093b 6558 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
bb8c093b
CH
6559 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
6560 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
2663516d 6561 INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
77fecfb8
SO
6562 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
6563 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
6564 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
6565 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
bb8c093b
CH
6566
6567 iwl3945_hw_setup_deferred_work(priv);
b481de9c
ZY
6568
6569 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 6570 iwl3945_irq_tasklet, (unsigned long)priv);
b481de9c
ZY
6571}
6572
4a8a4322 6573static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
b481de9c 6574{
bb8c093b 6575 iwl3945_hw_cancel_deferred_work(priv);
b481de9c 6576
e47eb6ad 6577 cancel_delayed_work_sync(&priv->init_alive_start);
b481de9c
ZY
6578 cancel_delayed_work(&priv->scan_check);
6579 cancel_delayed_work(&priv->alive_start);
b481de9c
ZY
6580 cancel_work_sync(&priv->beacon_update);
6581}
6582
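/*
 * Minimal sketch (assumed names, not driver code) of the private-workqueue
 * pattern used by iwl3945_setup_deferred_work() and
 * iwl3945_cancel_deferred_work() above: one regular and one delayed work
 * item on a dedicated queue, torn down with the _sync cancel helpers.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_ctx {
	struct workqueue_struct *wq;
	struct work_struct restart;
	struct delayed_work poll;
};

static void example_restart(struct work_struct *work) { /* reinitialize hardware */ }
static void example_poll(struct work_struct *work) { /* poll a switch, requeue as needed */ }

static int example_setup(struct example_ctx *ctx)
{
	ctx->wq = create_workqueue("example");
	if (!ctx->wq)
		return -ENOMEM;
	INIT_WORK(&ctx->restart, example_restart);
	INIT_DELAYED_WORK(&ctx->poll, example_poll);
	queue_delayed_work(ctx->wq, &ctx->poll, 2 * HZ);	/* as done for rfkill_poll */
	return 0;
}

static void example_teardown(struct example_ctx *ctx)
{
	cancel_delayed_work_sync(&ctx->poll);
	cancel_work_sync(&ctx->restart);
	destroy_workqueue(ctx->wq);
}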
bb8c093b 6583static struct attribute *iwl3945_sysfs_entries[] = {
b481de9c
ZY
6584 &dev_attr_antenna.attr,
6585 &dev_attr_channels.attr,
6586 &dev_attr_dump_errors.attr,
6587 &dev_attr_dump_events.attr,
6588 &dev_attr_flags.attr,
6589 &dev_attr_filter_flags.attr,
c8b0e6e1 6590#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
b481de9c
ZY
6591 &dev_attr_measurement.attr,
6592#endif
6593 &dev_attr_power_level.attr,
b481de9c 6594 &dev_attr_retry_rate.attr,
b481de9c
ZY
6595 &dev_attr_statistics.attr,
6596 &dev_attr_status.attr,
6597 &dev_attr_temperature.attr,
b481de9c 6598 &dev_attr_tx_power.attr,
40b8ec0b
SO
6599#ifdef CONFIG_IWL3945_DEBUG
6600 &dev_attr_debug_level.attr,
6601#endif
b481de9c
ZY
6602 NULL
6603};
6604
bb8c093b 6605static struct attribute_group iwl3945_attribute_group = {
b481de9c 6606 .name = NULL, /* put in device directory */
bb8c093b 6607 .attrs = iwl3945_sysfs_entries,
b481de9c
ZY
6608};
6609
bb8c093b
CH
6610static struct ieee80211_ops iwl3945_hw_ops = {
6611 .tx = iwl3945_mac_tx,
6612 .start = iwl3945_mac_start,
6613 .stop = iwl3945_mac_stop,
6614 .add_interface = iwl3945_mac_add_interface,
6615 .remove_interface = iwl3945_mac_remove_interface,
6616 .config = iwl3945_mac_config,
6617 .config_interface = iwl3945_mac_config_interface,
6618 .configure_filter = iwl3945_configure_filter,
6619 .set_key = iwl3945_mac_set_key,
bb8c093b
CH
6620 .get_tx_stats = iwl3945_mac_get_tx_stats,
6621 .conf_tx = iwl3945_mac_conf_tx,
bb8c093b 6622 .reset_tsf = iwl3945_mac_reset_tsf,
cd56d331 6623 .bss_info_changed = iwl3945_bss_info_changed,
bb8c093b 6624 .hw_scan = iwl3945_mac_hw_scan
b481de9c
ZY
6625};
6626
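/*
 * Illustrative sketch (assumed names): how an ops table such as
 * iwl3945_hw_ops is bound to mac80211.  The driver itself goes through
 * iwl_alloc_all() and ieee80211_register_hw() in iwl3945_pci_probe()
 * below; this only shows the generic shape of that sequence.
 */
#include <net/mac80211.h>
#include <linux/errno.h>

static int example_register(const struct ieee80211_ops *ops,
			    struct device *dev, size_t priv_size,
			    struct ieee80211_hw **out)
{
	struct ieee80211_hw *hw = ieee80211_alloc_hw(priv_size, ops);
	int err;

	if (!hw)
		return -ENOMEM;
	SET_IEEE80211_DEV(hw, dev);		/* tie the wiphy to the PCI device */
	err = ieee80211_register_hw(hw);	/* mac80211 may now call ops->start */
	if (err) {
		ieee80211_free_hw(hw);
		return err;
	}
	*out = hw;
	return 0;
}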
e52119c5 6627static int iwl3945_init_drv(struct iwl_priv *priv)
90a30a02
KA
6628{
6629 int ret;
6630
6631 priv->retry_rate = 1;
6632 priv->ibss_beacon = NULL;
6633
6634 spin_lock_init(&priv->lock);
3dae0c42 6635 spin_lock_init(&priv->power_data.lock);
90a30a02
KA
6636 spin_lock_init(&priv->sta_lock);
6637 spin_lock_init(&priv->hcmd_lock);
6638
6639 INIT_LIST_HEAD(&priv->free_frames);
6640
6641 mutex_init(&priv->mutex);
6642
6643 /* Clear the driver's (not device's) station table */
6644 iwl3945_clear_stations_table(priv);
6645
6646 priv->data_retry_limit = -1;
6647 priv->ieee_channels = NULL;
6648 priv->ieee_rates = NULL;
6649 priv->band = IEEE80211_BAND_2GHZ;
6650
6651 priv->iw_mode = NL80211_IFTYPE_STATION;
6652
6653 iwl_reset_qos(priv);
6654
6655 priv->qos_data.qos_active = 0;
6656 priv->qos_data.qos_cap.val = 0;
6657
6658 priv->rates_mask = IWL_RATES_MASK;
6659 /* If power management is turned on, default to AC mode */
c7a7c8ec 6660 priv->power_mode = IWL39_POWER_AC;
62ea9c5b 6661 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
90a30a02
KA
6662
6663 ret = iwl3945_init_channel_map(priv);
6664 if (ret) {
6665 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
6666 goto err;
6667 }
6668
6669 ret = iwl3945_init_geos(priv);
6670 if (ret) {
6671 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
6672 goto err_free_channel_map;
6673 }
6674
6675 return 0;
6676
6677err_free_channel_map:
6678 iwl3945_free_channel_map(priv);
6679err:
6680 return ret;
6681}
6682
bb8c093b 6683static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
b481de9c
ZY
6684{
6685 int err = 0;
4a8a4322 6686 struct iwl_priv *priv;
b481de9c 6687 struct ieee80211_hw *hw;
c0f20d91 6688 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
0359facc 6689 unsigned long flags;
b481de9c 6690
cee53ddb
KA
6691 /***********************
6692 * 1. Allocating HW data
6693 * ********************/
6694
b481de9c
ZY
6695 /* mac80211 allocates memory for this device instance, including
6696 * space for this driver's private structure */
90a30a02 6697 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
b481de9c 6698 if (hw == NULL) {
a3139c59 6699	printk(KERN_ERR DRV_NAME ": Cannot allocate network device\n");
b481de9c
ZY
6700 err = -ENOMEM;
6701 goto out;
6702 }
b481de9c 6703 priv = hw->priv;
90a30a02 6704 SET_IEEE80211_DEV(hw, &pdev->dev);
6440adb5 6705
df878d8f
KA
6706 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) ||
6707 (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) {
15b1687c
WT
6708 IWL_ERR(priv,
6709 "invalid queues_num, should be between %d and %d\n",
6710 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
a3139c59
SO
6711 err = -EINVAL;
6712 goto out;
6713 }
6714
90a30a02
KA
6715 /*
6716 * Disabling hardware scan means that mac80211 will perform scans
6717 * "the hard way", rather than using device's scan.
6718 */
df878d8f 6719 if (iwl3945_mod_params.disable_hw_scan) {
40b8ec0b
SO
6720 IWL_DEBUG_INFO("Disabling hw_scan\n");
6721 iwl3945_hw_ops.hw_scan = NULL;
6722 }
6723
90a30a02 6724
cee53ddb 6725 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
90a30a02
KA
6726 priv->cfg = cfg;
6727 priv->pci_dev = pdev;
cee53ddb 6728
c8b0e6e1 6729#ifdef CONFIG_IWL3945_DEBUG
df878d8f 6730 priv->debug_level = iwl3945_mod_params.debug;
b481de9c
ZY
6731 atomic_set(&priv->restrict_refcnt, 0);
6732#endif
90a30a02
KA
6733 hw->rate_control_algorithm = "iwl-3945-rs";
6734 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
6735
6736 /* Select antenna (may be helpful if only one antenna is connected) */
6737 priv->antenna = (enum iwl3945_antenna)iwl3945_mod_params.antenna;
b481de9c 6738
566bfe5a 6739 /* Tell mac80211 our characteristics */
605a0bd6 6740 hw->flags = IEEE80211_HW_SIGNAL_DBM |
566bfe5a 6741 IEEE80211_HW_NOISE_DBM;
b481de9c 6742
f59ac048 6743 hw->wiphy->interface_modes =
f59ac048
LR
6744 BIT(NL80211_IFTYPE_STATION) |
6745 BIT(NL80211_IFTYPE_ADHOC);
6746
2a44f911 6747 hw->wiphy->custom_regulatory = true;
ea4a82dc 6748
6440adb5 6749 /* 4 EDCA QOS priorities */
b481de9c
ZY
6750 hw->queues = 4;
6751
cee53ddb
KA
6752 /***************************
6753 * 2. Initializing PCI bus
6754 * *************************/
b481de9c
ZY
6755 if (pci_enable_device(pdev)) {
6756 err = -ENODEV;
6757 goto out_ieee80211_free_hw;
6758 }
6759
6760 pci_set_master(pdev);
6761
b481de9c
ZY
6762 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6763 if (!err)
6764 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
6765 if (err) {
978785a3 6766 IWL_WARN(priv, "No suitable DMA available.\n");
b481de9c
ZY
6767 goto out_pci_disable_device;
6768 }
6769
6770 pci_set_drvdata(pdev, priv);
6771 err = pci_request_regions(pdev, DRV_NAME);
6772 if (err)
6773 goto out_pci_disable_device;
6440adb5 6774
cee53ddb
KA
6775 /***********************
6776 * 3. Read REV Register
6777 * ********************/
b481de9c
ZY
6778 priv->hw_base = pci_iomap(pdev, 0, 0);
6779 if (!priv->hw_base) {
6780 err = -ENODEV;
6781 goto out_pci_release_regions;
6782 }
6783
6784 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
6785 (unsigned long long) pci_resource_len(pdev, 0));
6786 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
6787
cee53ddb
KA
6788 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6789 * PCI Tx retries from interfering with C3 CPU state */
6790 pci_write_config_byte(pdev, 0x41, 0x00);
b481de9c 6791
90a30a02
KA
6792 /* amp init */
6793 err = priv->cfg->ops->lib->apm_ops.init(priv);
cee53ddb 6794 if (err < 0) {
90a30a02
KA
6795 IWL_DEBUG_INFO("Failed to init APMG\n");
6796 goto out_iounmap;
cee53ddb 6797 }
b481de9c 6798
cee53ddb
KA
6799 /***********************
6800 * 4. Read EEPROM
6801 * ********************/
90a30a02 6802
cee53ddb
KA
6803 /* Read the EEPROM */
6804 err = iwl3945_eeprom_init(priv);
6805 if (err) {
15b1687c 6806 IWL_ERR(priv, "Unable to init EEPROM\n");
cee53ddb
KA
6807 goto out_remove_sysfs;
6808 }
6809 /* MAC Address location in EEPROM same for 3945/4965 */
6810 get_eeprom_mac(priv, priv->mac_addr);
6811 IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr);
6812 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
b481de9c 6813
cee53ddb
KA
6814 /***********************
6815 * 5. Setup HW Constants
6816 * ********************/
b481de9c 6817 /* Device-specific setup */
3832ec9d 6818 if (iwl3945_hw_set_hw_params(priv)) {
15b1687c 6819 IWL_ERR(priv, "failed to set hw settings\n");
b481de9c
ZY
6820 goto out_iounmap;
6821 }
6822
cee53ddb
KA
6823 /***********************
6824 * 6. Setup priv
6825 * ********************/
cee53ddb 6826
90a30a02 6827 err = iwl3945_init_drv(priv);
b481de9c 6828 if (err) {
90a30a02
KA
6829 IWL_ERR(priv, "initializing driver failed\n");
6830 goto out_free_geos;
b481de9c
ZY
6831 }
6832
978785a3
TW
6833 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
6834 priv->cfg->name);
cee53ddb
KA
6835
6836 /***********************************
6837 * 7. Initialize Module Parameters
6838 * **********************************/
6839
6840 /* Initialize module parameter values here */
6841 /* Disable radio (SW RF KILL) via parameter when loading driver */
df878d8f 6842 if (iwl3945_mod_params.disable) {
cee53ddb
KA
6843 set_bit(STATUS_RF_KILL_SW, &priv->status);
6844 IWL_DEBUG_INFO("Radio disabled.\n");
849e0dce
RC
6845 }
6846
cee53ddb
KA
6847
6848 /***********************
6849 * 8. Setup Services
6850 * ********************/
6851
6852 spin_lock_irqsave(&priv->lock, flags);
6853 iwl3945_disable_interrupts(priv);
6854 spin_unlock_irqrestore(&priv->lock, flags);
6855
2663516d
HS
6856 pci_enable_msi(priv->pci_dev);
6857
6858 err = request_irq(priv->pci_dev->irq, iwl3945_isr, IRQF_SHARED,
6859 DRV_NAME, priv);
6860 if (err) {
6861 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
6862 goto out_disable_msi;
6863 }
6864
cee53ddb 6865 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
849e0dce 6866 if (err) {
15b1687c 6867 IWL_ERR(priv, "failed to create sysfs device attributes\n");
90a30a02 6868 goto out_release_irq;
849e0dce 6869 }
849e0dce 6870
cee53ddb
KA
6871 iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
6872 iwl3945_setup_deferred_work(priv);
6873 iwl3945_setup_rx_handlers(priv);
6874
cee53ddb 6875 /*********************************
2663516d 6876 * 9. Setup and Register mac80211
cee53ddb
KA
6877 * *******************************/
6878
5a66926a
ZY
6879 err = ieee80211_register_hw(priv->hw);
6880 if (err) {
15b1687c 6881 IWL_ERR(priv, "Failed to register network device: %d\n", err);
cee53ddb 6882 goto out_remove_sysfs;
5a66926a 6883 }
b481de9c 6884
5a66926a
ZY
6885 priv->hw->conf.beacon_int = 100;
6886 priv->mac80211_registered = 1;
cee53ddb 6887
c0af96a6 6888 err = iwl_rfkill_init(priv);
ebef2008 6889 if (err)
15b1687c 6890 IWL_ERR(priv, "Unable to initialize RFKILL system. "
ebef2008
AK
6891 "Ignoring error: %d\n", err);
6892
2663516d
HS
6893 /* Start monitoring the killswitch */
6894 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
6895 2 * HZ);
6896
b481de9c
ZY
6897 return 0;
6898
cee53ddb
KA
6899 out_remove_sysfs:
6900 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
849e0dce
RC
6901 out_free_geos:
6902 iwl3945_free_geos(priv);
b481de9c
ZY
6903
6904 out_release_irq:
2663516d 6905 free_irq(priv->pci_dev->irq, priv);
b481de9c
ZY
6906 destroy_workqueue(priv->workqueue);
6907 priv->workqueue = NULL;
3832ec9d 6908 iwl3945_unset_hw_params(priv);
2663516d
HS
6909 out_disable_msi:
6910 pci_disable_msi(priv->pci_dev);
b481de9c
ZY
6911 out_iounmap:
6912 pci_iounmap(pdev, priv->hw_base);
6913 out_pci_release_regions:
6914 pci_release_regions(pdev);
6915 out_pci_disable_device:
6916 pci_disable_device(pdev);
6917 pci_set_drvdata(pdev, NULL);
6918 out_ieee80211_free_hw:
6919 ieee80211_free_hw(priv->hw);
6920 out:
6921 return err;
6922}
6923
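/*
 * Plain-C sketch (stub resource functions, names are assumptions) of the
 * goto-based unwind idiom iwl3945_pci_probe() uses above: each acquired
 * resource gets a label, and failure paths jump to the label that releases
 * everything obtained so far, in reverse order.
 */
static int example_acquire_a(void) { return 0; }	/* stand-in for e.g. pci_enable_device() */
static void example_release_a(void) { }
static int example_acquire_b(void) { return 0; }	/* stand-in for e.g. pci_request_regions() */

int example_probe(void)
{
	int err;

	err = example_acquire_a();
	if (err)
		goto out;
	err = example_acquire_b();
	if (err)
		goto out_release_a;
	return 0;

out_release_a:
	example_release_a();
out:
	return err;
}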
c83dbf68 6924static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
b481de9c 6925{
4a8a4322 6926 struct iwl_priv *priv = pci_get_drvdata(pdev);
0359facc 6927 unsigned long flags;
b481de9c
ZY
6928
6929 if (!priv)
6930 return;
6931
6932 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
6933
b481de9c 6934 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 6935
d552bfb6
KA
6936 if (priv->mac80211_registered) {
6937 ieee80211_unregister_hw(priv->hw);
6938 priv->mac80211_registered = 0;
6939 } else {
6940 iwl3945_down(priv);
6941 }
b481de9c 6942
0359facc
MA
6943 /* make sure we flush any pending irq or
6944 * tasklet for the driver
6945 */
6946 spin_lock_irqsave(&priv->lock, flags);
6947 iwl3945_disable_interrupts(priv);
6948 spin_unlock_irqrestore(&priv->lock, flags);
6949
6950 iwl_synchronize_irq(priv);
6951
bb8c093b 6952 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
b481de9c 6953
c0af96a6 6954 iwl_rfkill_unregister(priv);
2663516d
HS
6955 cancel_delayed_work(&priv->rfkill_poll);
6956
bb8c093b 6957 iwl3945_dealloc_ucode_pci(priv);
b481de9c
ZY
6958
6959 if (priv->rxq.bd)
51af3d3f 6960 iwl_rx_queue_free(priv, &priv->rxq);
bb8c093b 6961 iwl3945_hw_txq_ctx_free(priv);
b481de9c 6962
3832ec9d 6963 iwl3945_unset_hw_params(priv);
bb8c093b 6964 iwl3945_clear_stations_table(priv);
b481de9c 6965
6ef89d0a
MA
6966 /*netif_stop_queue(dev); */
6967 flush_workqueue(priv->workqueue);
6968
bb8c093b 6969 /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
b481de9c
ZY
6970 * priv->workqueue... so we can't take down the workqueue
6971 * until now... */
6972 destroy_workqueue(priv->workqueue);
6973 priv->workqueue = NULL;
6974
2663516d
HS
6975 free_irq(pdev->irq, priv);
6976 pci_disable_msi(pdev);
6977
b481de9c
ZY
6978 pci_iounmap(pdev, priv->hw_base);
6979 pci_release_regions(pdev);
6980 pci_disable_device(pdev);
6981 pci_set_drvdata(pdev, NULL);
6982
849e0dce
RC
6983 iwl3945_free_channel_map(priv);
6984 iwl3945_free_geos(priv);
805cee5b 6985 kfree(priv->scan);
b481de9c
ZY
6986 if (priv->ibss_beacon)
6987 dev_kfree_skb(priv->ibss_beacon);
6988
6989 ieee80211_free_hw(priv->hw);
6990}
6991
6992#ifdef CONFIG_PM
6993
bb8c093b 6994static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 6995{
4a8a4322 6996 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c 6997
e655b9f0
ZY
6998 if (priv->is_open) {
6999 set_bit(STATUS_IN_SUSPEND, &priv->status);
7000 iwl3945_mac_stop(priv->hw);
7001 priv->is_open = 1;
7002 }
2663516d
HS
7003 pci_save_state(pdev);
7004 pci_disable_device(pdev);
b481de9c
ZY
7005 pci_set_power_state(pdev, PCI_D3hot);
7006
b481de9c
ZY
7007 return 0;
7008}
7009
bb8c093b 7010static int iwl3945_pci_resume(struct pci_dev *pdev)
b481de9c 7011{
4a8a4322 7012 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c 7013
b481de9c 7014 pci_set_power_state(pdev, PCI_D0);
2663516d
HS
7015 pci_enable_device(pdev);
7016 pci_restore_state(pdev);
b481de9c 7017
e655b9f0
ZY
7018 if (priv->is_open)
7019 iwl3945_mac_start(priv->hw);
b481de9c 7020
e655b9f0 7021 clear_bit(STATUS_IN_SUSPEND, &priv->status);
b481de9c
ZY
7022 return 0;
7023}
7024
7025#endif /* CONFIG_PM */
7026
7027/*****************************************************************************
7028 *
7029 * driver and module entry point
7030 *
7031 *****************************************************************************/
7032
bb8c093b 7033static struct pci_driver iwl3945_driver = {
b481de9c 7034 .name = DRV_NAME,
bb8c093b
CH
7035 .id_table = iwl3945_hw_card_ids,
7036 .probe = iwl3945_pci_probe,
7037 .remove = __devexit_p(iwl3945_pci_remove),
b481de9c 7038#ifdef CONFIG_PM
bb8c093b
CH
7039 .suspend = iwl3945_pci_suspend,
7040 .resume = iwl3945_pci_resume,
b481de9c
ZY
7041#endif
7042};
7043
bb8c093b 7044static int __init iwl3945_init(void)
b481de9c
ZY
7045{
7046
7047 int ret;
7048 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
7049 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
897e1cf2
RC
7050
7051 ret = iwl3945_rate_control_register();
7052 if (ret) {
a3139c59
SO
7053 printk(KERN_ERR DRV_NAME
7054 "Unable to register rate control algorithm: %d\n", ret);
897e1cf2
RC
7055 return ret;
7056 }
7057
bb8c093b 7058 ret = pci_register_driver(&iwl3945_driver);
b481de9c 7059 if (ret) {
a3139c59 7060		printk(KERN_ERR DRV_NAME ": Unable to initialize PCI module\n");
897e1cf2 7061 goto error_register;
b481de9c 7062 }
b481de9c
ZY
7063
7064 return ret;
897e1cf2 7065
897e1cf2
RC
7066error_register:
7067 iwl3945_rate_control_unregister();
7068 return ret;
b481de9c
ZY
7069}
7070
bb8c093b 7071static void __exit iwl3945_exit(void)
b481de9c 7072{
bb8c093b 7073 pci_unregister_driver(&iwl3945_driver);
897e1cf2 7074 iwl3945_rate_control_unregister();
b481de9c
ZY
7075}
7076
a0987a8d 7077MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
25cb6cad 7078
df878d8f 7079module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444);
b481de9c 7080MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
df878d8f 7081module_param_named(disable, iwl3945_mod_params.disable, int, 0444);
b481de9c 7082MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
9c74d9fb
SO
7083module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444);
7084MODULE_PARM_DESC(swcrypto,
7085 "using software crypto (default 1 [software])\n");
df878d8f 7086module_param_named(debug, iwl3945_mod_params.debug, uint, 0444);
b481de9c 7087MODULE_PARM_DESC(debug, "debug output mask");
df878d8f 7088module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444);
b481de9c
ZY
7089MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
7090
df878d8f 7091module_param_named(queues_num, iwl3945_mod_params.num_of_queues, int, 0444);
b481de9c
ZY
7092MODULE_PARM_DESC(queues_num, "number of hw queues.");
7093
bb8c093b
CH
7094module_exit(iwl3945_exit);
7095module_init(iwl3945_init);
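/*
 * Usage note (illustrative): the parameters declared above are given at
 * module load time, e.g.
 *
 *	modprobe iwl3945 disable_hw_scan=1 swcrypto=0 debug=0x1
 *
 * and, since they are registered with mode 0444, their current values can
 * be read back from /sys/module/iwl3945/parameters/<name>.
 */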