iwl3945: Use iwlcore scan code
drivers/net/wireless/iwlwifi/iwl3945-base.c
1 /******************************************************************************
2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/pci.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/delay.h>
36 #include <linux/skbuff.h>
37 #include <linux/netdevice.h>
38 #include <linux/wireless.h>
39 #include <linux/firmware.h>
40 #include <linux/etherdevice.h>
41 #include <linux/if_arp.h>
42
43 #include <net/ieee80211_radiotap.h>
44 #include <net/lib80211.h>
45 #include <net/mac80211.h>
46
47 #include <asm/div64.h>
48
49 #define DRV_NAME "iwl3945"
50
51 #include "iwl-fh.h"
52 #include "iwl-3945-fh.h"
53 #include "iwl-commands.h"
54 #include "iwl-3945.h"
55 #include "iwl-helpers.h"
56 #include "iwl-core.h"
57 #include "iwl-dev.h"
58
59 /*
60 * module name, copyright, version, etc.
61 */
62
63 #define DRV_DESCRIPTION \
64 "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
65
66 #ifdef CONFIG_IWL3945_DEBUG
67 #define VD "d"
68 #else
69 #define VD
70 #endif
71
72 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
73 #define VS "s"
74 #else
75 #define VS
76 #endif
77
78 #define IWL39_VERSION "1.2.26k" VD VS
79 #define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
80 #define DRV_AUTHOR "<ilw@linux.intel.com>"
81 #define DRV_VERSION IWL39_VERSION
82
83
84 MODULE_DESCRIPTION(DRV_DESCRIPTION);
85 MODULE_VERSION(DRV_VERSION);
86 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
87 MODULE_LICENSE("GPL");
88
89 /* module parameters */
90 struct iwl_mod_params iwl3945_mod_params = {
91 .num_of_queues = IWL39_MAX_NUM_QUEUES,
92 .sw_crypto = 1,
93 /* the rest are 0 by default */
94 };
95
96 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
97 * DMA services
98 *
99 * Theory of operation
100 *
101 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
102 * of buffer descriptors, each of which points to one or more data buffers for
103 * the device to read from or fill. Driver and device exchange status of each
104 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
105 * entries in each circular buffer, to protect against confusing empty and full
106 * queue states.
107 *
108 * The device reads or writes the data in the queues via the device's several
109 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
110 *
 111  * For Tx queues, there are low mark and high mark limits.  If, after queuing
 112  * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 113  * When reclaiming packets (on a 'tx done' IRQ), if free space becomes
 114  * > high mark, the Tx queue is resumed.
115 *
116 * The 3945 operates with six queues: One receive queue, one transmit queue
117 * (#4) for sending commands to the device firmware, and four transmit queues
118 * (#0-3) for data tx via EDCA. An additional 2 HCCA queues are unused.
119 ***************************************************/
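/*
 * Illustrative sketch (not part of the driver): the read/write pointer
 * handling described above relies on the ring size being a power of two,
 * so index wrapping and occupancy checks reduce to a simple mask.  The
 * helpers below are hypothetical stand-ins for the iwl_queue_inc_wrap()/
 * iwl_queue_dec_wrap() helpers referenced further down and are shown only
 * to make the theory of operation concrete.
 */
#if 0
static inline int example_queue_inc_wrap(int index, int n_bd)
{
	/* advance one slot, wrapping at n_bd (n_bd must be a power of two) */
	return ++index & (n_bd - 1);
}

static inline int example_queue_used(int n_bd, int write_ptr, int read_ptr)
{
	/* number of occupied slots; the driver keeps >= 2 slots free so that
	 * write_ptr == read_ptr always means "empty", never "full" */
	return (write_ptr - read_ptr) & (n_bd - 1);
}
#endif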
120
121 /**
122 * iwl3945_queue_init - Initialize queue's high/low-water and read/write indexes
123 */
124 static int iwl3945_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
125 int count, int slots_num, u32 id)
126 {
127 q->n_bd = count;
128 q->n_window = slots_num;
129 q->id = id;
130
131 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
132 * and iwl_queue_dec_wrap are broken. */
133 BUG_ON(!is_power_of_2(count));
134
135 /* slots_num must be power-of-two size, otherwise
136 * get_cmd_index is broken. */
137 BUG_ON(!is_power_of_2(slots_num));
138
139 q->low_mark = q->n_window / 4;
140 if (q->low_mark < 4)
141 q->low_mark = 4;
142
143 q->high_mark = q->n_window / 8;
144 if (q->high_mark < 2)
145 q->high_mark = 2;
146
147 q->write_ptr = q->read_ptr = 0;
148
149 return 0;
150 }
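/*
 * Worked example (illustrative only): with slots_num = 64 the code above
 * yields low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8, both above
 * their minimum clamps of 4 and 2 respectively.
 */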
151
152 /**
153 * iwl3945_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
154 */
155 static int iwl3945_tx_queue_alloc(struct iwl_priv *priv,
156 struct iwl_tx_queue *txq, u32 id)
157 {
158 struct pci_dev *dev = priv->pci_dev;
159
160 /* Driver private data, only for Tx (not command) queues,
161 * not shared with device. */
162 if (id != IWL_CMD_QUEUE_NUM) {
163 txq->txb = kmalloc(sizeof(txq->txb[0]) *
164 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
165 if (!txq->txb) {
166 IWL_ERR(priv, "kmalloc for auxiliary BD "
167 "structures failed\n");
168 goto error;
169 }
170 } else
171 txq->txb = NULL;
172
173 /* Circular buffer of transmit frame descriptors (TFDs),
174 * shared with device */
175 txq->tfds39 = pci_alloc_consistent(dev,
176 sizeof(txq->tfds39[0]) * TFD_QUEUE_SIZE_MAX,
177 &txq->q.dma_addr);
178
179 if (!txq->tfds39) {
180 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n",
181 sizeof(txq->tfds39[0]) * TFD_QUEUE_SIZE_MAX);
182 goto error;
183 }
184 txq->q.id = id;
185
186 return 0;
187
188 error:
189 kfree(txq->txb);
190 txq->txb = NULL;
191
192 return -ENOMEM;
193 }
194
195 /**
196 * iwl3945_tx_queue_init - Allocate and initialize one tx/cmd queue
197 */
198 int iwl3945_tx_queue_init(struct iwl_priv *priv,
199 struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
200 {
201 int len, i;
202 int rc = 0;
203
204 /*
205 * Alloc buffer array for commands (Tx or other types of commands).
 206  * For the command queue (#4), allocate command space + one big
 207  * command for scan, since the scan command is very large; the system
 208  * never has two scans in flight at the same time, so only one is needed.
209 * For data Tx queues (all other queues), no super-size command
210 * space is needed.
211 */
212 len = sizeof(struct iwl_cmd);
213 for (i = 0; i <= slots_num; i++) {
214 if (i == slots_num) {
215 if (txq_id == IWL_CMD_QUEUE_NUM)
216 len += IWL_MAX_SCAN_SIZE;
217 else
218 continue;
219 }
220
221 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
222 if (!txq->cmd[i])
223 goto err;
224 }
225
226 /* Alloc driver data array and TFD circular buffer */
227 rc = iwl3945_tx_queue_alloc(priv, txq, txq_id);
228 if (rc)
229 goto err;
230
231 txq->need_update = 0;
232
233 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
234 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
235 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
236
237 /* Initialize queue high/low-water, head/tail indexes */
238 iwl3945_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
239
240 /* Tell device where to find queue, enable DMA channel. */
241 iwl3945_hw_tx_queue_init(priv, txq);
242
243 return 0;
244 err:
245 for (i = 0; i < slots_num; i++) {
246 kfree(txq->cmd[i]);
247 txq->cmd[i] = NULL;
248 }
249
250 if (txq_id == IWL_CMD_QUEUE_NUM) {
251 kfree(txq->cmd[slots_num]);
252 txq->cmd[slots_num] = NULL;
253 }
254 return -ENOMEM;
255 }
256
257 /**
258 * iwl3945_tx_queue_free - Deallocate DMA queue.
259 * @txq: Transmit queue to deallocate.
260 *
261 * Empty queue by removing and destroying all BD's.
262 * Free all buffers.
263 * 0-fill, but do not free "txq" descriptor structure.
264 */
265 void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
266 {
267 struct iwl_queue *q = &txq->q;
268 struct pci_dev *dev = priv->pci_dev;
269 int len, i;
270
271 if (q->n_bd == 0)
272 return;
273
274 /* first, empty all BD's */
275 for (; q->write_ptr != q->read_ptr;
276 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
277 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
278
279 len = sizeof(struct iwl_cmd) * q->n_window;
280 if (q->id == IWL_CMD_QUEUE_NUM)
281 len += IWL_MAX_SCAN_SIZE;
282
283 /* De-alloc array of command/tx buffers */
284 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
285 kfree(txq->cmd[i]);
286
287 /* De-alloc circular buffer of TFDs */
288 if (txq->q.n_bd)
289 pci_free_consistent(dev, sizeof(struct iwl3945_tfd) *
290 txq->q.n_bd, txq->tfds39, txq->q.dma_addr);
291
292 /* De-alloc array of per-TFD driver data */
293 kfree(txq->txb);
294 txq->txb = NULL;
295
296 /* 0-fill queue descriptor structure */
297 memset(txq, 0, sizeof(*txq));
298 }
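/*
 * Hypothetical usage sketch (not part of the driver): a queue set up with
 * iwl3945_tx_queue_init() is torn down with iwl3945_tx_queue_free(), e.g.
 * when freeing the whole Tx queue context.  The loop below is illustrative;
 * field names such as hw_params.max_txq_num are assumptions.
 */
#if 0
static void example_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* free every Tx/cmd queue allocated by the setup path */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl3945_tx_queue_free(priv, &priv->txq[txq_id]);
}
#endif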
299
300 /*************** STATION TABLE MANAGEMENT ****
301 * mac80211 should be examined to determine if sta_info is duplicating
302 * the functionality provided here
303 */
304
305 /**************************************************************/
306 #if 0 /* temporary disable till we add real remove station */
307 /**
308 * iwl3945_remove_station - Remove driver's knowledge of station.
309 *
310 * NOTE: This does not remove station from device's station table.
311 */
312 static u8 iwl3945_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
313 {
314 int index = IWL_INVALID_STATION;
315 int i;
316 unsigned long flags;
317
318 spin_lock_irqsave(&priv->sta_lock, flags);
319
320 if (is_ap)
321 index = IWL_AP_ID;
322 else if (is_broadcast_ether_addr(addr))
323 index = priv->hw_params.bcast_sta_id;
324 else
325 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
326 if (priv->stations_39[i].used &&
327 !compare_ether_addr(priv->stations_39[i].sta.sta.addr,
328 addr)) {
329 index = i;
330 break;
331 }
332
333 if (unlikely(index == IWL_INVALID_STATION))
334 goto out;
335
336 if (priv->stations_39[index].used) {
337 priv->stations_39[index].used = 0;
338 priv->num_stations--;
339 }
340
341 BUG_ON(priv->num_stations < 0);
342
343 out:
344 spin_unlock_irqrestore(&priv->sta_lock, flags);
345 return 0;
346 }
347 #endif
348
349 /**
350 * iwl3945_clear_stations_table - Clear the driver's station table
351 *
352 * NOTE: This does not clear or otherwise alter the device's station table.
353 */
354 static void iwl3945_clear_stations_table(struct iwl_priv *priv)
355 {
356 unsigned long flags;
357
358 spin_lock_irqsave(&priv->sta_lock, flags);
359
360 priv->num_stations = 0;
361 memset(priv->stations_39, 0, sizeof(priv->stations_39));
362
363 spin_unlock_irqrestore(&priv->sta_lock, flags);
364 }
365
366 /**
367 * iwl3945_add_station - Add station to station tables in driver and device
368 */
369 u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
370 {
371 int i;
372 int index = IWL_INVALID_STATION;
373 struct iwl3945_station_entry *station;
374 unsigned long flags_spin;
375 u8 rate;
376
377 spin_lock_irqsave(&priv->sta_lock, flags_spin);
378 if (is_ap)
379 index = IWL_AP_ID;
380 else if (is_broadcast_ether_addr(addr))
381 index = priv->hw_params.bcast_sta_id;
382 else
383 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
384 if (!compare_ether_addr(priv->stations_39[i].sta.sta.addr,
385 addr)) {
386 index = i;
387 break;
388 }
389
390 if (!priv->stations_39[i].used &&
391 index == IWL_INVALID_STATION)
392 index = i;
393 }
394
 395  /* These two conditions have the same outcome but keep them separate
 396     since they have different meanings */
397 if (unlikely(index == IWL_INVALID_STATION)) {
398 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
399 return index;
400 }
401
402 if (priv->stations_39[index].used &&
403 !compare_ether_addr(priv->stations_39[index].sta.sta.addr, addr)) {
404 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
405 return index;
406 }
407
408 IWL_DEBUG_ASSOC("Add STA ID %d: %pM\n", index, addr);
409 station = &priv->stations_39[index];
410 station->used = 1;
411 priv->num_stations++;
412
413 /* Set up the REPLY_ADD_STA command to send to device */
414 memset(&station->sta, 0, sizeof(struct iwl3945_addsta_cmd));
415 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
416 station->sta.mode = 0;
417 station->sta.sta.sta_id = index;
418 station->sta.station_flags = 0;
419
420 if (priv->band == IEEE80211_BAND_5GHZ)
421 rate = IWL_RATE_6M_PLCP;
422 else
423 rate = IWL_RATE_1M_PLCP;
424
425 /* Turn on both antennas for the station... */
426 station->sta.rate_n_flags =
427 iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);
428
429 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
430
431 /* Add station to device's station table */
432 iwl3945_send_add_station(priv, &station->sta, flags);
433 return index;
434
435 }
436
437 int iwl3945_send_statistics_request(struct iwl_priv *priv)
438 {
439 u32 val = 0;
440
441 struct iwl_host_cmd cmd = {
442 .id = REPLY_STATISTICS_CMD,
443 .len = sizeof(val),
444 .data = &val,
445 };
446
447 return iwl_send_cmd_sync(priv, &cmd);
448 }
449
450 /**
451 * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON
452 * @band: 2.4 or 5 GHz band
453 * @channel: Any channel valid for the requested band
 454  *
455 * In addition to setting the staging RXON, priv->band is also set.
456 *
457 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
458 * in the staging RXON flag structure based on the band
459 */
460 static int iwl3945_set_rxon_channel(struct iwl_priv *priv,
461 enum ieee80211_band band,
462 u16 channel)
463 {
464 if (!iwl3945_get_channel_info(priv, band, channel)) {
465 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
466 channel, band);
467 return -EINVAL;
468 }
469
470 if ((le16_to_cpu(priv->staging39_rxon.channel) == channel) &&
471 (priv->band == band))
472 return 0;
473
474 priv->staging39_rxon.channel = cpu_to_le16(channel);
475 if (band == IEEE80211_BAND_5GHZ)
476 priv->staging39_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
477 else
478 priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;
479
480 priv->band = band;
481
482 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);
483
484 return 0;
485 }
486
487 /**
488 * iwl3945_check_rxon_cmd - validate RXON structure is valid
489 *
490 * NOTE: This is really only useful during development and can eventually
491 * be #ifdef'd out once the driver is stable and folks aren't actively
492 * making changes
493 */
494 static int iwl3945_check_rxon_cmd(struct iwl_priv *priv)
495 {
496 int error = 0;
497 int counter = 1;
498 struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;
499
500 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
501 error |= le32_to_cpu(rxon->flags &
502 (RXON_FLG_TGJ_NARROW_BAND_MSK |
503 RXON_FLG_RADAR_DETECT_MSK));
504 if (error)
505 IWL_WARN(priv, "check 24G fields %d | %d\n",
506 counter++, error);
507 } else {
508 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
509 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
510 if (error)
511 IWL_WARN(priv, "check 52 fields %d | %d\n",
512 counter++, error);
513 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
514 if (error)
515 IWL_WARN(priv, "check 52 CCK %d | %d\n",
516 counter++, error);
517 }
518 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
519 if (error)
520 IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);
521
522 /* make sure basic rates 6Mbps and 1Mbps are supported */
523 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
524 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
525 if (error)
526 IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);
527
528 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
529 if (error)
530 IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);
531
532 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
533 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
534 if (error)
535 IWL_WARN(priv, "check CCK and short slot %d | %d\n",
536 counter++, error);
537
538 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
539 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
540 if (error)
541 IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
542 counter++, error);
543
544 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
545 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
546 if (error)
547 IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
548 counter++, error);
549
550 if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
551 error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
552 RXON_FLG_ANT_A_MSK)) == 0);
553 if (error)
554 IWL_WARN(priv, "check antenna %d %d\n", counter++, error);
555
556 if (error)
557 IWL_WARN(priv, "Tuning to channel %d\n",
558 le16_to_cpu(rxon->channel));
559
560 if (error) {
 561 		IWL_ERR(priv, "Invalid rxon_assoc_cmd field values\n");
562 return -1;
563 }
564 return 0;
565 }
566
567 /**
568 * iwl3945_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
569 * @priv: staging_rxon is compared to active_rxon
570 *
571 * If the RXON structure is changing enough to require a new tune,
572 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
573 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
574 */
575 static int iwl3945_full_rxon_required(struct iwl_priv *priv)
576 {
577
578 /* These items are only settable from the full RXON command */
579 if (!(iwl3945_is_associated(priv)) ||
580 compare_ether_addr(priv->staging39_rxon.bssid_addr,
581 priv->active39_rxon.bssid_addr) ||
582 compare_ether_addr(priv->staging39_rxon.node_addr,
583 priv->active39_rxon.node_addr) ||
584 compare_ether_addr(priv->staging39_rxon.wlap_bssid_addr,
585 priv->active39_rxon.wlap_bssid_addr) ||
586 (priv->staging39_rxon.dev_type != priv->active39_rxon.dev_type) ||
587 (priv->staging39_rxon.channel != priv->active39_rxon.channel) ||
588 (priv->staging39_rxon.air_propagation !=
589 priv->active39_rxon.air_propagation) ||
590 (priv->staging39_rxon.assoc_id != priv->active39_rxon.assoc_id))
591 return 1;
592
593 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
594 * be updated with the RXON_ASSOC command -- however only some
595 * flag transitions are allowed using RXON_ASSOC */
596
597 /* Check if we are not switching bands */
598 if ((priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
599 (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK))
600 return 1;
601
602 /* Check if we are switching association toggle */
603 if ((priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
604 (priv->active39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
605 return 1;
606
607 return 0;
608 }
609
610 static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
611 {
612 int rc = 0;
613 struct iwl_rx_packet *res = NULL;
614 struct iwl3945_rxon_assoc_cmd rxon_assoc;
615 struct iwl_host_cmd cmd = {
616 .id = REPLY_RXON_ASSOC,
617 .len = sizeof(rxon_assoc),
618 .meta.flags = CMD_WANT_SKB,
619 .data = &rxon_assoc,
620 };
621 const struct iwl3945_rxon_cmd *rxon1 = &priv->staging39_rxon;
622 const struct iwl3945_rxon_cmd *rxon2 = &priv->active39_rxon;
623
624 if ((rxon1->flags == rxon2->flags) &&
625 (rxon1->filter_flags == rxon2->filter_flags) &&
626 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
627 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
628 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
629 return 0;
630 }
631
632 rxon_assoc.flags = priv->staging39_rxon.flags;
633 rxon_assoc.filter_flags = priv->staging39_rxon.filter_flags;
634 rxon_assoc.ofdm_basic_rates = priv->staging39_rxon.ofdm_basic_rates;
635 rxon_assoc.cck_basic_rates = priv->staging39_rxon.cck_basic_rates;
636 rxon_assoc.reserved = 0;
637
638 rc = iwl_send_cmd_sync(priv, &cmd);
639 if (rc)
640 return rc;
641
642 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
643 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
644 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
645 rc = -EIO;
646 }
647
648 priv->alloc_rxb_skb--;
649 dev_kfree_skb_any(cmd.meta.u.skb);
650
651 return rc;
652 }
653
654 /**
655 * iwl3945_commit_rxon - commit staging_rxon to hardware
656 *
657 * The RXON command in staging_rxon is committed to the hardware and
658 * the active_rxon structure is updated with the new data. This
659 * function correctly transitions out of the RXON_ASSOC_MSK state if
660 * a HW tune is required based on the RXON structure changes.
661 */
662 static int iwl3945_commit_rxon(struct iwl_priv *priv)
663 {
664 /* cast away the const for active_rxon in this function */
665 struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active39_rxon;
666 int rc = 0;
667
668 if (!iwl_is_alive(priv))
669 return -1;
670
671 /* always get timestamp with Rx frame */
672 priv->staging39_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
673
674 /* select antenna */
675 priv->staging39_rxon.flags &=
676 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
677 priv->staging39_rxon.flags |= iwl3945_get_antenna_flags(priv);
678
679 rc = iwl3945_check_rxon_cmd(priv);
680 if (rc) {
681 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
682 return -EINVAL;
683 }
684
685 /* If we don't need to send a full RXON, we can use
686 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
687 * and other flags for the current radio configuration. */
688 if (!iwl3945_full_rxon_required(priv)) {
689 rc = iwl3945_send_rxon_assoc(priv);
690 if (rc) {
691 IWL_ERR(priv, "Error setting RXON_ASSOC "
692 "configuration (%d).\n", rc);
693 return rc;
694 }
695
696 memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));
697
698 return 0;
699 }
700
701 /* If we are currently associated and the new config requires
702 * an RXON_ASSOC and the new config wants the associated mask enabled,
 703  * we must clear the ASSOC bit from the active configuration
704 * before we apply the new config */
705 if (iwl3945_is_associated(priv) &&
706 (priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
707 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
708 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
709
710 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
711 sizeof(struct iwl3945_rxon_cmd),
712 &priv->active39_rxon);
713
714 /* If the mask clearing failed then we set
715 * active_rxon back to what it was previously */
716 if (rc) {
717 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
718 IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
719 "configuration (%d).\n", rc);
720 return rc;
721 }
722 }
723
724 IWL_DEBUG_INFO("Sending RXON\n"
725 "* with%s RXON_FILTER_ASSOC_MSK\n"
726 "* channel = %d\n"
727 "* bssid = %pM\n",
728 ((priv->staging39_rxon.filter_flags &
729 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
730 le16_to_cpu(priv->staging39_rxon.channel),
731 priv->staging_rxon.bssid_addr);
732
733 /* Apply the new configuration */
734 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
735 sizeof(struct iwl3945_rxon_cmd), &priv->staging39_rxon);
736 if (rc) {
737 IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
738 return rc;
739 }
740
741 memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));
742
743 iwl3945_clear_stations_table(priv);
744
745 /* If we issue a new RXON command which required a tune then we must
746 * send a new TXPOWER command or we won't be able to Tx any frames */
747 rc = priv->cfg->ops->lib->send_tx_power(priv);
748 if (rc) {
749 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
750 return rc;
751 }
752
753 /* Add the broadcast address so we can send broadcast frames */
754 if (iwl3945_add_station(priv, iwl_bcast_addr, 0, 0) ==
755 IWL_INVALID_STATION) {
756 IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
757 return -EIO;
758 }
759
760 /* If we have set the ASSOC_MSK and we are in BSS mode then
761 * add the IWL_AP_ID to the station rate table */
762 if (iwl3945_is_associated(priv) &&
763 (priv->iw_mode == NL80211_IFTYPE_STATION))
764 if (iwl3945_add_station(priv, priv->active39_rxon.bssid_addr, 1, 0)
765 == IWL_INVALID_STATION) {
766 IWL_ERR(priv, "Error adding AP address for transmit\n");
767 return -EIO;
768 }
769
770 /* Init the hardware's rate fallback order based on the band */
771 rc = iwl3945_init_hw_rate_table(priv);
772 if (rc) {
773 IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
774 return -EIO;
775 }
776
777 return 0;
778 }
779
780 static int iwl3945_send_bt_config(struct iwl_priv *priv)
781 {
782 struct iwl_bt_cmd bt_cmd = {
783 .flags = 3,
784 .lead_time = 0xAA,
785 .max_kill = 1,
786 .kill_ack_mask = 0,
787 .kill_cts_mask = 0,
788 };
789
790 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
791 sizeof(bt_cmd), &bt_cmd);
792 }
793
794 static int iwl3945_add_sta_sync_callback(struct iwl_priv *priv,
795 struct iwl_cmd *cmd, struct sk_buff *skb)
796 {
797 struct iwl_rx_packet *res = NULL;
798
799 if (!skb) {
800 IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
801 return 1;
802 }
803
804 res = (struct iwl_rx_packet *)skb->data;
805 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
806 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
807 res->hdr.flags);
808 return 1;
809 }
810
811 switch (res->u.add_sta.status) {
812 case ADD_STA_SUCCESS_MSK:
813 break;
814 default:
815 break;
816 }
817
818 /* We didn't cache the SKB; let the caller free it */
819 return 1;
820 }
821
822 int iwl3945_send_add_station(struct iwl_priv *priv,
823 struct iwl3945_addsta_cmd *sta, u8 flags)
824 {
825 struct iwl_rx_packet *res = NULL;
826 int rc = 0;
827 struct iwl_host_cmd cmd = {
828 .id = REPLY_ADD_STA,
829 .len = sizeof(struct iwl3945_addsta_cmd),
830 .meta.flags = flags,
831 .data = sta,
832 };
833
834 if (flags & CMD_ASYNC)
835 cmd.meta.u.callback = iwl3945_add_sta_sync_callback;
836 else
837 cmd.meta.flags |= CMD_WANT_SKB;
838
839 rc = iwl_send_cmd(priv, &cmd);
840
841 if (rc || (flags & CMD_ASYNC))
842 return rc;
843
844 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
845 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
846 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
847 res->hdr.flags);
848 rc = -EIO;
849 }
850
851 if (rc == 0) {
852 switch (res->u.add_sta.status) {
853 case ADD_STA_SUCCESS_MSK:
854 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
855 break;
856 default:
857 rc = -EIO;
858 IWL_WARN(priv, "REPLY_ADD_STA failed\n");
859 break;
860 }
861 }
862
863 priv->alloc_rxb_skb--;
864 dev_kfree_skb_any(cmd.meta.u.skb);
865
866 return rc;
867 }
868
869 static int iwl3945_update_sta_key_info(struct iwl_priv *priv,
870 struct ieee80211_key_conf *keyconf,
871 u8 sta_id)
872 {
873 unsigned long flags;
874 __le16 key_flags = 0;
875
876 switch (keyconf->alg) {
877 case ALG_CCMP:
878 key_flags |= STA_KEY_FLG_CCMP;
879 key_flags |= cpu_to_le16(
880 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
881 key_flags &= ~STA_KEY_FLG_INVALID;
882 break;
883 case ALG_TKIP:
884 case ALG_WEP:
885 default:
886 return -EINVAL;
887 }
888 spin_lock_irqsave(&priv->sta_lock, flags);
889 priv->stations_39[sta_id].keyinfo.alg = keyconf->alg;
890 priv->stations_39[sta_id].keyinfo.keylen = keyconf->keylen;
891 memcpy(priv->stations_39[sta_id].keyinfo.key, keyconf->key,
892 keyconf->keylen);
893
894 memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
895 keyconf->keylen);
896 priv->stations_39[sta_id].sta.key.key_flags = key_flags;
897 priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
898 priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
899
900 spin_unlock_irqrestore(&priv->sta_lock, flags);
901
902 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
903 iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
904 return 0;
905 }
906
907 static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
908 {
909 unsigned long flags;
910
911 spin_lock_irqsave(&priv->sta_lock, flags);
912 memset(&priv->stations_39[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key));
913 memset(&priv->stations_39[sta_id].sta.key, 0,
914 sizeof(struct iwl4965_keyinfo));
915 priv->stations_39[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
916 priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
917 priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
918 spin_unlock_irqrestore(&priv->sta_lock, flags);
919
920 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
921 iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
922 return 0;
923 }
924
925 static void iwl3945_clear_free_frames(struct iwl_priv *priv)
926 {
927 struct list_head *element;
928
929 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
930 priv->frames_count);
931
932 while (!list_empty(&priv->free_frames)) {
933 element = priv->free_frames.next;
934 list_del(element);
935 kfree(list_entry(element, struct iwl3945_frame, list));
936 priv->frames_count--;
937 }
938
939 if (priv->frames_count) {
940 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
941 priv->frames_count);
942 priv->frames_count = 0;
943 }
944 }
945
946 static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
947 {
948 struct iwl3945_frame *frame;
949 struct list_head *element;
950 if (list_empty(&priv->free_frames)) {
951 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
952 if (!frame) {
953 IWL_ERR(priv, "Could not allocate frame!\n");
954 return NULL;
955 }
956
957 priv->frames_count++;
958 return frame;
959 }
960
961 element = priv->free_frames.next;
962 list_del(element);
963 return list_entry(element, struct iwl3945_frame, list);
964 }
965
966 static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
967 {
968 memset(frame, 0, sizeof(*frame));
969 list_add(&frame->list, &priv->free_frames);
970 }
971
972 unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
973 struct ieee80211_hdr *hdr,
974 int left)
975 {
976
977 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
978 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
979 (priv->iw_mode != NL80211_IFTYPE_AP)))
980 return 0;
981
982 if (priv->ibss_beacon->len > left)
983 return 0;
984
985 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
986
987 return priv->ibss_beacon->len;
988 }
989
990 static u8 iwl3945_rate_get_lowest_plcp(struct iwl_priv *priv)
991 {
992 u8 i;
993 int rate_mask;
994
995 /* Set rate mask*/
996 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
997 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
998 else
999 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
1000
1001 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1002 i = iwl3945_rates[i].next_ieee) {
1003 if (rate_mask & (1 << i))
1004 return iwl3945_rates[i].plcp;
1005 }
1006
1007 /* No valid rate was found. Assign the lowest one */
1008 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
1009 return IWL_RATE_1M_PLCP;
1010 else
1011 return IWL_RATE_6M_PLCP;
1012 }
1013
1014 static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
1015 {
1016 struct iwl3945_frame *frame;
1017 unsigned int frame_size;
1018 int rc;
1019 u8 rate;
1020
1021 frame = iwl3945_get_free_frame(priv);
1022
1023 if (!frame) {
1024 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
1025 "command.\n");
1026 return -ENOMEM;
1027 }
1028
1029 rate = iwl3945_rate_get_lowest_plcp(priv);
1030
1031 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
1032
1033 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1034 &frame->u.cmd[0]);
1035
1036 iwl3945_free_frame(priv, frame);
1037
1038 return rc;
1039 }
1040
1041 /******************************************************************************
1042 *
1043 * EEPROM related functions
1044 *
1045 ******************************************************************************/
1046
1047 static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
1048 {
1049 memcpy(mac, priv->eeprom39.mac_address, 6);
1050 }
1051
1052 /*
1053 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
1054 * embedded controller) as EEPROM reader; each read is a series of pulses
1055 * to/from the EEPROM chip, not a single event, so even reads could conflict
1056 * if they weren't arbitrated by some ownership mechanism. Here, the driver
1057 * simply claims ownership, which should be safe when this function is called
1058 * (i.e. before loading uCode!).
1059 */
1060 static inline int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
1061 {
1062 _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
1063 return 0;
1064 }
1065
1066 /**
1067 * iwl3945_eeprom_init - read EEPROM contents
1068 *
1069 * Load the EEPROM contents from adapter into priv->eeprom39
1070 *
1071 * NOTE: This routine uses the non-debug IO access functions.
1072 */
1073 int iwl3945_eeprom_init(struct iwl_priv *priv)
1074 {
1075 u16 *e = (u16 *)&priv->eeprom39;
1076 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
1077 int sz = sizeof(priv->eeprom39);
1078 int ret;
1079 u16 addr;
1080
 1081 	/* The EEPROM structure has several padding buffers within it,
 1082 	 * and adding new EEPROM maps is subject to programmer errors
 1083 	 * which may be very difficult to identify without explicitly
 1084 	 * checking the resulting size of the eeprom map. */
1085 BUILD_BUG_ON(sizeof(priv->eeprom39) != IWL_EEPROM_IMAGE_SIZE);
1086
1087 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1088 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
1089 return -ENOENT;
1090 }
1091
1092 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1093 ret = iwl3945_eeprom_acquire_semaphore(priv);
1094 if (ret < 0) {
1095 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
1096 return -ENOENT;
1097 }
1098
1099 /* eeprom is an array of 16bit values */
1100 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1101 u32 r;
1102
1103 _iwl_write32(priv, CSR_EEPROM_REG,
1104 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
1105 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1106 ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
1107 CSR_EEPROM_REG_READ_VALID_MSK,
1108 IWL_EEPROM_ACCESS_TIMEOUT);
1109 if (ret < 0) {
1110 IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
1111 return ret;
1112 }
1113
1114 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
1115 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
1116 }
1117
1118 return 0;
1119 }
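/*
 * Illustrative walk-through (not part of the driver): for a hypothetical
 * byte offset addr = 0x20, the loop above writes (0x20 << 1), masked by
 * CSR_EEPROM_REG_MSK_ADDR, into CSR_EEPROM_REG, polls for
 * CSR_EEPROM_REG_READ_VALID_MSK, and then stores the upper 16 bits of the
 * returned register value as e[0x20 / 2], so priv->eeprom39 ends up holding
 * the EEPROM image as CPU-order 16-bit words.
 */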
1120
1121 static void iwl3945_unset_hw_params(struct iwl_priv *priv)
1122 {
1123 if (priv->shared_virt)
1124 pci_free_consistent(priv->pci_dev,
1125 sizeof(struct iwl3945_shared),
1126 priv->shared_virt,
1127 priv->shared_phys);
1128 }
1129
1130 /*
1131 * QoS support
1132 */
1133 static int iwl3945_send_qos_params_command(struct iwl_priv *priv,
1134 struct iwl_qosparam_cmd *qos)
1135 {
1136
1137 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1138 sizeof(struct iwl_qosparam_cmd), qos);
1139 }
1140
1141 static void iwl3945_activate_qos(struct iwl_priv *priv, u8 force)
1142 {
1143 unsigned long flags;
1144
1145 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1146 return;
1147
1148 spin_lock_irqsave(&priv->lock, flags);
1149 priv->qos_data.def_qos_parm.qos_flags = 0;
1150
1151 if (priv->qos_data.qos_cap.q_AP.queue_request &&
1152 !priv->qos_data.qos_cap.q_AP.txop_request)
1153 priv->qos_data.def_qos_parm.qos_flags |=
1154 QOS_PARAM_FLG_TXOP_TYPE_MSK;
1155
1156 if (priv->qos_data.qos_active)
1157 priv->qos_data.def_qos_parm.qos_flags |=
1158 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
1159
1160 spin_unlock_irqrestore(&priv->lock, flags);
1161
1162 if (force || iwl3945_is_associated(priv)) {
1163 IWL_DEBUG_QOS("send QoS cmd with QoS active %d \n",
1164 priv->qos_data.qos_active);
1165
1166 iwl3945_send_qos_params_command(priv,
1167 &(priv->qos_data.def_qos_parm));
1168 }
1169 }
1170
1171 /*
1172 * Power management (not Tx power!) functions
1173 */
1174 #define MSEC_TO_USEC 1024
1175
1176
1177 /* default power management (not Tx power) table values */
1178 /* for TIM 0-10 */
1179 static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
1180 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1181 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
1182 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
1183 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
1184 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
1185 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1186 };
1187
1188 /* for TIM > 10 */
1189 static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
1190 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1191 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
1192 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
1193 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
1194 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
1195 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
1196 };
1197
1198 int iwl3945_power_init_handle(struct iwl_priv *priv)
1199 {
1200 int rc = 0, i;
1201 struct iwl_power_mgr *pow_data;
1202 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
1203 u16 pci_pm;
1204
1205 IWL_DEBUG_POWER("Initialize power \n");
1206
1207 pow_data = &priv->power_data;
1208
1209 memset(pow_data, 0, sizeof(*pow_data));
1210
1211 pow_data->dtim_period = 1;
1212
1213 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1214 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1215
1216 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1217 if (rc != 0)
1218 return 0;
1219 else {
1220 struct iwl_powertable_cmd *cmd;
1221
1222 IWL_DEBUG_POWER("adjust power command flags\n");
1223
1224 for (i = 0; i < IWL_POWER_MAX; i++) {
1225 cmd = &pow_data->pwr_range_0[i].cmd;
1226
1227 if (pci_pm & 0x1)
1228 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1229 else
1230 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1231 }
1232 }
1233 return rc;
1234 }
1235
1236 static int iwl3945_update_power_cmd(struct iwl_priv *priv,
1237 struct iwl_powertable_cmd *cmd, u32 mode)
1238 {
1239 struct iwl_power_mgr *pow_data;
1240 struct iwl_power_vec_entry *range;
1241 u32 max_sleep = 0;
1242 int i;
1243 u8 period = 0;
1244 bool skip;
1245
1246 if (mode > IWL_POWER_INDEX_5) {
1247 IWL_DEBUG_POWER("Error invalid power mode \n");
1248 return -EINVAL;
1249 }
1250 pow_data = &priv->power_data;
1251
1252 if (pow_data->dtim_period < 10)
1253 range = &pow_data->pwr_range_0[0];
1254 else
1255 range = &pow_data->pwr_range_1[1];
1256
1257 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl3945_powertable_cmd));
1258
1259
1260 if (period == 0) {
1261 period = 1;
1262 skip = false;
1263 } else {
1264 skip = !!range[mode].no_dtim;
1265 }
1266
1267 if (skip) {
1268 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1269 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1270 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1271 } else {
1272 max_sleep = period;
1273 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1274 }
1275
1276 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
1277 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1278 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1279
1280 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1281 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1282 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1283 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1284 le32_to_cpu(cmd->sleep_interval[0]),
1285 le32_to_cpu(cmd->sleep_interval[1]),
1286 le32_to_cpu(cmd->sleep_interval[2]),
1287 le32_to_cpu(cmd->sleep_interval[3]),
1288 le32_to_cpu(cmd->sleep_interval[4]));
1289
1290 return 0;
1291 }
1292
1293 static int iwl3945_send_power_mode(struct iwl_priv *priv, u32 mode)
1294 {
1295 u32 uninitialized_var(final_mode);
1296 int rc;
1297 struct iwl_powertable_cmd cmd;
1298
1299 /* If on battery, set to 3,
1300 * if plugged into AC power, set to CAM ("continuously aware mode"),
1301 * else user level */
1302 switch (mode) {
1303 case IWL39_POWER_BATTERY:
1304 final_mode = IWL_POWER_INDEX_3;
1305 break;
1306 case IWL39_POWER_AC:
1307 final_mode = IWL_POWER_MODE_CAM;
1308 break;
1309 default:
1310 final_mode = mode;
1311 break;
1312 }
1313
1314 iwl3945_update_power_cmd(priv, &cmd, final_mode);
1315
 1316 	/* FIXME: use get_hcmd_size; the 3945 command is 4 bytes shorter */
1317 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
1318 sizeof(struct iwl3945_powertable_cmd), &cmd);
1319
1320 if (final_mode == IWL_POWER_MODE_CAM)
1321 clear_bit(STATUS_POWER_PMI, &priv->status);
1322 else
1323 set_bit(STATUS_POWER_PMI, &priv->status);
1324
1325 return rc;
1326 }
1327
1328 #define MAX_UCODE_BEACON_INTERVAL 1024
1329 #define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
1330
1331 static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val)
1332 {
1333 u16 new_val = 0;
1334 u16 beacon_factor = 0;
1335
1336 beacon_factor =
1337 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
1338 / MAX_UCODE_BEACON_INTERVAL;
1339 new_val = beacon_val / beacon_factor;
1340
1341 return cpu_to_le16(new_val);
1342 }
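/*
 * Worked examples (illustrative only): with MAX_UCODE_BEACON_INTERVAL = 1024,
 * a beacon interval of 200 TU gives beacon_factor = (200 + 1024) / 1024 = 1,
 * so the value passes through unchanged; an oversized interval of 3000 TU
 * gives beacon_factor = (3000 + 1024) / 1024 = 3 and new_val = 3000 / 3 = 1000,
 * which again fits within what the uCode can program.
 */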
1343
1344 static void iwl3945_setup_rxon_timing(struct iwl_priv *priv)
1345 {
1346 u64 interval_tm_unit;
1347 u64 tsf, result;
1348 unsigned long flags;
1349 struct ieee80211_conf *conf = NULL;
1350 u16 beacon_int = 0;
1351
1352 conf = ieee80211_get_hw_conf(priv->hw);
1353
1354 spin_lock_irqsave(&priv->lock, flags);
1355 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
1356 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
1357
1358 tsf = priv->timestamp;
1359
1360 beacon_int = priv->beacon_int;
1361 spin_unlock_irqrestore(&priv->lock, flags);
1362
1363 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
1364 if (beacon_int == 0) {
1365 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
1366 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
1367 } else {
1368 priv->rxon_timing.beacon_interval =
1369 cpu_to_le16(beacon_int);
1370 priv->rxon_timing.beacon_interval =
1371 iwl3945_adjust_beacon_interval(
1372 le16_to_cpu(priv->rxon_timing.beacon_interval));
1373 }
1374
1375 priv->rxon_timing.atim_window = 0;
1376 } else {
1377 priv->rxon_timing.beacon_interval =
1378 iwl3945_adjust_beacon_interval(conf->beacon_int);
1379 /* TODO: we need to get atim_window from upper stack
1380 * for now we set to 0 */
1381 priv->rxon_timing.atim_window = 0;
1382 }
1383
1384 interval_tm_unit =
1385 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
1386 result = do_div(tsf, interval_tm_unit);
1387 priv->rxon_timing.beacon_init_val =
1388 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
1389
1390 IWL_DEBUG_ASSOC
1391 ("beacon interval %d beacon timer %d beacon tim %d\n",
1392 le16_to_cpu(priv->rxon_timing.beacon_interval),
1393 le32_to_cpu(priv->rxon_timing.beacon_init_val),
1394 le16_to_cpu(priv->rxon_timing.atim_window));
1395 }
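/*
 * Illustrative example (not part of the driver): beacon_init_val is the time,
 * in usec, from the cached TSF to the next expected beacon.  With a beacon
 * interval of 100 TU, interval_tm_unit = 100 * 1024 = 102400 usec; if the
 * cached TSF were 250000 usec, do_div() above would leave result = 45200 and
 * beacon_init_val would be programmed as 102400 - 45200 = 57200 usec.
 */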
1396
1397 static int iwl3945_scan_initiate(struct iwl_priv *priv)
1398 {
1399 if (!iwl_is_ready_rf(priv)) {
1400 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
1401 return -EIO;
1402 }
1403
1404 if (test_bit(STATUS_SCANNING, &priv->status)) {
1405 IWL_DEBUG_SCAN("Scan already in progress.\n");
1406 return -EAGAIN;
1407 }
1408
1409 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1410 IWL_DEBUG_SCAN("Scan request while abort pending. "
1411 "Queuing.\n");
1412 return -EAGAIN;
1413 }
1414
1415 IWL_DEBUG_INFO("Starting scan...\n");
1416 if (priv->cfg->sku & IWL_SKU_G)
1417 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
1418 if (priv->cfg->sku & IWL_SKU_A)
1419 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
1420 set_bit(STATUS_SCANNING, &priv->status);
1421 priv->scan_start = jiffies;
1422 priv->scan_pass_start = priv->scan_start;
1423
1424 queue_work(priv->workqueue, &priv->request_scan);
1425
1426 return 0;
1427 }
1428
1429 static int iwl3945_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
1430 {
1431 struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;
1432
1433 if (hw_decrypt)
1434 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
1435 else
1436 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
1437
1438 return 0;
1439 }
1440
1441 static void iwl3945_set_flags_for_phymode(struct iwl_priv *priv,
1442 enum ieee80211_band band)
1443 {
1444 if (band == IEEE80211_BAND_5GHZ) {
1445 priv->staging39_rxon.flags &=
1446 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
1447 | RXON_FLG_CCK_MSK);
1448 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1449 } else {
1450 /* Copied from iwl3945_bg_post_associate() */
1451 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
1452 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1453 else
1454 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1455
1456 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
1457 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1458
1459 priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1460 priv->staging39_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
1461 priv->staging39_rxon.flags &= ~RXON_FLG_CCK_MSK;
1462 }
1463 }
1464
1465 /*
1466 * initialize rxon structure with default values from eeprom
1467 */
1468 static void iwl3945_connection_init_rx_config(struct iwl_priv *priv,
1469 int mode)
1470 {
1471 const struct iwl_channel_info *ch_info;
1472
1473 memset(&priv->staging39_rxon, 0, sizeof(priv->staging39_rxon));
1474
1475 switch (mode) {
1476 case NL80211_IFTYPE_AP:
1477 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_AP;
1478 break;
1479
1480 case NL80211_IFTYPE_STATION:
1481 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_ESS;
1482 priv->staging39_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
1483 break;
1484
1485 case NL80211_IFTYPE_ADHOC:
1486 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_IBSS;
1487 priv->staging39_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
1488 priv->staging39_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
1489 RXON_FILTER_ACCEPT_GRP_MSK;
1490 break;
1491
1492 case NL80211_IFTYPE_MONITOR:
1493 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
1494 priv->staging39_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
1495 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
1496 break;
1497 default:
1498 IWL_ERR(priv, "Unsupported interface type %d\n", mode);
1499 break;
1500 }
1501
1502 #if 0
1503 /* TODO: Figure out when short_preamble would be set and cache from
1504 * that */
1505 if (!hw_to_local(priv->hw)->short_preamble)
1506 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1507 else
1508 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1509 #endif
1510
1511 ch_info = iwl3945_get_channel_info(priv, priv->band,
1512 le16_to_cpu(priv->active39_rxon.channel));
1513
1514 if (!ch_info)
1515 ch_info = &priv->channel_info[0];
1516
1517 /*
 1518 	 * In some cases the A-band channels are all non-IBSS;
 1519 	 * in that case force a B/G channel.
1520 */
1521 if ((mode == NL80211_IFTYPE_ADHOC) && !(is_channel_ibss(ch_info)))
1522 ch_info = &priv->channel_info[0];
1523
1524 priv->staging39_rxon.channel = cpu_to_le16(ch_info->channel);
1525 if (is_channel_a_band(ch_info))
1526 priv->band = IEEE80211_BAND_5GHZ;
1527 else
1528 priv->band = IEEE80211_BAND_2GHZ;
1529
1530 iwl3945_set_flags_for_phymode(priv, priv->band);
1531
1532 priv->staging39_rxon.ofdm_basic_rates =
1533 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1534 priv->staging39_rxon.cck_basic_rates =
1535 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1536 }
1537
1538 static int iwl3945_set_mode(struct iwl_priv *priv, int mode)
1539 {
1540 if (mode == NL80211_IFTYPE_ADHOC) {
1541 const struct iwl_channel_info *ch_info;
1542
1543 ch_info = iwl3945_get_channel_info(priv,
1544 priv->band,
1545 le16_to_cpu(priv->staging39_rxon.channel));
1546
1547 if (!ch_info || !is_channel_ibss(ch_info)) {
1548 IWL_ERR(priv, "channel %d not IBSS channel\n",
1549 le16_to_cpu(priv->staging39_rxon.channel));
1550 return -EINVAL;
1551 }
1552 }
1553
1554 iwl3945_connection_init_rx_config(priv, mode);
1555 memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
1556
1557 iwl3945_clear_stations_table(priv);
1558
1559 /* don't commit rxon if rf-kill is on*/
1560 if (!iwl_is_ready_rf(priv))
1561 return -EAGAIN;
1562
1563 cancel_delayed_work(&priv->scan_check);
1564 if (iwl_scan_cancel_timeout(priv, 100)) {
1565 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
1566 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
1567 return -EAGAIN;
1568 }
1569
1570 iwl3945_commit_rxon(priv);
1571
1572 return 0;
1573 }
1574
1575 static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
1576 struct ieee80211_tx_info *info,
1577 struct iwl_cmd *cmd,
1578 struct sk_buff *skb_frag,
1579 int last_frag)
1580 {
1581 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
1582 struct iwl3945_hw_key *keyinfo =
1583 &priv->stations_39[info->control.hw_key->hw_key_idx].keyinfo;
1584
1585 switch (keyinfo->alg) {
1586 case ALG_CCMP:
1587 tx->sec_ctl = TX_CMD_SEC_CCM;
1588 memcpy(tx->key, keyinfo->key, keyinfo->keylen);
1589 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
1590 break;
1591
1592 case ALG_TKIP:
1593 #if 0
1594 tx->sec_ctl = TX_CMD_SEC_TKIP;
1595
1596 if (last_frag)
1597 memcpy(tx->tkip_mic.byte, skb_frag->tail - 8,
1598 8);
1599 else
1600 memset(tx->tkip_mic.byte, 0, 8);
1601 #endif
1602 break;
1603
1604 case ALG_WEP:
1605 tx->sec_ctl = TX_CMD_SEC_WEP |
1606 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
1607
1608 if (keyinfo->keylen == 13)
1609 tx->sec_ctl |= TX_CMD_SEC_KEY128;
1610
1611 memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen);
1612
1613 IWL_DEBUG_TX("Configuring packet for WEP encryption "
1614 "with key %d\n", info->control.hw_key->hw_key_idx);
1615 break;
1616
1617 default:
1618 IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg);
1619 break;
1620 }
1621 }
1622
1623 /*
1624 * handle build REPLY_TX command notification.
1625 */
1626 static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
1627 struct iwl_cmd *cmd,
1628 struct ieee80211_tx_info *info,
1629 struct ieee80211_hdr *hdr, u8 std_id)
1630 {
1631 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
1632 __le32 tx_flags = tx->tx_flags;
1633 __le16 fc = hdr->frame_control;
1634 u8 rc_flags = info->control.rates[0].flags;
1635
1636 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1637 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1638 tx_flags |= TX_CMD_FLG_ACK_MSK;
1639 if (ieee80211_is_mgmt(fc))
1640 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1641 if (ieee80211_is_probe_resp(fc) &&
1642 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
1643 tx_flags |= TX_CMD_FLG_TSF_MSK;
1644 } else {
1645 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
1646 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1647 }
1648
1649 tx->sta_id = std_id;
1650 if (ieee80211_has_morefrags(fc))
1651 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
1652
1653 if (ieee80211_is_data_qos(fc)) {
1654 u8 *qc = ieee80211_get_qos_ctl(hdr);
1655 tx->tid_tspec = qc[0] & 0xf;
1656 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
1657 } else {
1658 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1659 }
1660
1661 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
1662 tx_flags |= TX_CMD_FLG_RTS_MSK;
1663 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
1664 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1665 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
1666 tx_flags |= TX_CMD_FLG_CTS_MSK;
1667 }
1668
1669 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
1670 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
1671
1672 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
1673 if (ieee80211_is_mgmt(fc)) {
1674 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
1675 tx->timeout.pm_frame_timeout = cpu_to_le16(3);
1676 else
1677 tx->timeout.pm_frame_timeout = cpu_to_le16(2);
1678 } else {
1679 tx->timeout.pm_frame_timeout = 0;
1680 #ifdef CONFIG_IWL3945_LEDS
1681 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
1682 #endif
1683 }
1684
1685 tx->driver_txop = 0;
1686 tx->tx_flags = tx_flags;
1687 tx->next_frame_len = 0;
1688 }
1689
1690 /**
1691 * iwl3945_get_sta_id - Find station's index within station table
1692 */
1693 static int iwl3945_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1694 {
1695 int sta_id;
1696 u16 fc = le16_to_cpu(hdr->frame_control);
1697
1698 /* If this frame is broadcast or management, use broadcast station id */
1699 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
1700 is_multicast_ether_addr(hdr->addr1))
1701 return priv->hw_params.bcast_sta_id;
1702
1703 switch (priv->iw_mode) {
1704
1705 /* If we are a client station in a BSS network, use the special
1706 * AP station entry (that's the only station we communicate with) */
1707 case NL80211_IFTYPE_STATION:
1708 return IWL_AP_ID;
1709
1710 /* If we are an AP, then find the station, or use BCAST */
1711 case NL80211_IFTYPE_AP:
1712 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
1713 if (sta_id != IWL_INVALID_STATION)
1714 return sta_id;
1715 return priv->hw_params.bcast_sta_id;
1716
1717 /* If this frame is going out to an IBSS network, find the station,
1718 * or create a new station table entry */
1719 case NL80211_IFTYPE_ADHOC: {
1720 /* Create new station table entry */
1721 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
1722 if (sta_id != IWL_INVALID_STATION)
1723 return sta_id;
1724
1725 sta_id = iwl3945_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
1726
1727 if (sta_id != IWL_INVALID_STATION)
1728 return sta_id;
1729
1730 IWL_DEBUG_DROP("Station %pM not in station map. "
1731 "Defaulting to broadcast...\n",
1732 hdr->addr1);
1733 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
1734 return priv->hw_params.bcast_sta_id;
1735 }
1736 /* If we are in monitor mode, use BCAST. This is required for
1737 * packet injection. */
1738 case NL80211_IFTYPE_MONITOR:
1739 return priv->hw_params.bcast_sta_id;
1740
1741 default:
1742 IWL_WARN(priv, "Unknown mode of operation: %d\n",
1743 priv->iw_mode);
1744 return priv->hw_params.bcast_sta_id;
1745 }
1746 }
1747
1748 /*
1749 * start REPLY_TX command process
1750 */
1751 static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
1752 {
1753 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1754 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1755 struct iwl3945_tx_cmd *tx;
1756 struct iwl_tx_queue *txq = NULL;
1757 struct iwl_queue *q = NULL;
1758 struct iwl_cmd *out_cmd = NULL;
1759 dma_addr_t phys_addr;
1760 dma_addr_t txcmd_phys;
1761 int txq_id = skb_get_queue_mapping(skb);
1762 u16 len, idx, len_org, hdr_len;
1763 u8 id;
1764 u8 unicast;
1765 u8 sta_id;
1766 u8 tid = 0;
1767 u16 seq_number = 0;
1768 __le16 fc;
1769 u8 wait_write_ptr = 0;
1770 u8 *qc = NULL;
1771 unsigned long flags;
1772 int rc;
1773
1774 spin_lock_irqsave(&priv->lock, flags);
1775 if (iwl_is_rfkill(priv)) {
1776 IWL_DEBUG_DROP("Dropping - RF KILL\n");
1777 goto drop_unlock;
1778 }
1779
1780 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
1781 IWL_ERR(priv, "ERROR: No TX rate available.\n");
1782 goto drop_unlock;
1783 }
1784
1785 unicast = !is_multicast_ether_addr(hdr->addr1);
1786 id = 0;
1787
1788 fc = hdr->frame_control;
1789
1790 #ifdef CONFIG_IWL3945_DEBUG
1791 if (ieee80211_is_auth(fc))
1792 IWL_DEBUG_TX("Sending AUTH frame\n");
1793 else if (ieee80211_is_assoc_req(fc))
1794 IWL_DEBUG_TX("Sending ASSOC frame\n");
1795 else if (ieee80211_is_reassoc_req(fc))
1796 IWL_DEBUG_TX("Sending REASSOC frame\n");
1797 #endif
1798
 1799 	/* drop all data frames if we are not associated */
1800 if (ieee80211_is_data(fc) &&
1801 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
1802 (!iwl3945_is_associated(priv) ||
1803 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
1804 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
1805 goto drop_unlock;
1806 }
1807
1808 spin_unlock_irqrestore(&priv->lock, flags);
1809
1810 hdr_len = ieee80211_hdrlen(fc);
1811
1812 /* Find (or create) index into station table for destination station */
1813 sta_id = iwl3945_get_sta_id(priv, hdr);
1814 if (sta_id == IWL_INVALID_STATION) {
1815 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
1816 hdr->addr1);
1817 goto drop;
1818 }
1819
1820 IWL_DEBUG_RATE("station Id %d\n", sta_id);
1821
1822 if (ieee80211_is_data_qos(fc)) {
1823 qc = ieee80211_get_qos_ctl(hdr);
1824 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1825 seq_number = priv->stations_39[sta_id].tid[tid].seq_number &
1826 IEEE80211_SCTL_SEQ;
1827 hdr->seq_ctrl = cpu_to_le16(seq_number) |
1828 (hdr->seq_ctrl &
1829 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
1830 seq_number += 0x10;
1831 }
1832
1833 /* Descriptor for chosen Tx queue */
1834 txq = &priv->txq[txq_id];
1835 q = &txq->q;
1836
1837 spin_lock_irqsave(&priv->lock, flags);
1838
1839 idx = get_cmd_index(q, q->write_ptr, 0);
1840
1841 /* Set up driver data for this TFD */
1842 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
1843 txq->txb[q->write_ptr].skb[0] = skb;
1844
1845 /* Init first empty entry in queue's array of Tx/cmd buffers */
1846 out_cmd = txq->cmd[idx];
1847 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
1848 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1849 memset(tx, 0, sizeof(*tx));
1850
1851 /*
1852 * Set up the Tx-command (not MAC!) header.
1853 * Store the chosen Tx queue and TFD index within the sequence field;
1854 * after Tx, uCode's Tx response will return this value so driver can
1855 * locate the frame within the tx queue and do post-tx processing.
1856 */
1857 out_cmd->hdr.cmd = REPLY_TX;
1858 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1859 INDEX_TO_SEQ(q->write_ptr)));
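	/* (The uCode echoes this sequence value back in its response; it is
	 * decoded with SEQ_TO_QUEUE()/SEQ_TO_INDEX() on the completion path.) */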
1860
1861 /* Copy MAC header from skb into command buffer */
1862 memcpy(tx->hdr, hdr, hdr_len);
1863
1864 /*
1865 * Use the first empty entry in this queue's command buffer array
1866 * to contain the Tx command and MAC header concatenated together
1867 * (payload data will be in another buffer).
1868 * Size of this varies, due to varying MAC header length.
1869 * If end is not dword aligned, we'll have 2 extra bytes at the end
1870 * of the MAC header (device reads on dword boundaries).
1871 * We'll tell device about this padding later.
1872 */
1873 len = sizeof(struct iwl3945_tx_cmd) +
1874 sizeof(struct iwl_cmd_header) + hdr_len;
1875
1876 len_org = len;
1877 len = (len + 3) & ~3;
1878
1879 if (len_org != len)
1880 len_org = 1;
1881 else
1882 len_org = 0;
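	/* From here on, len_org is just a flag: 1 if dword padding was
	 * added to the command+header length, 0 otherwise. */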
1883
1884 /* Physical address of this Tx command's header (not MAC header!),
1885 * within command buffer array. */
1886 txcmd_phys = pci_map_single(priv->pci_dev,
1887 out_cmd, sizeof(struct iwl_cmd),
1888 PCI_DMA_TODEVICE);
1889 pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
1890 pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
1891 	/* Point the DMA address past the driver-only metadata, at the
1892 	 * command header that the device will actually read */
1893 txcmd_phys += offsetof(struct iwl_cmd, hdr);
1894
1895 /* Add buffer containing Tx command and MAC(!) header to TFD's
1896 * first entry */
1897 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1898 txcmd_phys, len, 1, 0);
1899
1900 if (info->control.hw_key)
1901 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
1902
1903 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1904 * if any (802.11 null frames have no payload). */
1905 len = skb->len - hdr_len;
1906 if (len) {
1907 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
1908 len, PCI_DMA_TODEVICE);
1909 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1910 phys_addr, len,
1911 0, U32_PAD(len));
1912 }
1913
1914 /* Total # bytes to be transmitted */
1915 len = (u16)skb->len;
1916 tx->len = cpu_to_le16(len);
1917
1918 /* TODO need this for burst mode later on */
1919 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
1920
1921 /* set is_hcca to 0; it probably will never be implemented */
1922 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
1923
1924 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
1925 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
1926
1927 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1928 txq->need_update = 1;
1929 if (qc)
1930 priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
1931 } else {
1932 wait_write_ptr = 1;
1933 txq->need_update = 0;
1934 }
1935
1936 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
1937
1938 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
1939 ieee80211_hdrlen(fc));
1940
1941 /* Tell device the write index *just past* this latest filled TFD */
1942 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1943 rc = iwl_txq_update_write_ptr(priv, txq);
1944 spin_unlock_irqrestore(&priv->lock, flags);
1945
1946 if (rc)
1947 return rc;
1948
1949 if ((iwl_queue_space(q) < q->high_mark)
1950 && priv->mac80211_registered) {
1951 if (wait_write_ptr) {
1952 spin_lock_irqsave(&priv->lock, flags);
1953 txq->need_update = 1;
1954 iwl_txq_update_write_ptr(priv, txq);
1955 spin_unlock_irqrestore(&priv->lock, flags);
1956 }
1957
1958 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
1959 }
1960
1961 return 0;
1962
1963 drop_unlock:
1964 spin_unlock_irqrestore(&priv->lock, flags);
1965 drop:
1966 return -1;
1967 }
1968
1969 static void iwl3945_set_rate(struct iwl_priv *priv)
1970 {
1971 const struct ieee80211_supported_band *sband = NULL;
1972 struct ieee80211_rate *rate;
1973 int i;
1974
1975 sband = iwl_get_hw_mode(priv, priv->band);
1976 if (!sband) {
1977 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
1978 return;
1979 }
1980
1981 priv->active_rate = 0;
1982 priv->active_rate_basic = 0;
1983
1984 IWL_DEBUG_RATE("Setting rates for %s GHz\n",
1985 sband->band == IEEE80211_BAND_2GHZ ? "2.4" : "5");
1986
1987 for (i = 0; i < sband->n_bitrates; i++) {
1988 rate = &sband->bitrates[i];
1989 if ((rate->hw_value < IWL_RATE_COUNT) &&
1990 !(rate->flags & IEEE80211_CHAN_DISABLED)) {
1991 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)\n",
1992 rate->hw_value, iwl3945_rates[rate->hw_value].plcp);
1993 priv->active_rate |= (1 << rate->hw_value);
1994 }
1995 }
1996
1997 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
1998 priv->active_rate, priv->active_rate_basic);
1999
2000 /*
2001 	 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK);
2002 	 * otherwise set it to the default of all CCK rates plus 6, 12 and 24
2003 	 * Mbps for OFDM
2004 */
2005 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
2006 priv->staging39_rxon.cck_basic_rates =
2007 ((priv->active_rate_basic &
2008 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2009 else
2010 priv->staging39_rxon.cck_basic_rates =
2011 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2012
2013 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
2014 priv->staging39_rxon.ofdm_basic_rates =
2015 ((priv->active_rate_basic &
2016 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2017 IWL_FIRST_OFDM_RATE) & 0xFF;
2018 else
2019 priv->staging39_rxon.ofdm_basic_rates =
2020 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2021 }
2022
2023 static void iwl3945_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
2024 {
2025 unsigned long flags;
2026
2027 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2028 return;
2029
2030 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2031 disable_radio ? "OFF" : "ON");
2032
2033 if (disable_radio) {
2034 iwl_scan_cancel(priv);
2035 /* FIXME: This is a workaround for AP */
2036 if (priv->iw_mode != NL80211_IFTYPE_AP) {
2037 spin_lock_irqsave(&priv->lock, flags);
2038 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2039 CSR_UCODE_SW_BIT_RFKILL);
2040 spin_unlock_irqrestore(&priv->lock, flags);
2041 iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
2042 set_bit(STATUS_RF_KILL_SW, &priv->status);
2043 }
2044 return;
2045 }
2046
2047 spin_lock_irqsave(&priv->lock, flags);
2048 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2049
2050 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2051 spin_unlock_irqrestore(&priv->lock, flags);
2052
2053 /* wake up ucode */
2054 msleep(10);
2055
2056 spin_lock_irqsave(&priv->lock, flags);
2057 iwl_read32(priv, CSR_UCODE_DRV_GP1);
2058 if (!iwl_grab_nic_access(priv))
2059 iwl_release_nic_access(priv);
2060 spin_unlock_irqrestore(&priv->lock, flags);
2061
2062 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2063 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2064 "disabled by HW switch\n");
2065 return;
2066 }
2067
2068 if (priv->is_open)
2069 queue_work(priv->workqueue, &priv->restart);
2070 return;
2071 }
2072
2073 void iwl3945_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
2074 u32 decrypt_res, struct ieee80211_rx_status *stats)
2075 {
2076 u16 fc =
2077 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2078
2079 if (priv->active39_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2080 return;
2081
2082 if (!(fc & IEEE80211_FCTL_PROTECTED))
2083 return;
2084
2085 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2086 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2087 case RX_RES_STATUS_SEC_TYPE_TKIP:
2088 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2089 RX_RES_STATUS_BAD_ICV_MIC)
2090 stats->flag |= RX_FLAG_MMIC_ERROR;
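		/* fall through - TKIP frames also get the common
		 * decrypt-status check below */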
2091 case RX_RES_STATUS_SEC_TYPE_WEP:
2092 case RX_RES_STATUS_SEC_TYPE_CCMP:
2093 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2094 RX_RES_STATUS_DECRYPT_OK) {
2095 			IWL_DEBUG_RX("hw decrypt successful\n");
2096 stats->flag |= RX_FLAG_DECRYPTED;
2097 }
2098 break;
2099
2100 default:
2101 break;
2102 }
2103 }
2104
2105 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
2106
2107 #include "iwl-spectrum.h"
2108
2109 #define BEACON_TIME_MASK_LOW 0x00FFFFFF
2110 #define BEACON_TIME_MASK_HIGH 0xFF000000
2111 #define TIME_UNIT 1024
2112
2113 /*
2114  * Extended beacon time format:
2115  * a time in usec is converted into a 32-bit value in 8:24 format;
2116  * the high byte is the beacon count and
2117  * the low 3 bytes are the time in usec within one beacon interval.
2118 */
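/*
 * Worked example (illustrative, assuming a hypothetical 100 TU beacon
 * interval): interval = 100 * 1024 = 102400 usec, so usec = 250000 is
 * packed as quot = 2 full intervals and rem = 45200 usec, giving
 * (2 << 24) + 45200 = 0x0200B090.
 */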
2119
2120 static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
2121 {
2122 u32 quot;
2123 u32 rem;
2124 u32 interval = beacon_interval * 1024;
2125
2126 if (!interval || !usec)
2127 return 0;
2128
2129 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
2130 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
2131
2132 return (quot << 24) + rem;
2133 }
2134
2135 /* base is usually what we get from the uCode with each received frame;
2136  * it is the same value as the HW timer counter, which counts down
2137  */
2138
2139 static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
2140 {
2141 u32 base_low = base & BEACON_TIME_MASK_LOW;
2142 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
2143 u32 interval = beacon_interval * TIME_UNIT;
2144 u32 res = (base & BEACON_TIME_MASK_HIGH) +
2145 (addon & BEACON_TIME_MASK_HIGH);
2146
2147 if (base_low > addon_low)
2148 res += base_low - addon_low;
2149 else if (base_low < addon_low) {
2150 res += interval + base_low - addon_low;
2151 res += (1 << 24);
2152 } else
2153 res += (1 << 24);
2154
2155 return cpu_to_le32(res);
2156 }
2157
2158 static int iwl3945_get_measurement(struct iwl_priv *priv,
2159 struct ieee80211_measurement_params *params,
2160 u8 type)
2161 {
2162 struct iwl_spectrum_cmd spectrum;
2163 struct iwl_rx_packet *res;
2164 struct iwl_host_cmd cmd = {
2165 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2166 .data = (void *)&spectrum,
2167 .meta.flags = CMD_WANT_SKB,
2168 };
2169 u32 add_time = le64_to_cpu(params->start_time);
2170 int rc;
2171 int spectrum_resp_status;
2172 int duration = le16_to_cpu(params->duration);
2173
2174 if (iwl3945_is_associated(priv))
2175 add_time =
2176 iwl3945_usecs_to_beacons(
2177 le64_to_cpu(params->start_time) - priv->last_tsf,
2178 le16_to_cpu(priv->rxon_timing.beacon_interval));
2179
2180 memset(&spectrum, 0, sizeof(spectrum));
2181
2182 spectrum.channel_count = cpu_to_le16(1);
2183 spectrum.flags =
2184 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
2185 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
2186 cmd.len = sizeof(spectrum);
2187 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
2188
2189 if (iwl3945_is_associated(priv))
2190 spectrum.start_time =
2191 iwl3945_add_beacon_time(priv->last_beacon_time,
2192 add_time,
2193 le16_to_cpu(priv->rxon_timing.beacon_interval));
2194 else
2195 spectrum.start_time = 0;
2196
2197 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
2198 spectrum.channels[0].channel = params->channel;
2199 spectrum.channels[0].type = type;
2200 if (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK)
2201 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
2202 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
2203
2204 rc = iwl_send_cmd_sync(priv, &cmd);
2205 if (rc)
2206 return rc;
2207
2208 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
2209 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
2210 		IWL_ERR(priv, "Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
2211 rc = -EIO;
2212 }
2213
2214 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
2215 switch (spectrum_resp_status) {
2216 case 0: /* Command will be handled */
2217 if (res->u.spectrum.id != 0xff) {
2218 IWL_DEBUG_INFO("Replaced existing measurement: %d\n",
2219 res->u.spectrum.id);
2220 priv->measurement_status &= ~MEASUREMENT_READY;
2221 }
2222 priv->measurement_status |= MEASUREMENT_ACTIVE;
2223 rc = 0;
2224 break;
2225
2226 case 1: /* Command will not be handled */
2227 rc = -EAGAIN;
2228 break;
2229 }
2230
2231 dev_kfree_skb_any(cmd.meta.u.skb);
2232
2233 return rc;
2234 }
2235 #endif
2236
2237 static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
2238 struct iwl_rx_mem_buffer *rxb)
2239 {
2240 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2241 struct iwl_alive_resp *palive;
2242 struct delayed_work *pwork;
2243
2244 palive = &pkt->u.alive_frame;
2245
2246 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
2247 "0x%01X 0x%01X\n",
2248 palive->is_valid, palive->ver_type,
2249 palive->ver_subtype);
2250
2251 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
2252 IWL_DEBUG_INFO("Initialization Alive received.\n");
2253 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
2254 sizeof(struct iwl_alive_resp));
2255 pwork = &priv->init_alive_start;
2256 } else {
2257 IWL_DEBUG_INFO("Runtime Alive received.\n");
2258 memcpy(&priv->card_alive, &pkt->u.alive_frame,
2259 sizeof(struct iwl_alive_resp));
2260 pwork = &priv->alive_start;
2261 iwl3945_disable_events(priv);
2262 }
2263
2264 /* We delay the ALIVE response by 5ms to
2265 * give the HW RF Kill time to activate... */
2266 if (palive->is_valid == UCODE_VALID_OK)
2267 queue_delayed_work(priv->workqueue, pwork,
2268 msecs_to_jiffies(5));
2269 else
2270 IWL_WARN(priv, "uCode did not respond OK.\n");
2271 }
2272
2273 static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
2274 struct iwl_rx_mem_buffer *rxb)
2275 {
2276 #ifdef CONFIG_IWLWIFI_DEBUG
2277 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2278 #endif
2279
2280 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
2281 return;
2282 }
2283
2284 static void iwl3945_rx_reply_error(struct iwl_priv *priv,
2285 struct iwl_rx_mem_buffer *rxb)
2286 {
2287 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2288
2289 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
2290 "seq 0x%04X ser 0x%08X\n",
2291 le32_to_cpu(pkt->u.err_resp.error_type),
2292 get_cmd_string(pkt->u.err_resp.cmd_id),
2293 pkt->u.err_resp.cmd_id,
2294 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
2295 le32_to_cpu(pkt->u.err_resp.error_info));
2296 }
2297
2298 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2299
2300 static void iwl3945_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
2301 {
2302 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2303 struct iwl3945_rxon_cmd *rxon = (void *)&priv->active39_rxon;
2304 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
2305 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
2306 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
2307 rxon->channel = csa->channel;
2308 priv->staging39_rxon.channel = csa->channel;
2309 }
2310
2311 static void iwl3945_rx_spectrum_measure_notif(struct iwl_priv *priv,
2312 struct iwl_rx_mem_buffer *rxb)
2313 {
2314 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
2315 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2316 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
2317
2318 if (!report->state) {
2319 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
2320 "Spectrum Measure Notification: Start\n");
2321 return;
2322 }
2323
2324 memcpy(&priv->measure_report, report, sizeof(*report));
2325 priv->measurement_status |= MEASUREMENT_READY;
2326 #endif
2327 }
2328
2329 static void iwl3945_rx_pm_sleep_notif(struct iwl_priv *priv,
2330 struct iwl_rx_mem_buffer *rxb)
2331 {
2332 #ifdef CONFIG_IWL3945_DEBUG
2333 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2334 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
2335 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
2336 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
2337 #endif
2338 }
2339
2340 static void iwl3945_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
2341 struct iwl_rx_mem_buffer *rxb)
2342 {
2343 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2344 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
2345 "notification for %s:\n",
2346 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
2347 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw,
2348 le32_to_cpu(pkt->len));
2349 }
2350
2351 static void iwl3945_bg_beacon_update(struct work_struct *work)
2352 {
2353 struct iwl_priv *priv =
2354 container_of(work, struct iwl_priv, beacon_update);
2355 struct sk_buff *beacon;
2356
2357 	/* Pull updated AP beacon from mac80211. Will fail if not in AP mode. */
2358 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
2359
2360 if (!beacon) {
2361 IWL_ERR(priv, "update beacon failed\n");
2362 return;
2363 }
2364
2365 mutex_lock(&priv->mutex);
2366 	/* new beacon skb is allocated every time; dispose of the previous one. */
2367 if (priv->ibss_beacon)
2368 dev_kfree_skb(priv->ibss_beacon);
2369
2370 priv->ibss_beacon = beacon;
2371 mutex_unlock(&priv->mutex);
2372
2373 iwl3945_send_beacon_cmd(priv);
2374 }
2375
2376 static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
2377 struct iwl_rx_mem_buffer *rxb)
2378 {
2379 #ifdef CONFIG_IWL3945_DEBUG
2380 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2381 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
2382 u8 rate = beacon->beacon_notify_hdr.rate;
2383
2384 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
2385 "tsf %d %d rate %d\n",
2386 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
2387 beacon->beacon_notify_hdr.failure_frame,
2388 le32_to_cpu(beacon->ibss_mgr_status),
2389 le32_to_cpu(beacon->high_tsf),
2390 le32_to_cpu(beacon->low_tsf), rate);
2391 #endif
2392
2393 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
2394 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
2395 queue_work(priv->workqueue, &priv->beacon_update);
2396 }
2397
2398 /* Service response to REPLY_SCAN_CMD (0x80) */
2399 static void iwl3945_rx_reply_scan(struct iwl_priv *priv,
2400 struct iwl_rx_mem_buffer *rxb)
2401 {
2402 #ifdef CONFIG_IWL3945_DEBUG
2403 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2404 struct iwl_scanreq_notification *notif =
2405 (struct iwl_scanreq_notification *)pkt->u.raw;
2406
2407 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
2408 #endif
2409 }
2410
2411 /* Service SCAN_START_NOTIFICATION (0x82) */
2412 static void iwl3945_rx_scan_start_notif(struct iwl_priv *priv,
2413 struct iwl_rx_mem_buffer *rxb)
2414 {
2415 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2416 struct iwl_scanstart_notification *notif =
2417 (struct iwl_scanstart_notification *)pkt->u.raw;
2418 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
2419 IWL_DEBUG_SCAN("Scan start: "
2420 "%d [802.11%s] "
2421 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
2422 notif->channel,
2423 notif->band ? "bg" : "a",
2424 notif->tsf_high,
2425 notif->tsf_low, notif->status, notif->beacon_timer);
2426 }
2427
2428 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
2429 static void iwl3945_rx_scan_results_notif(struct iwl_priv *priv,
2430 struct iwl_rx_mem_buffer *rxb)
2431 {
2432 #ifdef CONFIG_IWLWIFI_DEBUG
2433 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2434 struct iwl_scanresults_notification *notif =
2435 (struct iwl_scanresults_notification *)pkt->u.raw;
2436 #endif
2437
2438 IWL_DEBUG_SCAN("Scan ch.res: "
2439 "%d [802.11%s] "
2440 "(TSF: 0x%08X:%08X) - %d "
2441 "elapsed=%lu usec (%dms since last)\n",
2442 notif->channel,
2443 notif->band ? "bg" : "a",
2444 le32_to_cpu(notif->tsf_high),
2445 le32_to_cpu(notif->tsf_low),
2446 le32_to_cpu(notif->statistics[0]),
2447 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
2448 jiffies_to_msecs(elapsed_jiffies
2449 (priv->last_scan_jiffies, jiffies)));
2450
2451 priv->last_scan_jiffies = jiffies;
2452 priv->next_scan_jiffies = 0;
2453 }
2454
2455 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
2456 static void iwl3945_rx_scan_complete_notif(struct iwl_priv *priv,
2457 struct iwl_rx_mem_buffer *rxb)
2458 {
2459 #ifdef CONFIG_IWLWIFI_DEBUG
2460 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2461 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
2462 #endif
2463
2464 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
2465 scan_notif->scanned_channels,
2466 scan_notif->tsf_low,
2467 scan_notif->tsf_high, scan_notif->status);
2468
2469 /* The HW is no longer scanning */
2470 clear_bit(STATUS_SCAN_HW, &priv->status);
2471
2472 /* The scan completion notification came in, so kill that timer... */
2473 cancel_delayed_work(&priv->scan_check);
2474
2475 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
2476 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
2477 "2.4" : "5.2",
2478 jiffies_to_msecs(elapsed_jiffies
2479 (priv->scan_pass_start, jiffies)));
2480
2481 	/* Remove this scanned band from the list of pending
2482 	 * bands to scan; band G precedes band A in scan order,
2483 	 * as seen in iwl3945_bg_request_scan */
2484 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
2485 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
2486 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
2487 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
2488
2489 /* If a request to abort was given, or the scan did not succeed
2490 * then we reset the scan state machine and terminate,
2491 * re-queuing another scan if one has been requested */
2492 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2493 IWL_DEBUG_INFO("Aborted scan completed.\n");
2494 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
2495 } else {
2496 /* If there are more bands on this scan pass reschedule */
2497 if (priv->scan_bands > 0)
2498 goto reschedule;
2499 }
2500
2501 priv->last_scan_jiffies = jiffies;
2502 priv->next_scan_jiffies = 0;
2503 IWL_DEBUG_INFO("Setting scan to off\n");
2504
2505 clear_bit(STATUS_SCANNING, &priv->status);
2506
2507 IWL_DEBUG_INFO("Scan took %dms\n",
2508 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
2509
2510 queue_work(priv->workqueue, &priv->scan_completed);
2511
2512 return;
2513
2514 reschedule:
2515 priv->scan_pass_start = jiffies;
2516 queue_work(priv->workqueue, &priv->request_scan);
2517 }
2518
2519 /* Handle notification from uCode that card's power state is changing
2520 * due to software, hardware, or critical temperature RFKILL */
2521 static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
2522 struct iwl_rx_mem_buffer *rxb)
2523 {
2524 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2525 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
2526 unsigned long status = priv->status;
2527
2528 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
2529 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
2530 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
2531
2532 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2533 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2534
2535 if (flags & HW_CARD_DISABLED)
2536 set_bit(STATUS_RF_KILL_HW, &priv->status);
2537 else
2538 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2539
2540
2541 if (flags & SW_CARD_DISABLED)
2542 set_bit(STATUS_RF_KILL_SW, &priv->status);
2543 else
2544 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2545
2546 iwl_scan_cancel(priv);
2547
2548 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
2549 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
2550 (test_bit(STATUS_RF_KILL_SW, &status) !=
2551 test_bit(STATUS_RF_KILL_SW, &priv->status)))
2552 queue_work(priv->workqueue, &priv->rf_kill);
2553 else
2554 wake_up_interruptible(&priv->wait_command_queue);
2555 }
2556
2557 /**
2558 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
2559 *
2560  * Set up the RX handlers for each of the reply types sent from the uCode
2561  * to the host.
2562  *
2563  * This function chains into the hardware-specific files so they can set up
2564  * any hardware-specific handlers as well.
2565 */
2566 static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
2567 {
2568 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
2569 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
2570 priv->rx_handlers[REPLY_ERROR] = iwl3945_rx_reply_error;
2571 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl3945_rx_csa;
2572 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
2573 iwl3945_rx_spectrum_measure_notif;
2574 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl3945_rx_pm_sleep_notif;
2575 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
2576 iwl3945_rx_pm_debug_statistics_notif;
2577 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
2578
2579 /*
2580 * The same handler is used for both the REPLY to a discrete
2581 * statistics request from the host as well as for the periodic
2582 * statistics notifications (after received beacons) from the uCode.
2583 */
2584 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
2585 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
2586
2587 priv->rx_handlers[REPLY_SCAN_CMD] = iwl3945_rx_reply_scan;
2588 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl3945_rx_scan_start_notif;
2589 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
2590 iwl3945_rx_scan_results_notif;
2591 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
2592 iwl3945_rx_scan_complete_notif;
2593 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
2594
2595 /* Set up hardware specific Rx handlers */
2596 iwl3945_hw_rx_handler_setup(priv);
2597 }
2598
2599 /**
2600 * iwl3945_cmd_queue_reclaim - Reclaim CMD queue entries
2601 * When FW advances 'R' index, all entries between old and new 'R' index
2602 * need to be reclaimed.
2603 */
2604 static void iwl3945_cmd_queue_reclaim(struct iwl_priv *priv,
2605 int txq_id, int index)
2606 {
2607 struct iwl_tx_queue *txq = &priv->txq[txq_id];
2608 struct iwl_queue *q = &txq->q;
2609 int nfreed = 0;
2610
2611 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
2612 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
2613 "is out of range [0-%d] %d %d.\n", txq_id,
2614 index, q->n_bd, q->write_ptr, q->read_ptr);
2615 return;
2616 }
2617
2618 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
2619 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2620 if (nfreed > 1) {
2621 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", index,
2622 q->write_ptr, q->read_ptr);
2623 queue_work(priv->workqueue, &priv->restart);
2624 break;
2625 }
2626 nfreed++;
2627 }
2628 }
2629
2630
2631 /**
2632 * iwl3945_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
2633 * @rxb: Rx buffer to reclaim
2634 *
2635  * If an Rx buffer has an async callback associated with it, the callback
2636  * will be executed. The attached skb (if present) will only be freed
2637  * if the callback returns 1.
2638 */
2639 static void iwl3945_tx_cmd_complete(struct iwl_priv *priv,
2640 struct iwl_rx_mem_buffer *rxb)
2641 {
2642 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
2643 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2644 int txq_id = SEQ_TO_QUEUE(sequence);
2645 int index = SEQ_TO_INDEX(sequence);
2646 int huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
2647 int cmd_index;
2648 struct iwl_cmd *cmd;
2649
2650 if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
2651 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
2652 txq_id, sequence,
2653 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
2654 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
2655 iwl_print_hex_dump(priv, IWL_DL_INFO , rxb, 32);
2656 return;
2657 }
2658
2659 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
2660 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
2661
2662 /* Input error checking is done when commands are added to queue. */
2663 if (cmd->meta.flags & CMD_WANT_SKB) {
2664 cmd->meta.source->u.skb = rxb->skb;
2665 rxb->skb = NULL;
2666 } else if (cmd->meta.u.callback &&
2667 !cmd->meta.u.callback(priv, cmd, rxb->skb))
2668 rxb->skb = NULL;
2669
2670 iwl3945_cmd_queue_reclaim(priv, txq_id, index);
2671
2672 if (!(cmd->meta.flags & CMD_ASYNC)) {
2673 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
2674 wake_up_interruptible(&priv->wait_command_queue);
2675 }
2676 }
2677
2678 /************************** RX-FUNCTIONS ****************************/
2679 /*
2680 * Rx theory of operation
2681 *
2682 * The host allocates 32 DMA target addresses and passes the host address
2683 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
2684 * 0 to 31
2685 *
2686 * Rx Queue Indexes
2687 * The host/firmware share two index registers for managing the Rx buffers.
2688 *
2689 * The READ index maps to the first position that the firmware may be writing
2690 * to -- the driver can read up to (but not including) this position and get
2691 * good data.
2692 * The READ index is managed by the firmware once the card is enabled.
2693 *
2694 * The WRITE index maps to the last position the driver has read from -- the
2695 * position preceding WRITE is the last slot the firmware can place a packet.
2696 *
2697 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2698 * WRITE = READ.
2699 *
2700 * During initialization, the host sets up the READ queue position to the first
2701 * INDEX position, and WRITE to the last (READ - 1 wrapped)
2702 *
2703 * When the firmware places a packet in a buffer, it will advance the READ index
2704 * and fire the RX interrupt. The driver can then query the READ index and
2705 * process as many packets as possible, moving the WRITE index forward as it
2706 * resets the Rx queue buffers with new memory.
2707 *
2708 * The management in the driver is as follows:
2709 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2710 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2711 * to replenish the iwl->rxq->rx_free.
2712 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
2713 * iwl->rxq is replenished and the READ INDEX is updated (updating the
2714 * 'processed' and 'read' driver indexes as well)
2715 * + A received packet is processed and handed to the kernel network stack,
2716 * detached from the iwl->rxq. The driver 'processed' index is updated.
2717 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2718 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2719 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
2720 * were enough free buffers and RX_STALLED is set it is cleared.
2721 *
2722 *
2723 * Driver sequence:
2724 *
2725 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
2726 * iwl3945_rx_queue_restock
2727 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
2728 * queue, updates firmware pointers, and updates
2729 * the WRITE index. If insufficient rx_free buffers
2730 * are available, schedules iwl3945_rx_replenish
2731 *
2732 * -- enable interrupts --
2733  * ISR - iwl3945_rx_handle() Detach iwl_rx_mem_buffers from pool up to the
2734 * READ INDEX, detaching the SKB from the pool.
2735 * Moves the packet buffer from queue to rx_used.
2736 * Calls iwl3945_rx_queue_restock to refill any empty
2737 * slots.
2738 * ...
2739 *
2740 */
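/*
 * For example, with READ = 5 the queue is in the "empty" (no good data)
 * state when WRITE = 4, and in the "full" state when WRITE = 5, per the
 * index convention described above.
 */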
2741
2742 /**
2743 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
2744 */
2745 static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
2746 dma_addr_t dma_addr)
2747 {
2748 return cpu_to_le32((u32)dma_addr);
2749 }
2750
2751 /**
2752 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
2753 *
2754 * If there are slots in the RX queue that need to be restocked,
2755 * and we have free pre-allocated buffers, fill the ranks as much
2756 * as we can, pulling from rx_free.
2757 *
2758 * This moves the 'write' index forward to catch up with 'processed', and
2759 * also updates the memory address in the firmware to reference the new
2760 * target buffer.
2761 */
2762 static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
2763 {
2764 struct iwl_rx_queue *rxq = &priv->rxq;
2765 struct list_head *element;
2766 struct iwl_rx_mem_buffer *rxb;
2767 unsigned long flags;
2768 int write, rc;
2769
2770 spin_lock_irqsave(&rxq->lock, flags);
2771 write = rxq->write & ~0x7;
2772 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
2773 /* Get next free Rx buffer, remove from free list */
2774 element = rxq->rx_free.next;
2775 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
2776 list_del(element);
2777
2778 /* Point to Rx buffer via next RBD in circular buffer */
2779 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
2780 rxq->queue[rxq->write] = rxb;
2781 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
2782 rxq->free_count--;
2783 }
2784 spin_unlock_irqrestore(&rxq->lock, flags);
2785 /* If the pre-allocated buffer pool is dropping low, schedule to
2786 * refill it */
2787 if (rxq->free_count <= RX_LOW_WATERMARK)
2788 queue_work(priv->workqueue, &priv->rx_replenish);
2789
2790
2791 /* If we've added more space for the firmware to place data, tell it.
2792 * Increment device's write pointer in multiples of 8. */
2793 if ((write != (rxq->write & ~0x7))
2794 || (abs(rxq->write - rxq->read) > 7)) {
2795 spin_lock_irqsave(&rxq->lock, flags);
2796 rxq->need_update = 1;
2797 spin_unlock_irqrestore(&rxq->lock, flags);
2798 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
2799 if (rc)
2800 return rc;
2801 }
2802
2803 return 0;
2804 }
2805
2806 /**
2807  * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
2808 *
2809 * When moving to rx_free an SKB is allocated for the slot.
2810 *
2811 * Also restock the Rx queue via iwl3945_rx_queue_restock.
2812 * This is called as a scheduled work item (except for during initialization)
2813 */
2814 static void iwl3945_rx_allocate(struct iwl_priv *priv)
2815 {
2816 struct iwl_rx_queue *rxq = &priv->rxq;
2817 struct list_head *element;
2818 struct iwl_rx_mem_buffer *rxb;
2819 unsigned long flags;
2820 spin_lock_irqsave(&rxq->lock, flags);
2821 while (!list_empty(&rxq->rx_used)) {
2822 element = rxq->rx_used.next;
2823 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
2824
2825 /* Alloc a new receive buffer */
2826 rxb->skb =
2827 alloc_skb(priv->hw_params.rx_buf_size,
2828 __GFP_NOWARN | GFP_ATOMIC);
2829 if (!rxb->skb) {
2830 if (net_ratelimit())
2831 				IWL_CRIT(priv, "Cannot allocate SKB buffers\n");
2832 /* We don't reschedule replenish work here -- we will
2833 * call the restock method and if it still needs
2834 * more buffers it will schedule replenish */
2835 break;
2836 }
2837
2838 		/* If a radiotap header is required, reserve some headroom here.
2839 		 * The physical header size is variable (rx_stats->phy_count).
2840 		 * We reserve 4 bytes here; together with the headroom for the
2841 		 * physical header, this should be enough for the radiotap
2842 		 * header that iwl3945 supports. See iwl3945_rt.
2843 		 */
2844 skb_reserve(rxb->skb, 4);
2845
2846 priv->alloc_rxb_skb++;
2847 list_del(element);
2848
2849 /* Get physical address of RB/SKB */
2850 rxb->real_dma_addr = pci_map_single(priv->pci_dev,
2851 rxb->skb->data,
2852 priv->hw_params.rx_buf_size,
2853 PCI_DMA_FROMDEVICE);
2854 list_add_tail(&rxb->list, &rxq->rx_free);
2855 rxq->free_count++;
2856 }
2857 spin_unlock_irqrestore(&rxq->lock, flags);
2858 }
2859
2860 /*
2861 * this should be called while priv->lock is locked
2862 */
2863 static void __iwl3945_rx_replenish(void *data)
2864 {
2865 struct iwl_priv *priv = data;
2866
2867 iwl3945_rx_allocate(priv);
2868 iwl3945_rx_queue_restock(priv);
2869 }
2870
2871
2872 void iwl3945_rx_replenish(void *data)
2873 {
2874 struct iwl_priv *priv = data;
2875 unsigned long flags;
2876
2877 iwl3945_rx_allocate(priv);
2878
2879 spin_lock_irqsave(&priv->lock, flags);
2880 iwl3945_rx_queue_restock(priv);
2881 spin_unlock_irqrestore(&priv->lock, flags);
2882 }
2883
2884 /* Convert linear signal-to-noise ratio into dB */
2885 static u8 ratio2dB[100] = {
2886 /* 0 1 2 3 4 5 6 7 8 9 */
2887 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
2888 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
2889 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
2890 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
2891 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
2892 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
2893 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
2894 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
2895 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
2896 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
2897 };
2898
2899 /* Calculates a relative dB value from a ratio of linear
2900 * (i.e. not dB) signal levels.
2901 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
2902 int iwl3945_calc_db_from_ratio(int sig_ratio)
2903 {
2904 /* 1000:1 or higher just report as 60 dB */
2905 if (sig_ratio >= 1000)
2906 return 60;
2907
2908 /* 100:1 or higher, divide by 10 and use table,
2909 * add 20 dB to make up for divide by 10 */
2910 if (sig_ratio >= 100)
2911 return 20 + (int)ratio2dB[sig_ratio/10];
2912
2913 /* We shouldn't see this */
2914 if (sig_ratio < 1)
2915 return 0;
2916
2917 /* Use table for ratios 1:1 - 99:1 */
2918 return (int)ratio2dB[sig_ratio];
2919 }
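/*
 * Worked example (illustrative): a linear ratio of 250 takes the
 * ">= 100" branch above, giving 20 + ratio2dB[25] = 20 + 28 = 48 dB,
 * which matches 20 * log10(250) ~= 48 dB for a voltage ratio.
 */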
2920
2921 #define PERFECT_RSSI (-20) /* dBm */
2922 #define WORST_RSSI (-95) /* dBm */
2923 #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
2924
2925 /* Calculate an indication of rx signal quality (a percentage, not dBm!).
2926 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
2927 * about formulas used below. */
2928 int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
2929 {
2930 int sig_qual;
2931 int degradation = PERFECT_RSSI - rssi_dbm;
2932
2933 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
2934 * as indicator; formula is (signal dbm - noise dbm).
2935 * SNR at or above 40 is a great signal (100%).
2936 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
2937 * Weakest usable signal is usually 10 - 15 dB SNR. */
2938 if (noise_dbm) {
2939 if (rssi_dbm - noise_dbm >= 40)
2940 return 100;
2941 else if (rssi_dbm < noise_dbm)
2942 return 0;
2943 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
2944
2945 /* Else use just the signal level.
2946 * This formula is a least squares fit of data points collected and
2947 * compared with a reference system that had a percentage (%) display
2948 * for signal quality. */
2949 } else
2950 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
2951 (15 * RSSI_RANGE + 62 * degradation)) /
2952 (RSSI_RANGE * RSSI_RANGE);
2953
2954 if (sig_qual > 100)
2955 sig_qual = 100;
2956 else if (sig_qual < 1)
2957 sig_qual = 0;
2958
2959 return sig_qual;
2960 }
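/*
 * Worked example (illustrative): rssi_dbm = -60 with noise_dbm = -90 is
 * an SNR of 30 dB, so the SNR branch above yields (30 * 5) / 2 = 75%.
 */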
2961
2962 /**
2963 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
2964 *
2965 * Uses the priv->rx_handlers callback function array to invoke
2966 * the appropriate handlers, including command responses,
2967 * frame-received notifications, and other notifications.
2968 */
2969 static void iwl3945_rx_handle(struct iwl_priv *priv)
2970 {
2971 struct iwl_rx_mem_buffer *rxb;
2972 struct iwl_rx_packet *pkt;
2973 struct iwl_rx_queue *rxq = &priv->rxq;
2974 u32 r, i;
2975 int reclaim;
2976 unsigned long flags;
2977 u8 fill_rx = 0;
2978 u32 count = 8;
2979
2980 /* uCode's read index (stored in shared DRAM) indicates the last Rx
2981 * buffer that the driver may process (last buffer filled by ucode). */
2982 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
2983 i = rxq->read;
2984
2985 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
2986 fill_rx = 1;
2987 /* Rx interrupt, but nothing sent from uCode */
2988 if (i == r)
2989 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
2990
2991 while (i != r) {
2992 rxb = rxq->queue[i];
2993
2994 /* If an RXB doesn't have a Rx queue slot associated with it,
2995 * then a bug has been introduced in the queue refilling
2996 * routines -- catch it here */
2997 BUG_ON(rxb == NULL);
2998
2999 rxq->queue[i] = NULL;
3000
3001 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
3002 priv->hw_params.rx_buf_size,
3003 PCI_DMA_FROMDEVICE);
3004 pkt = (struct iwl_rx_packet *)rxb->skb->data;
3005
3006 /* Reclaim a command buffer only if this packet is a response
3007 * to a (driver-originated) command.
3008 * If the packet (e.g. Rx frame) originated from uCode,
3009 * there is no command buffer to reclaim.
3010 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
3011 * but apparently a few don't get set; catch them here. */
3012 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
3013 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
3014 (pkt->hdr.cmd != REPLY_TX);
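		/* e.g. the response to a driver-issued REPLY_ADD_STA is
		 * reclaimed here, while uCode-originated Rx frames and
		 * periodic STATISTICS_NOTIFICATIONs are not. */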
3015
3016 /* Based on type of command response or notification,
3017 * handle those that need handling via function in
3018 * rx_handlers table. See iwl3945_setup_rx_handlers() */
3019 if (priv->rx_handlers[pkt->hdr.cmd]) {
3020 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
3021 "r = %d, i = %d, %s, 0x%02x\n", r, i,
3022 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3023 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3024 } else {
3025 /* No handling needed */
3026 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
3027 "r %d i %d No handler needed for %s, 0x%02x\n",
3028 r, i, get_cmd_string(pkt->hdr.cmd),
3029 pkt->hdr.cmd);
3030 }
3031
3032 if (reclaim) {
3033 /* Invoke any callbacks, transfer the skb to caller, and
3034 * fire off the (possibly) blocking iwl_send_cmd()
3035 * as we reclaim the driver command queue */
3036 if (rxb && rxb->skb)
3037 iwl3945_tx_cmd_complete(priv, rxb);
3038 else
3039 IWL_WARN(priv, "Claim null rxb?\n");
3040 }
3041
3042 /* For now we just don't re-use anything. We can tweak this
3043 * later to try and re-use notification packets and SKBs that
3044 * fail to Rx correctly */
3045 if (rxb->skb != NULL) {
3046 priv->alloc_rxb_skb--;
3047 dev_kfree_skb_any(rxb->skb);
3048 rxb->skb = NULL;
3049 }
3050
3051 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
3052 priv->hw_params.rx_buf_size,
3053 PCI_DMA_FROMDEVICE);
3054 spin_lock_irqsave(&rxq->lock, flags);
3055 list_add_tail(&rxb->list, &priv->rxq.rx_used);
3056 spin_unlock_irqrestore(&rxq->lock, flags);
3057 i = (i + 1) & RX_QUEUE_MASK;
3058 /* If there are a lot of unused frames,
3059 * restock the Rx queue so ucode won't assert. */
3060 if (fill_rx) {
3061 count++;
3062 if (count >= 8) {
3063 priv->rxq.read = i;
3064 __iwl3945_rx_replenish(priv);
3065 count = 0;
3066 }
3067 }
3068 }
3069
3070 /* Backtrack one entry */
3071 priv->rxq.read = i;
3072 iwl3945_rx_queue_restock(priv);
3073 }
3074
3075 #ifdef CONFIG_IWL3945_DEBUG
3076 static void iwl3945_print_rx_config_cmd(struct iwl_priv *priv,
3077 struct iwl3945_rxon_cmd *rxon)
3078 {
3079 IWL_DEBUG_RADIO("RX CONFIG:\n");
3080 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
3081 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
3082 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
3083 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
3084 le32_to_cpu(rxon->filter_flags));
3085 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
3086 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
3087 rxon->ofdm_basic_rates);
3088 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
3089 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
3090 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
3091 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
3092 }
3093 #endif
3094
3095 static void iwl3945_enable_interrupts(struct iwl_priv *priv)
3096 {
3097 IWL_DEBUG_ISR("Enabling interrupts\n");
3098 set_bit(STATUS_INT_ENABLED, &priv->status);
3099 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
3100 }
3101
3102
3103 /* call this function to flush any scheduled tasklet */
3104 static inline void iwl_synchronize_irq(struct iwl_priv *priv)
3105 {
3106 	/* wait to make sure we flush any pending tasklet */
3107 synchronize_irq(priv->pci_dev->irq);
3108 tasklet_kill(&priv->irq_tasklet);
3109 }
3110
3111
3112 static inline void iwl3945_disable_interrupts(struct iwl_priv *priv)
3113 {
3114 clear_bit(STATUS_INT_ENABLED, &priv->status);
3115
3116 /* disable interrupts from uCode/NIC to host */
3117 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
3118
3119 /* acknowledge/clear/reset any interrupts still pending
3120 * from uCode or flow handler (Rx/Tx DMA) */
3121 iwl_write32(priv, CSR_INT, 0xffffffff);
3122 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
3123 IWL_DEBUG_ISR("Disabled interrupts\n");
3124 }
3125
3126 static const char *desc_lookup(int i)
3127 {
3128 switch (i) {
3129 case 1:
3130 return "FAIL";
3131 case 2:
3132 return "BAD_PARAM";
3133 case 3:
3134 return "BAD_CHECKSUM";
3135 case 4:
3136 return "NMI_INTERRUPT";
3137 case 5:
3138 return "SYSASSERT";
3139 case 6:
3140 return "FATAL_ERROR";
3141 }
3142
3143 return "UNKNOWN";
3144 }
3145
3146 #define ERROR_START_OFFSET (1 * sizeof(u32))
3147 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
3148
3149 static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
3150 {
3151 u32 i;
3152 u32 desc, time, count, base, data1;
3153 u32 blink1, blink2, ilink1, ilink2;
3154 int rc;
3155
3156 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
3157
3158 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
3159 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
3160 return;
3161 }
3162
3163 rc = iwl_grab_nic_access(priv);
3164 if (rc) {
3165 IWL_WARN(priv, "Can not read from adapter at this time.\n");
3166 return;
3167 }
3168
3169 count = iwl_read_targ_mem(priv, base);
3170
3171 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
3172 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
3173 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
3174 priv->status, count);
3175 }
3176
3177 IWL_ERR(priv, "Desc Time asrtPC blink2 "
3178 "ilink1 nmiPC Line\n");
3179 for (i = ERROR_START_OFFSET;
3180 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
3181 i += ERROR_ELEM_SIZE) {
3182 desc = iwl_read_targ_mem(priv, base + i);
3183 time =
3184 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
3185 blink1 =
3186 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
3187 blink2 =
3188 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
3189 ilink1 =
3190 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
3191 ilink2 =
3192 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
3193 data1 =
3194 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
3195
3196 IWL_ERR(priv,
3197 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
3198 desc_lookup(desc), desc, time, blink1, blink2,
3199 ilink1, ilink2, data1);
3200 }
3201
3202 iwl_release_nic_access(priv);
3203
3204 }
3205
3206 #define EVENT_START_OFFSET (6 * sizeof(u32))
3207
3208 /**
3209 * iwl3945_print_event_log - Dump error event log to syslog
3210 *
3211 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
3212 */
3213 static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
3214 u32 num_events, u32 mode)
3215 {
3216 u32 i;
3217 u32 base; /* SRAM byte address of event log header */
3218 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
3219 u32 ptr; /* SRAM byte address of log data */
3220 u32 ev, time, data; /* event log data */
3221
3222 if (num_events == 0)
3223 return;
3224
3225 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
3226
3227 if (mode == 0)
3228 event_size = 2 * sizeof(u32);
3229 else
3230 event_size = 3 * sizeof(u32);
3231
3232 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
3233
3234 /* "time" is actually "data" for mode 0 (no timestamp).
3235 * place event id # at far right for easier visual parsing. */
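	/* Each log entry is thus {event id, data} for mode 0, or
	 * {event id, timestamp, data} for mode 1 (two or three u32s,
	 * matching event_size above). */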
3236 for (i = 0; i < num_events; i++) {
3237 ev = iwl_read_targ_mem(priv, ptr);
3238 ptr += sizeof(u32);
3239 time = iwl_read_targ_mem(priv, ptr);
3240 ptr += sizeof(u32);
3241 if (mode == 0) {
3242 /* data, ev */
3243 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
3244 } else {
3245 data = iwl_read_targ_mem(priv, ptr);
3246 ptr += sizeof(u32);
3247 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
3248 }
3249 }
3250 }
3251
3252 static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
3253 {
3254 int rc;
3255 u32 base; /* SRAM byte address of event log header */
3256 u32 capacity; /* event log capacity in # entries */
3257 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
3258 u32 num_wraps; /* # times uCode wrapped to top of log */
3259 u32 next_entry; /* index of next entry to be written by uCode */
3260 u32 size; /* # entries that we'll print */
3261
3262 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
3263 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
3264 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
3265 return;
3266 }
3267
3268 rc = iwl_grab_nic_access(priv);
3269 if (rc) {
3270 IWL_WARN(priv, "Can not read from adapter at this time.\n");
3271 return;
3272 }
3273
3274 /* event log header */
3275 capacity = iwl_read_targ_mem(priv, base);
3276 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
3277 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
3278 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
3279
3280 size = num_wraps ? capacity : next_entry;
3281
3282 /* bail out if nothing in log */
3283 if (size == 0) {
3284 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
3285 iwl_release_nic_access(priv);
3286 return;
3287 }
3288
3289 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
3290 size, num_wraps);
3291
3292 /* if uCode has wrapped back to top of log, start at the oldest entry,
3293 	 * i.e. the next one that uCode would fill. */
3294 if (num_wraps)
3295 iwl3945_print_event_log(priv, next_entry,
3296 capacity - next_entry, mode);
3297
3298 /* (then/else) start at top of log */
3299 iwl3945_print_event_log(priv, 0, next_entry, mode);
3300
3301 iwl_release_nic_access(priv);
3302 }
3303
3304 /**
3305 * iwl3945_irq_handle_error - called for HW or SW error interrupt from card
3306 */
3307 static void iwl3945_irq_handle_error(struct iwl_priv *priv)
3308 {
3309 /* Set the FW error flag -- cleared on iwl3945_down */
3310 set_bit(STATUS_FW_ERROR, &priv->status);
3311
3312 /* Cancel currently queued command. */
3313 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3314
3315 #ifdef CONFIG_IWL3945_DEBUG
3316 if (priv->debug_level & IWL_DL_FW_ERRORS) {
3317 iwl3945_dump_nic_error_log(priv);
3318 iwl3945_dump_nic_event_log(priv);
3319 iwl3945_print_rx_config_cmd(priv, &priv->staging39_rxon);
3320 }
3321 #endif
3322
3323 wake_up_interruptible(&priv->wait_command_queue);
3324
3325 /* Keep the restart process from trying to send host
3326 * commands by clearing the INIT status bit */
3327 clear_bit(STATUS_READY, &priv->status);
3328
3329 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3330 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
3331 "Restarting adapter due to uCode error.\n");
3332
3333 if (iwl3945_is_associated(priv)) {
3334 memcpy(&priv->recovery39_rxon, &priv->active39_rxon,
3335 sizeof(priv->recovery39_rxon));
3336 priv->error_recovering = 1;
3337 }
3338 queue_work(priv->workqueue, &priv->restart);
3339 }
3340 }
3341
3342 static void iwl3945_error_recovery(struct iwl_priv *priv)
3343 {
3344 unsigned long flags;
3345
3346 memcpy(&priv->staging39_rxon, &priv->recovery39_rxon,
3347 sizeof(priv->staging39_rxon));
3348 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3349 iwl3945_commit_rxon(priv);
3350
3351 iwl3945_add_station(priv, priv->bssid, 1, 0);
3352
3353 spin_lock_irqsave(&priv->lock, flags);
3354 priv->assoc_id = le16_to_cpu(priv->staging39_rxon.assoc_id);
3355 priv->error_recovering = 0;
3356 spin_unlock_irqrestore(&priv->lock, flags);
3357 }
3358
3359 static void iwl3945_irq_tasklet(struct iwl_priv *priv)
3360 {
3361 u32 inta, handled = 0;
3362 u32 inta_fh;
3363 unsigned long flags;
3364 #ifdef CONFIG_IWL3945_DEBUG
3365 u32 inta_mask;
3366 #endif
3367
3368 spin_lock_irqsave(&priv->lock, flags);
3369
3370 /* Ack/clear/reset pending uCode interrupts.
3371 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
3372 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
3373 inta = iwl_read32(priv, CSR_INT);
3374 iwl_write32(priv, CSR_INT, inta);
3375
3376 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
3377 * Any new interrupts that happen after this, either while we're
3378 * in this tasklet, or later, will show up in next ISR/tasklet. */
3379 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
3380 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
3381
3382 #ifdef CONFIG_IWL3945_DEBUG
3383 if (priv->debug_level & IWL_DL_ISR) {
3384 /* just for debug */
3385 inta_mask = iwl_read32(priv, CSR_INT_MASK);
3386 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
3387 inta, inta_mask, inta_fh);
3388 }
3389 #endif
3390
3391 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
3392 * atomic, make sure that inta covers all the interrupts that
3393 * we've discovered, even if FH interrupt came in just after
3394 * reading CSR_INT. */
3395 if (inta_fh & CSR39_FH_INT_RX_MASK)
3396 inta |= CSR_INT_BIT_FH_RX;
3397 if (inta_fh & CSR39_FH_INT_TX_MASK)
3398 inta |= CSR_INT_BIT_FH_TX;
3399
3400 /* Now service all interrupt bits discovered above. */
3401 if (inta & CSR_INT_BIT_HW_ERR) {
3402 IWL_ERR(priv, "Microcode HW error detected. Restarting.\n");
3403
3404 /* Tell the device to stop sending interrupts */
3405 iwl3945_disable_interrupts(priv);
3406
3407 iwl3945_irq_handle_error(priv);
3408
3409 handled |= CSR_INT_BIT_HW_ERR;
3410
3411 spin_unlock_irqrestore(&priv->lock, flags);
3412
3413 return;
3414 }
3415
3416 #ifdef CONFIG_IWL3945_DEBUG
3417 if (priv->debug_level & (IWL_DL_ISR)) {
3418 /* NIC fires this, but we don't use it, redundant with WAKEUP */
3419 if (inta & CSR_INT_BIT_SCD)
3420 			IWL_DEBUG_ISR("Scheduler finished transmitting "
3421 				      "the frame(s).\n");
3422
3423 /* Alive notification via Rx interrupt will do the real work */
3424 if (inta & CSR_INT_BIT_ALIVE)
3425 IWL_DEBUG_ISR("Alive interrupt\n");
3426 }
3427 #endif
3428 /* Safely ignore these bits for debug checks below */
3429 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
3430
3431 /* Error detected by uCode */
3432 if (inta & CSR_INT_BIT_SW_ERR) {
3433 IWL_ERR(priv, "Microcode SW error detected. "
3434 "Restarting 0x%X.\n", inta);
3435 iwl3945_irq_handle_error(priv);
3436 handled |= CSR_INT_BIT_SW_ERR;
3437 }
3438
3439 /* uCode wakes up after power-down sleep */
3440 if (inta & CSR_INT_BIT_WAKEUP) {
3441 IWL_DEBUG_ISR("Wakeup interrupt\n");
3442 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
3443 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
3444 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
3445 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
3446 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
3447 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
3448 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
3449
3450 handled |= CSR_INT_BIT_WAKEUP;
3451 }
3452
3453 /* All uCode command responses, including Tx command responses,
3454 * Rx "responses" (frame-received notification), and other
3455 	 * notifications from uCode come through here */
3456 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
3457 iwl3945_rx_handle(priv);
3458 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
3459 }
3460
3461 if (inta & CSR_INT_BIT_FH_TX) {
3462 IWL_DEBUG_ISR("Tx interrupt\n");
3463
3464 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
3465 if (!iwl_grab_nic_access(priv)) {
3466 iwl_write_direct32(priv, FH39_TCSR_CREDIT
3467 (FH39_SRVC_CHNL), 0x0);
3468 iwl_release_nic_access(priv);
3469 }
3470 handled |= CSR_INT_BIT_FH_TX;
3471 }
3472
3473 if (inta & ~handled)
3474 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
3475
3476 if (inta & ~CSR_INI_SET_MASK) {
3477 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
3478 inta & ~CSR_INI_SET_MASK);
3479 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
3480 }
3481
3482 /* Re-enable all interrupts */
3483 /* only Re-enable if disabled by irq */
3484 if (test_bit(STATUS_INT_ENABLED, &priv->status))
3485 iwl3945_enable_interrupts(priv);
3486
3487 #ifdef CONFIG_IWL3945_DEBUG
3488 if (priv->debug_level & (IWL_DL_ISR)) {
3489 inta = iwl_read32(priv, CSR_INT);
3490 inta_mask = iwl_read32(priv, CSR_INT_MASK);
3491 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
3492 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
3493 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
3494 }
3495 #endif
3496 spin_unlock_irqrestore(&priv->lock, flags);
3497 }
3498
3499 static irqreturn_t iwl3945_isr(int irq, void *data)
3500 {
3501 struct iwl_priv *priv = data;
3502 u32 inta, inta_mask;
3503 u32 inta_fh;
3504 if (!priv)
3505 return IRQ_NONE;
3506
3507 spin_lock(&priv->lock);
3508
3509 /* Disable (but don't clear!) interrupts here to avoid
3510 * back-to-back ISRs and sporadic interrupts from our NIC.
3511 * If we have something to service, the tasklet will re-enable ints.
3512 * If we *don't* have something, we'll re-enable before leaving here. */
3513 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
3514 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
3515
3516 /* Discover which interrupts are active/pending */
3517 inta = iwl_read32(priv, CSR_INT);
3518 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
3519
3520 /* Ignore interrupt if there's nothing in NIC to service.
3521 * This may be due to IRQ shared with another device,
3522 * or due to sporadic interrupts thrown from our NIC. */
3523 if (!inta && !inta_fh) {
3524 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
3525 goto none;
3526 }
3527
3528 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
3529 /* Hardware disappeared */
3530 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
3531 goto unplugged;
3532 }
3533
3534 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
3535 inta, inta_mask, inta_fh);
3536
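/* The scheduler (SCD) interrupt is not used by the driver (redundant
 * with WAKEUP, see the tasklet above), so strip it before deciding
 * whether the tasklet needs to run at all. */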
3537 inta &= ~CSR_INT_BIT_SCD;
3538
3539 /* iwl3945_irq_tasklet() will service interrupts and re-enable them */
3540 if (likely(inta || inta_fh))
3541 tasklet_schedule(&priv->irq_tasklet);
3542 unplugged:
3543 spin_unlock(&priv->lock);
3544
3545 return IRQ_HANDLED;
3546
3547 none:
3548 /* re-enable interrupts here since we don't have anything to service. */
3549 /* only Re-enable if disabled by irq */
3550 if (test_bit(STATUS_INT_ENABLED, &priv->status))
3551 iwl3945_enable_interrupts(priv);
3552 spin_unlock(&priv->lock);
3553 return IRQ_NONE;
3554 }
3555
3556 /************************** EEPROM BANDS ****************************
3557 *
3558 * The iwl3945_eeprom_band definitions below provide the mapping from the
3559 * EEPROM contents to the specific channel number supported for each
3560 * band.
3561 *
3562 * For example, iwl3945_priv->eeprom39.band_3_channels[4] from the band_3
3563 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
3564 * The specific geography and calibration information for that channel
3565 * is contained in the eeprom map itself.
3566 *
3567 * During init, we copy the eeprom information and channel map
3568 * information into priv->channel_info_24/52 and priv->channel_map_24/52
3569 *
3570 * channel_map_24/52 provides the index in the channel_info array for a
3571 * given channel. We have to have two separate maps as there is channel
3572 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
3573 * band_2
3574 *
3575 * A value of 0xff stored in the channel_map indicates that the channel
3576 * is not supported by the hardware at all.
3577 *
3578 * A value of 0xfe in the channel_map indicates that the channel is not
3579 * valid for Tx with the current hardware. This means that
3580 * while the system can tune and receive on a given channel, it may not
3581 * be able to associate or transmit any frames on that
3582 * channel. There is no corresponding channel information for that
3583 * entry.
3584 *
3585 *********************************************************************/
3586
3587 /* 2.4 GHz */
3588 static const u8 iwl3945_eeprom_band_1[14] = {
3589 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
3590 };
3591
3592 /* 5.2 GHz bands */
3593 static const u8 iwl3945_eeprom_band_2[] = { /* 4915-5080MHz */
3594 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
3595 };
3596
3597 static const u8 iwl3945_eeprom_band_3[] = { /* 5170-5320MHz */
3598 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
3599 };
3600
3601 static const u8 iwl3945_eeprom_band_4[] = { /* 5500-5700MHz */
3602 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
3603 };
3604
3605 static const u8 iwl3945_eeprom_band_5[] = { /* 5725-5825MHz */
3606 145, 149, 153, 157, 161, 165
3607 };
3608
3609 static void iwl3945_init_band_reference(const struct iwl_priv *priv, int band,
3610 int *eeprom_ch_count,
3611 const struct iwl_eeprom_channel
3612 **eeprom_ch_info,
3613 const u8 **eeprom_ch_index)
3614 {
3615 switch (band) {
3616 case 1: /* 2.4GHz band */
3617 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_1);
3618 *eeprom_ch_info = priv->eeprom39.band_1_channels;
3619 *eeprom_ch_index = iwl3945_eeprom_band_1;
3620 break;
3621 case 2: /* 4.9GHz band */
3622 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_2);
3623 *eeprom_ch_info = priv->eeprom39.band_2_channels;
3624 *eeprom_ch_index = iwl3945_eeprom_band_2;
3625 break;
3626 case 3: /* 5.2GHz band */
3627 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_3);
3628 *eeprom_ch_info = priv->eeprom39.band_3_channels;
3629 *eeprom_ch_index = iwl3945_eeprom_band_3;
3630 break;
3631 case 4: /* 5.5GHz band */
3632 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_4);
3633 *eeprom_ch_info = priv->eeprom39.band_4_channels;
3634 *eeprom_ch_index = iwl3945_eeprom_band_4;
3635 break;
3636 case 5: /* 5.7GHz band */
3637 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_5);
3638 *eeprom_ch_info = priv->eeprom39.band_5_channels;
3639 *eeprom_ch_index = iwl3945_eeprom_band_5;
3640 break;
3641 default:
3642 BUG();
3643 return;
3644 }
3645 }
3646
3647 /**
3648 * iwl3945_get_channel_info - Find driver's private channel info
3649 *
3650 * Based on band and channel number.
3651 */
3652 const struct iwl_channel_info *
3653 iwl3945_get_channel_info(const struct iwl_priv *priv,
3654 enum ieee80211_band band, u16 channel)
3655 {
3656 int i;
3657
3658 switch (band) {
3659 case IEEE80211_BAND_5GHZ:
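/* The 14 2.4 GHz channels occupy indices 0-13 of channel_info (they are
 * added first in iwl3945_init_channel_map), so the 5 GHz search starts
 * at index 14. */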
3660 for (i = 14; i < priv->channel_count; i++) {
3661 if (priv->channel_info[i].channel == channel)
3662 return &priv->channel_info[i];
3663 }
3664 break;
3665
3666 case IEEE80211_BAND_2GHZ:
3667 if (channel >= 1 && channel <= 14)
3668 return &priv->channel_info[channel - 1];
3669 break;
3670 case IEEE80211_NUM_BANDS:
3671 WARN_ON(1);
3672 }
3673
3674 return NULL;
3675 }
3676
3677 #define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
3678 ? # x " " : "")
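/* CHECK_AND_PRINT(FOO) expands to the string "FOO " when the channel's
 * EEPROM_CHANNEL_FOO flag is set, or "" otherwise; it is used only to
 * build the debug printout below. */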
3679
3680 /**
3681 * iwl3945_init_channel_map - Set up driver's info for all possible channels
3682 */
3683 static int iwl3945_init_channel_map(struct iwl_priv *priv)
3684 {
3685 int eeprom_ch_count = 0;
3686 const u8 *eeprom_ch_index = NULL;
3687 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
3688 int band, ch;
3689 struct iwl_channel_info *ch_info;
3690
3691 if (priv->channel_count) {
3692 IWL_DEBUG_INFO("Channel map already initialized.\n");
3693 return 0;
3694 }
3695
3696 if (priv->eeprom39.version < 0x2f) {
3697 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
3698 priv->eeprom39.version);
3699 return -EINVAL;
3700 }
3701
3702 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
3703
3704 priv->channel_count =
3705 ARRAY_SIZE(iwl3945_eeprom_band_1) +
3706 ARRAY_SIZE(iwl3945_eeprom_band_2) +
3707 ARRAY_SIZE(iwl3945_eeprom_band_3) +
3708 ARRAY_SIZE(iwl3945_eeprom_band_4) +
3709 ARRAY_SIZE(iwl3945_eeprom_band_5);
3710
3711 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
3712
3713 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
3714 priv->channel_count, GFP_KERNEL);
3715 if (!priv->channel_info) {
3716 IWL_ERR(priv, "Could not allocate channel_info\n");
3717 priv->channel_count = 0;
3718 return -ENOMEM;
3719 }
3720
3721 ch_info = priv->channel_info;
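/* ch_info walks the flat channel_info array as each band's channels are
 * appended in order below. */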
3722
3723 /* Loop through the 5 EEPROM bands, adding them in order to the
3724 * channel map we maintain (which contains additional information
3725 * beyond what is in the EEPROM itself). */
3726 for (band = 1; band <= 5; band++) {
3727
3728 iwl3945_init_band_reference(priv, band, &eeprom_ch_count,
3729 &eeprom_ch_info, &eeprom_ch_index);
3730
3731 /* Loop through each band adding each of the channels */
3732 for (ch = 0; ch < eeprom_ch_count; ch++) {
3733 ch_info->channel = eeprom_ch_index[ch];
3734 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
3735 IEEE80211_BAND_5GHZ;
3736
3737 /* permanently store EEPROM's channel regulatory flags
3738 * and max power in channel info database. */
3739 ch_info->eeprom = eeprom_ch_info[ch];
3740
3741 /* Copy the run-time flags so they are there even on
3742 * invalid channels */
3743 ch_info->flags = eeprom_ch_info[ch].flags;
3744
3745 if (!(is_channel_valid(ch_info))) {
3746 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
3747 "No traffic\n",
3748 ch_info->channel,
3749 ch_info->flags,
3750 is_channel_a_band(ch_info) ?
3751 "5.2" : "2.4");
3752 ch_info++;
3753 continue;
3754 }
3755
3756 /* Initialize regulatory-based run-time data */
3757 ch_info->max_power_avg = ch_info->curr_txpow =
3758 eeprom_ch_info[ch].max_power_avg;
3759 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
3760 ch_info->min_power = 0;
3761
3762 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
3763 " %ddBm): Ad-Hoc %ssupported\n",
3764 ch_info->channel,
3765 is_channel_a_band(ch_info) ?
3766 "5.2" : "2.4",
3767 CHECK_AND_PRINT(VALID),
3768 CHECK_AND_PRINT(IBSS),
3769 CHECK_AND_PRINT(ACTIVE),
3770 CHECK_AND_PRINT(RADAR),
3771 CHECK_AND_PRINT(WIDE),
3772 CHECK_AND_PRINT(DFS),
3773 eeprom_ch_info[ch].flags,
3774 eeprom_ch_info[ch].max_power_avg,
3775 ((eeprom_ch_info[ch].
3776 flags & EEPROM_CHANNEL_IBSS)
3777 && !(eeprom_ch_info[ch].
3778 flags & EEPROM_CHANNEL_RADAR))
3779 ? "" : "not ");
3780
3781 /* Set the tx_power_user_lmt to the highest power
3782 * supported by any channel */
3783 if (eeprom_ch_info[ch].max_power_avg >
3784 priv->tx_power_user_lmt)
3785 priv->tx_power_user_lmt =
3786 eeprom_ch_info[ch].max_power_avg;
3787
3788 ch_info++;
3789 }
3790 }
3791
3792 /* Set up txpower settings in driver for all channels */
3793 if (iwl3945_txpower_set_from_eeprom(priv))
3794 return -EIO;
3795
3796 return 0;
3797 }
3798
3799 /*
3800 * iwl3945_free_channel_map - undo allocations in iwl3945_init_channel_map
3801 */
3802 static void iwl3945_free_channel_map(struct iwl_priv *priv)
3803 {
3804 kfree(priv->channel_info);
3805 priv->channel_count = 0;
3806 }
3807
3808 static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
3809 enum ieee80211_band band,
3810 u8 is_active, u8 n_probes,
3811 struct iwl3945_scan_channel *scan_ch)
3812 {
3813 const struct ieee80211_channel *channels = NULL;
3814 const struct ieee80211_supported_band *sband;
3815 const struct iwl_channel_info *ch_info;
3816 u16 passive_dwell = 0;
3817 u16 active_dwell = 0;
3818 int added, i;
3819
3820 sband = iwl_get_hw_mode(priv, band);
3821 if (!sband)
3822 return 0;
3823
3824 channels = sband->channels;
3825
3826 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
3827 passive_dwell = iwl_get_passive_dwell_time(priv, band);
3828
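/* Passive dwell must be strictly longer than active dwell; bump it up
 * if the configured values say otherwise. */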
3829 if (passive_dwell <= active_dwell)
3830 passive_dwell = active_dwell + 1;
3831
3832 for (i = 0, added = 0; i < sband->n_channels; i++) {
3833 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
3834 continue;
3835
3836 scan_ch->channel = channels[i].hw_value;
3837
3838 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
3839 if (!is_channel_valid(ch_info)) {
3840 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
3841 scan_ch->channel);
3842 continue;
3843 }
3844
3845 scan_ch->active_dwell = cpu_to_le16(active_dwell);
3846 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
3847 /* If passive, set up for auto-switch
3848 * and use long active_dwell time.
3849 */
3850 if (!is_active || is_channel_passive(ch_info) ||
3851 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
3852 scan_ch->type = 0; /* passive */
3853 if (IWL_UCODE_API(priv->ucode_ver) == 1)
3854 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
3855 } else {
3856 scan_ch->type = 1; /* active */
3857 }
3858
3859 /* Set direct probe bits. These may be used both for active
3860 * scan channels (probes get sent right away),
3861 * or for passive channels (probes get sent only after
3862 * hearing a clear Rx packet). */
3863 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
3864 if (n_probes)
3865 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
3866 } else {
3867 /* uCode v1 does not allow setting direct probe bits on
3868 * passive channel. */
3869 if ((scan_ch->type & 1) && n_probes)
3870 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
3871 }
3872
3873 /* Set txpower levels to defaults */
3874 scan_ch->tpc.dsp_atten = 110;
3875 /* scan_pwr_info->tpc.dsp_atten; */
3876
3877 /*scan_pwr_info->tpc.tx_gain; */
3878 if (band == IEEE80211_BAND_5GHZ)
3879 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
3880 else {
3881 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
3882 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
3883 * power level:
3884 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
3885 */
3886 }
3887
3888 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
3889 scan_ch->channel,
3890 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
3891 (scan_ch->type & 1) ?
3892 active_dwell : passive_dwell);
3893
3894 scan_ch++;
3895 added++;
3896 }
3897
3898 IWL_DEBUG_SCAN("total channels to scan %d\n", added);
3899 return added;
3900 }
3901
3902 static void iwl3945_init_hw_rates(struct iwl_priv *priv,
3903 struct ieee80211_rate *rates)
3904 {
3905 int i;
3906
3907 for (i = 0; i < IWL_RATE_COUNT; i++) {
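/* mac80211 expresses bitrate in units of 100 kbps; the .ieee field uses
 * the 802.11 supported-rates encoding (500 kbps units), hence the * 5. */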
3908 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
3909 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3910 rates[i].hw_value_short = i;
3911 rates[i].flags = 0;
3912 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
3913 /*
3914 * If CCK != 1M then set short preamble rate flag.
3915 */
3916 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
3917 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3918 }
3919 }
3920 }
3921
3922 /**
3923 * iwl3945_init_geos - Initialize mac80211's geo/channel info based from eeprom
3924 */
3925 static int iwl3945_init_geos(struct iwl_priv *priv)
3926 {
3927 struct iwl_channel_info *ch;
3928 struct ieee80211_supported_band *sband;
3929 struct ieee80211_channel *channels;
3930 struct ieee80211_channel *geo_ch;
3931 struct ieee80211_rate *rates;
3932 int i = 0;
3933
3934 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3935 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
3936 IWL_DEBUG_INFO("Geography modes already initialized.\n");
3937 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
3938 return 0;
3939 }
3940
3941 channels = kzalloc(sizeof(struct ieee80211_channel) *
3942 priv->channel_count, GFP_KERNEL);
3943 if (!channels)
3944 return -ENOMEM;
3945
3946 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
3947 GFP_KERNEL);
3948 if (!rates) {
3949 kfree(channels);
3950 return -ENOMEM;
3951 }
3952
3953 /* 5.2GHz channels start after the 2.4GHz channels */
3954 sband = &priv->bands[IEEE80211_BAND_5GHZ];
3955 sband->channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)];
3956 /* just OFDM */
3957 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
3958 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
3959
3960 sband = &priv->bands[IEEE80211_BAND_2GHZ];
3961 sband->channels = channels;
3962 /* OFDM & CCK */
3963 sband->bitrates = rates;
3964 sband->n_bitrates = IWL_RATE_COUNT;
3965
3966 priv->ieee_channels = channels;
3967 priv->ieee_rates = rates;
3968
3969 iwl3945_init_hw_rates(priv, rates);
3970
3971 for (i = 0; i < priv->channel_count; i++) {
3972 ch = &priv->channel_info[i];
3973
3974 /* FIXME: might be removed if scan is OK */
3975 if (!is_channel_valid(ch))
3976 continue;
3977
3978 if (is_channel_a_band(ch))
3979 sband = &priv->bands[IEEE80211_BAND_5GHZ];
3980 else
3981 sband = &priv->bands[IEEE80211_BAND_2GHZ];
3982
3983 geo_ch = &sband->channels[sband->n_channels++];
3984
3985 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
3986 geo_ch->max_power = ch->max_power_avg;
3987 geo_ch->max_antenna_gain = 0xff;
3988 geo_ch->hw_value = ch->channel;
3989
3990 if (is_channel_valid(ch)) {
3991 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3992 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
3993
3994 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3995 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
3996
3997 if (ch->flags & EEPROM_CHANNEL_RADAR)
3998 geo_ch->flags |= IEEE80211_CHAN_RADAR;
3999
4000 if (ch->max_power_avg > priv->tx_power_channel_lmt)
4001 priv->tx_power_channel_lmt =
4002 ch->max_power_avg;
4003 } else {
4004 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
4005 }
4006
4007 /* Save flags for reg domain usage */
4008 geo_ch->orig_flags = geo_ch->flags;
4009
4010 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4011 ch->channel, geo_ch->center_freq,
4012 is_channel_a_band(ch) ? "5.2" : "2.4",
4013 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4014 "restricted" : "valid",
4015 geo_ch->flags);
4016 }
4017
4018 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4019 priv->cfg->sku & IWL_SKU_A) {
4020 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
4021 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
4022 priv->pci_dev->device, priv->pci_dev->subsystem_device);
4023 priv->cfg->sku &= ~IWL_SKU_A;
4024 }
4025
4026 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
4027 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4028 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
4029
4030 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4031 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4032 &priv->bands[IEEE80211_BAND_2GHZ];
4033 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4034 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4035 &priv->bands[IEEE80211_BAND_5GHZ];
4036
4037 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4038
4039 return 0;
4040 }
4041
4042 /*
4043 * iwl3945_free_geos - undo allocations in iwl3945_init_geos
4044 */
4045 static void iwl3945_free_geos(struct iwl_priv *priv)
4046 {
4047 kfree(priv->ieee_channels);
4048 kfree(priv->ieee_rates);
4049 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4050 }
4051
4052 /******************************************************************************
4053 *
4054 * uCode download functions
4055 *
4056 ******************************************************************************/
4057
4058 static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
4059 {
4060 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
4061 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
4062 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
4063 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
4064 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
4065 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
4066 }
4067
4068 /**
4069 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
4070 * looking at all data.
4071 */
4072 static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
4073 {
4074 u32 val;
4075 u32 save_len = len;
4076 int rc = 0;
4077 u32 errcnt;
4078
4079 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4080
4081 rc = iwl_grab_nic_access(priv);
4082 if (rc)
4083 return rc;
4084
4085 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
4086 IWL39_RTC_INST_LOWER_BOUND);
4087
4088 errcnt = 0;
4089 for (; len > 0; len -= sizeof(u32), image++) {
4090 /* read data comes through single port, auto-incr addr */
4091 /* NOTE: Use the debugless read so we don't flood kernel log
4092 * if IWL_DL_IO is set */
4093 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4094 if (val != le32_to_cpu(*image)) {
4095 IWL_ERR(priv, "uCode INST section is invalid at "
4096 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4097 save_len - len, val, le32_to_cpu(*image));
4098 rc = -EIO;
4099 errcnt++;
4100 if (errcnt >= 20)
4101 break;
4102 }
4103 }
4104
4105 iwl_release_nic_access(priv);
4106
4107 if (!errcnt)
4108 IWL_DEBUG_INFO("ucode image in INSTRUCTION memory is good\n");
4109
4110 return rc;
4111 }
4112
4113
4114 /**
4115 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
4116 * using sample data 100 bytes apart. If these sample points are good,
4117 * it's a pretty good bet that everything between them is good, too.
4118 */
4119 static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
4120 {
4121 u32 val;
4122 int rc = 0;
4123 u32 errcnt = 0;
4124 u32 i;
4125
4126 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4127
4128 rc = iwl_grab_nic_access(priv);
4129 if (rc)
4130 return rc;
4131
4132 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
4133 /* read data comes through single port, auto-incr addr */
4134 /* NOTE: Use the debugless read so we don't flood kernel log
4135 * if IWL_DL_IO is set */
4136 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
4137 i + IWL39_RTC_INST_LOWER_BOUND);
4138 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4139 if (val != le32_to_cpu(*image)) {
4140 #if 0 /* Enable this if you want to see details */
4141 IWL_ERR(priv, "uCode INST section is invalid at "
4142 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4143 i, val, *image);
4144 #endif
4145 rc = -EIO;
4146 errcnt++;
4147 if (errcnt >= 3)
4148 break;
4149 }
4150 }
4151
4152 iwl_release_nic_access(priv);
4153
4154 return rc;
4155 }
4156
4157
4158 /**
4159 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
4160 * and verify its contents
4161 */
4162 static int iwl3945_verify_ucode(struct iwl_priv *priv)
4163 {
4164 __le32 *image;
4165 u32 len;
4166 int rc = 0;
4167
4168 /* Try bootstrap */
4169 image = (__le32 *)priv->ucode_boot.v_addr;
4170 len = priv->ucode_boot.len;
4171 rc = iwl3945_verify_inst_sparse(priv, image, len);
4172 if (rc == 0) {
4173 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
4174 return 0;
4175 }
4176
4177 /* Try initialize */
4178 image = (__le32 *)priv->ucode_init.v_addr;
4179 len = priv->ucode_init.len;
4180 rc = iwl3945_verify_inst_sparse(priv, image, len);
4181 if (rc == 0) {
4182 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
4183 return 0;
4184 }
4185
4186 /* Try runtime/protocol */
4187 image = (__le32 *)priv->ucode_code.v_addr;
4188 len = priv->ucode_code.len;
4189 rc = iwl3945_verify_inst_sparse(priv, image, len);
4190 if (rc == 0) {
4191 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
4192 return 0;
4193 }
4194
4195 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
4196
4197 /* Since nothing seems to match, show first several data entries in
4198 * instruction SRAM, so maybe visual inspection will give a clue.
4199 * Selection of bootstrap image (vs. other images) is arbitrary. */
4200 image = (__le32 *)priv->ucode_boot.v_addr;
4201 len = priv->ucode_boot.len;
4202 rc = iwl3945_verify_inst_full(priv, image, len);
4203
4204 return rc;
4205 }
4206
4207 static void iwl3945_nic_start(struct iwl_priv *priv)
4208 {
4209 /* Remove all resets to allow NIC to operate */
4210 iwl_write32(priv, CSR_RESET, 0);
4211 }
4212
4213 /**
4214 * iwl3945_read_ucode - Read uCode images from disk file.
4215 *
4216 * Copy into buffers for card to fetch via bus-mastering
4217 */
4218 static int iwl3945_read_ucode(struct iwl_priv *priv)
4219 {
4220 struct iwl_ucode *ucode;
4221 int ret = -EINVAL, index;
4222 const struct firmware *ucode_raw;
4223 /* firmware file name contains uCode/driver compatibility version */
4224 const char *name_pre = priv->cfg->fw_name_pre;
4225 const unsigned int api_max = priv->cfg->ucode_api_max;
4226 const unsigned int api_min = priv->cfg->ucode_api_min;
4227 char buf[25];
4228 u8 *src;
4229 size_t len;
4230 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
4231
4232 /* Ask kernel firmware_class module to get the boot firmware off disk.
4233 * request_firmware() is synchronous, file is in memory on return. */
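/* Try firmware files from the highest supported API version down to the
 * lowest, i.e. "<fw_name_pre><api>.ucode". */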
4234 for (index = api_max; index >= api_min; index--) {
4235 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
4236 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
4237 if (ret < 0) {
4238 IWL_ERR(priv, "%s firmware file req failed: %d\n",
4239 buf, ret);
4240 if (ret == -ENOENT)
4241 continue;
4242 else
4243 goto error;
4244 } else {
4245 if (index < api_max)
4246 IWL_ERR(priv, "Loaded firmware %s, "
4247 "which is deprecated. "
4248 "Please use API v%u instead.\n",
4249 buf, api_max);
4250 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
4251 buf, ucode_raw->size);
4252 break;
4253 }
4254 }
4255
4256 if (ret < 0)
4257 goto error;
4258
4259 /* Make sure that we got at least our header! */
4260 if (ucode_raw->size < sizeof(*ucode)) {
4261 IWL_ERR(priv, "File size way too small!\n");
4262 ret = -EINVAL;
4263 goto err_release;
4264 }
4265
4266 /* Data from ucode file: header followed by uCode images */
4267 ucode = (void *)ucode_raw->data;
4268
4269 priv->ucode_ver = le32_to_cpu(ucode->ver);
4270 api_ver = IWL_UCODE_API(priv->ucode_ver);
4271 inst_size = le32_to_cpu(ucode->inst_size);
4272 data_size = le32_to_cpu(ucode->data_size);
4273 init_size = le32_to_cpu(ucode->init_size);
4274 init_data_size = le32_to_cpu(ucode->init_data_size);
4275 boot_size = le32_to_cpu(ucode->boot_size);
4276
4277 /* api_ver should match the api version forming part of the
4278 * firmware filename ... but we don't check for that and only rely
4279 * on the API version read from the firmware header from here on forward */
4280
4281 if (api_ver < api_min || api_ver > api_max) {
4282 IWL_ERR(priv, "Driver unable to support your firmware API. "
4283 "Driver supports v%u, firmware is v%u.\n",
4284 api_max, api_ver);
4285 priv->ucode_ver = 0;
4286 ret = -EINVAL;
4287 goto err_release;
4288 }
4289 if (api_ver != api_max)
4290 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
4291 "got %u. New firmware can be obtained "
4292 "from http://www.intellinuxwireless.org.\n",
4293 api_max, api_ver);
4294
4295 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
4296 IWL_UCODE_MAJOR(priv->ucode_ver),
4297 IWL_UCODE_MINOR(priv->ucode_ver),
4298 IWL_UCODE_API(priv->ucode_ver),
4299 IWL_UCODE_SERIAL(priv->ucode_ver));
4300
4301 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n",
4302 priv->ucode_ver);
4303 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
4304 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size);
4305 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size);
4306 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size);
4307 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size);
4308
4309
4310 /* Verify size of file vs. image size info in file's header */
4311 if (ucode_raw->size < sizeof(*ucode) +
4312 inst_size + data_size + init_size +
4313 init_data_size + boot_size) {
4314
4315 IWL_DEBUG_INFO("uCode file size %d too small\n",
4316 (int)ucode_raw->size);
4317 ret = -EINVAL;
4318 goto err_release;
4319 }
4320
4321 /* Verify that uCode images will fit in card's SRAM */
4322 if (inst_size > IWL39_MAX_INST_SIZE) {
4323 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
4324 inst_size);
4325 ret = -EINVAL;
4326 goto err_release;
4327 }
4328
4329 if (data_size > IWL39_MAX_DATA_SIZE) {
4330 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
4331 data_size);
4332 ret = -EINVAL;
4333 goto err_release;
4334 }
4335 if (init_size > IWL39_MAX_INST_SIZE) {
4336 IWL_DEBUG_INFO("uCode init instr len %d too large to fit in\n",
4337 init_size);
4338 ret = -EINVAL;
4339 goto err_release;
4340 }
4341 if (init_data_size > IWL39_MAX_DATA_SIZE) {
4342 IWL_DEBUG_INFO("uCode init data len %d too large to fit in\n",
4343 init_data_size);
4344 ret = -EINVAL;
4345 goto err_release;
4346 }
4347 if (boot_size > IWL39_MAX_BSM_SIZE) {
4348 IWL_DEBUG_INFO("uCode boot instr len %d too large to fit in\n",
4349 boot_size);
4350 ret = -EINVAL;
4351 goto err_release;
4352 }
4353
4354 /* Allocate ucode buffers for card's bus-master loading ... */
4355
4356 /* Runtime instructions and 2 copies of data:
4357 * 1) unmodified from disk
4358 * 2) backup cache for save/restore during power-downs */
4359 priv->ucode_code.len = inst_size;
4360 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
4361
4362 priv->ucode_data.len = data_size;
4363 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
4364
4365 priv->ucode_data_backup.len = data_size;
4366 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
4367
4368 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
4369 !priv->ucode_data_backup.v_addr)
4370 goto err_pci_alloc;
4371
4372 /* Initialization instructions and data */
4373 if (init_size && init_data_size) {
4374 priv->ucode_init.len = init_size;
4375 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
4376
4377 priv->ucode_init_data.len = init_data_size;
4378 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
4379
4380 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
4381 goto err_pci_alloc;
4382 }
4383
4384 /* Bootstrap (instructions only, no data) */
4385 if (boot_size) {
4386 priv->ucode_boot.len = boot_size;
4387 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
4388
4389 if (!priv->ucode_boot.v_addr)
4390 goto err_pci_alloc;
4391 }
4392
4393 /* Copy images into buffers for card's bus-master reads ... */
4394
4395 /* Runtime instructions (first block of data in file) */
4396 src = &ucode->data[0];
4397 len = priv->ucode_code.len;
4398 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
4399 memcpy(priv->ucode_code.v_addr, src, len);
4400 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
4401 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
4402
4403 /* Runtime data (2nd block)
4404 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
4405 src = &ucode->data[inst_size];
4406 len = priv->ucode_data.len;
4407 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
4408 memcpy(priv->ucode_data.v_addr, src, len);
4409 memcpy(priv->ucode_data_backup.v_addr, src, len);
4410
4411 /* Initialization instructions (3rd block) */
4412 if (init_size) {
4413 src = &ucode->data[inst_size + data_size];
4414 len = priv->ucode_init.len;
4415 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
4416 len);
4417 memcpy(priv->ucode_init.v_addr, src, len);
4418 }
4419
4420 /* Initialization data (4th block) */
4421 if (init_data_size) {
4422 src = &ucode->data[inst_size + data_size + init_size];
4423 len = priv->ucode_init_data.len;
4424 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
4425 (int)len);
4426 memcpy(priv->ucode_init_data.v_addr, src, len);
4427 }
4428
4429 /* Bootstrap instructions (5th block) */
4430 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
4431 len = priv->ucode_boot.len;
4432 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
4433 (int)len);
4434 memcpy(priv->ucode_boot.v_addr, src, len);
4435
4436 /* We have our copies now, allow OS release its copies */
4437 release_firmware(ucode_raw);
4438 return 0;
4439
4440 err_pci_alloc:
4441 IWL_ERR(priv, "failed to allocate pci memory\n");
4442 ret = -ENOMEM;
4443 iwl3945_dealloc_ucode_pci(priv);
4444
4445 err_release:
4446 release_firmware(ucode_raw);
4447
4448 error:
4449 return ret;
4450 }
4451
4452
4453 /**
4454 * iwl3945_set_ucode_ptrs - Set uCode address location
4455 *
4456 * Tell initialization uCode where to find runtime uCode.
4457 *
4458 * BSM registers initially contain pointers to initialization uCode.
4459 * We need to replace them to load runtime uCode inst and data,
4460 * and to save runtime data when powering down.
4461 */
4462 static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
4463 {
4464 dma_addr_t pinst;
4465 dma_addr_t pdata;
4466 int rc = 0;
4467 unsigned long flags;
4468
4469 /* bits 31:0 for 3945 */
4470 pinst = priv->ucode_code.p_addr;
4471 pdata = priv->ucode_data_backup.p_addr;
4472
4473 spin_lock_irqsave(&priv->lock, flags);
4474 rc = iwl_grab_nic_access(priv);
4475 if (rc) {
4476 spin_unlock_irqrestore(&priv->lock, flags);
4477 return rc;
4478 }
4479
4480 /* Tell bootstrap uCode where to find image to load */
4481 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
4482 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
4483 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
4484 priv->ucode_data.len);
4485
4486 /* Inst byte count must be last to set up, bit 31 signals uCode
4487 * that all new ptr/size info is in place */
4488 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
4489 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
4490
4491 iwl_release_nic_access(priv);
4492
4493 spin_unlock_irqrestore(&priv->lock, flags);
4494
4495 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
4496
4497 return rc;
4498 }
4499
4500 /**
4501 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
4502 *
4503 * Called after REPLY_ALIVE notification received from "initialize" uCode.
4504 *
4505 * Tell "initialize" uCode to go ahead and load the runtime uCode.
4506 */
4507 static void iwl3945_init_alive_start(struct iwl_priv *priv)
4508 {
4509 /* Check alive response for "valid" sign from uCode */
4510 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
4511 /* We had an error bringing up the hardware, so take it
4512 * all the way back down so we can try again */
4513 IWL_DEBUG_INFO("Initialize Alive failed.\n");
4514 goto restart;
4515 }
4516
4517 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
4518 * This is a paranoid check, because we would not have gotten the
4519 * "initialize" alive if code weren't properly loaded. */
4520 if (iwl3945_verify_ucode(priv)) {
4521 /* Runtime instruction load was bad;
4522 * take it all the way back down so we can try again */
4523 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
4524 goto restart;
4525 }
4526
4527 /* Send pointers to protocol/runtime uCode image ... init code will
4528 * load and launch runtime uCode, which will send us another "Alive"
4529 * notification. */
4530 IWL_DEBUG_INFO("Initialization Alive received.\n");
4531 if (iwl3945_set_ucode_ptrs(priv)) {
4532 /* Runtime instruction load won't happen;
4533 * take it all the way back down so we can try again */
4534 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
4535 goto restart;
4536 }
4537 return;
4538
4539 restart:
4540 queue_work(priv->workqueue, &priv->restart);
4541 }
4542
4543
4544 /* temporary */
4545 static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
4546 struct sk_buff *skb);
4547
4548 /**
4549 * iwl3945_alive_start - called after REPLY_ALIVE notification received
4550 * from protocol/runtime uCode (initialization uCode's
4551 * Alive gets handled by iwl3945_init_alive_start()).
4552 */
4553 static void iwl3945_alive_start(struct iwl_priv *priv)
4554 {
4555 int rc = 0;
4556 int thermal_spin = 0;
4557 u32 rfkill;
4558
4559 IWL_DEBUG_INFO("Runtime Alive received.\n");
4560
4561 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
4562 /* We had an error bringing up the hardware, so take it
4563 * all the way back down so we can try again */
4564 IWL_DEBUG_INFO("Alive failed.\n");
4565 goto restart;
4566 }
4567
4568 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
4569 * This is a paranoid check, because we would not have gotten the
4570 * "runtime" alive if code weren't properly loaded. */
4571 if (iwl3945_verify_ucode(priv)) {
4572 /* Runtime instruction load was bad;
4573 * take it all the way back down so we can try again */
4574 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
4575 goto restart;
4576 }
4577
4578 iwl3945_clear_stations_table(priv);
4579
4580 rc = iwl_grab_nic_access(priv);
4581 if (rc) {
4582 IWL_WARN(priv, "Can not read RFKILL status from adapter\n");
4583 return;
4584 }
4585
4586 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
4587 IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill);
4588 iwl_release_nic_access(priv);
4589
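/* Bit 0 of APMG_RFKILL_REG set means the radio is enabled (no HW RF kill). */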
4590 if (rfkill & 0x1) {
4591 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4592 /* if RFKILL is not on, then wait for thermal
4593 * sensor in adapter to kick in */
4594 while (iwl3945_hw_get_temperature(priv) == 0) {
4595 thermal_spin++;
4596 udelay(10);
4597 }
4598
4599 if (thermal_spin)
4600 IWL_DEBUG_INFO("Thermal calibration took %dus\n",
4601 thermal_spin * 10);
4602 } else
4603 set_bit(STATUS_RF_KILL_HW, &priv->status);
4604
4605 /* After the ALIVE response, we can send commands to 3945 uCode */
4606 set_bit(STATUS_ALIVE, &priv->status);
4607
4608 /* Clear out the uCode error bit if it is set */
4609 clear_bit(STATUS_FW_ERROR, &priv->status);
4610
4611 if (iwl_is_rfkill(priv))
4612 return;
4613
4614 ieee80211_wake_queues(priv->hw);
4615
4616 priv->active_rate = priv->rates_mask;
4617 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
4618
4619 iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
4620
4621 if (iwl3945_is_associated(priv)) {
4622 struct iwl3945_rxon_cmd *active_rxon =
4623 (struct iwl3945_rxon_cmd *)(&priv->active39_rxon);
4624
4625 memcpy(&priv->staging39_rxon, &priv->active39_rxon,
4626 sizeof(priv->staging39_rxon));
4627 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4628 } else {
4629 /* Initialize our rx_config data */
4630 iwl3945_connection_init_rx_config(priv, priv->iw_mode);
4631 memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
4632 }
4633
4634 /* Configure Bluetooth device coexistence support */
4635 iwl3945_send_bt_config(priv);
4636
4637 /* Configure the adapter for unassociated operation */
4638 iwl3945_commit_rxon(priv);
4639
4640 iwl3945_reg_txpower_periodic(priv);
4641
4642 iwl3945_led_register(priv);
4643
4644 IWL_DEBUG_INFO("ALIVE processing complete.\n");
4645 set_bit(STATUS_READY, &priv->status);
4646 wake_up_interruptible(&priv->wait_command_queue);
4647
4648 if (priv->error_recovering)
4649 iwl3945_error_recovery(priv);
4650
4651 /* reassociate for ADHOC mode */
4652 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
4653 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
4654 priv->vif);
4655 if (beacon)
4656 iwl3945_mac_beacon_update(priv->hw, beacon);
4657 }
4658
4659 return;
4660
4661 restart:
4662 queue_work(priv->workqueue, &priv->restart);
4663 }
4664
4665 static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
4666
4667 static void __iwl3945_down(struct iwl_priv *priv)
4668 {
4669 unsigned long flags;
4670 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
4671 struct ieee80211_conf *conf = NULL;
4672
4673 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
4674
4675 conf = ieee80211_get_hw_conf(priv->hw);
4676
4677 if (!exit_pending)
4678 set_bit(STATUS_EXIT_PENDING, &priv->status);
4679
4680 iwl3945_led_unregister(priv);
4681 iwl3945_clear_stations_table(priv);
4682
4683 /* Unblock any waiting calls */
4684 wake_up_interruptible_all(&priv->wait_command_queue);
4685
4686 /* Wipe out the EXIT_PENDING status bit if we are not actually
4687 * exiting the module */
4688 if (!exit_pending)
4689 clear_bit(STATUS_EXIT_PENDING, &priv->status);
4690
4691 /* stop and reset the on-board processor */
4692 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4693
4694 /* tell the device to stop sending interrupts */
4695 spin_lock_irqsave(&priv->lock, flags);
4696 iwl3945_disable_interrupts(priv);
4697 spin_unlock_irqrestore(&priv->lock, flags);
4698 iwl_synchronize_irq(priv);
4699
4700 if (priv->mac80211_registered)
4701 ieee80211_stop_queues(priv->hw);
4702
4703 /* If we have not previously called iwl3945_init() then
4704 * clear all bits but the RF Kill and SUSPEND bits and return */
4705 if (!iwl_is_init(priv)) {
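/* Each test_bit() below yields 0 or 1, which is then shifted back to its
 * bit position; the assignment rebuilds priv->status with only the
 * listed flags preserved. */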
4706 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
4707 STATUS_RF_KILL_HW |
4708 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
4709 STATUS_RF_KILL_SW |
4710 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
4711 STATUS_GEO_CONFIGURED |
4712 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
4713 STATUS_IN_SUSPEND |
4714 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
4715 STATUS_EXIT_PENDING;
4716 goto exit;
4717 }
4718
4719 /* ...otherwise clear out all the status bits but the RF Kill and
4720 * SUSPEND bits and continue taking the NIC down. */
4721 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
4722 STATUS_RF_KILL_HW |
4723 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
4724 STATUS_RF_KILL_SW |
4725 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
4726 STATUS_GEO_CONFIGURED |
4727 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
4728 STATUS_IN_SUSPEND |
4729 test_bit(STATUS_FW_ERROR, &priv->status) <<
4730 STATUS_FW_ERROR |
4731 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
4732 STATUS_EXIT_PENDING;
4733
4734 priv->cfg->ops->lib->apm_ops.reset(priv);
4735 spin_lock_irqsave(&priv->lock, flags);
4736 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4737 spin_unlock_irqrestore(&priv->lock, flags);
4738
4739 iwl3945_hw_txq_ctx_stop(priv);
4740 iwl3945_hw_rxq_stop(priv);
4741
4742 spin_lock_irqsave(&priv->lock, flags);
4743 if (!iwl_grab_nic_access(priv)) {
4744 iwl_write_prph(priv, APMG_CLK_DIS_REG,
4745 APMG_CLK_VAL_DMA_CLK_RQT);
4746 iwl_release_nic_access(priv);
4747 }
4748 spin_unlock_irqrestore(&priv->lock, flags);
4749
4750 udelay(5);
4751
4752 if (exit_pending || test_bit(STATUS_IN_SUSPEND, &priv->status))
4753 priv->cfg->ops->lib->apm_ops.stop(priv);
4754 else
4755 priv->cfg->ops->lib->apm_ops.reset(priv);
4756
4757 exit:
4758 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
4759
4760 if (priv->ibss_beacon)
4761 dev_kfree_skb(priv->ibss_beacon);
4762 priv->ibss_beacon = NULL;
4763
4764 /* clear out any free frames */
4765 iwl3945_clear_free_frames(priv);
4766 }
4767
4768 static void iwl3945_down(struct iwl_priv *priv)
4769 {
4770 mutex_lock(&priv->mutex);
4771 __iwl3945_down(priv);
4772 mutex_unlock(&priv->mutex);
4773
4774 iwl3945_cancel_deferred_work(priv);
4775 }
4776
4777 #define MAX_HW_RESTARTS 5
4778
4779 static int __iwl3945_up(struct iwl_priv *priv)
4780 {
4781 int rc, i;
4782
4783 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4784 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
4785 return -EIO;
4786 }
4787
4788 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
4789 IWL_WARN(priv, "Radio disabled by SW RF kill (module "
4790 "parameter)\n");
4791 return -ENODEV;
4792 }
4793
4794 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
4795 IWL_ERR(priv, "ucode not available for device bring up\n");
4796 return -EIO;
4797 }
4798
4799 /* If platform's RF_KILL switch is NOT set to KILL */
4800 if (iwl_read32(priv, CSR_GP_CNTRL) &
4801 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
4802 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4803 else {
4804 set_bit(STATUS_RF_KILL_HW, &priv->status);
4805 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
4806 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
4807 return -ENODEV;
4808 }
4809 }
4810
4811 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
4812
4813 rc = iwl3945_hw_nic_init(priv);
4814 if (rc) {
4815 IWL_ERR(priv, "Unable to init nic\n");
4816 return rc;
4817 }
4818
4819 /* make sure rfkill handshake bits are cleared */
4820 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4821 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
4822 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4823
4824 /* clear (again), then enable host interrupts */
4825 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
4826 iwl3945_enable_interrupts(priv);
4827
4828 /* really make sure rfkill handshake bits are cleared */
4829 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4830 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4831
4832 /* Copy original ucode data image from disk into backup cache.
4833 * This will be used to initialize the on-board processor's
4834 * data SRAM for a clean start when the runtime program first loads. */
4835 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
4836 priv->ucode_data.len);
4837
4838 /* We return success when we resume from suspend and rf_kill is on. */
4839 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
4840 return 0;
4841
4842 for (i = 0; i < MAX_HW_RESTARTS; i++) {
4843
4844 iwl3945_clear_stations_table(priv);
4845
4846 /* load bootstrap state machine,
4847 * load bootstrap program into processor's memory,
4848 * prepare to load the "initialize" uCode */
4849 rc = priv->cfg->ops->lib->load_ucode(priv);
4850
4851 if (rc) {
4852 IWL_ERR(priv,
4853 "Unable to set up bootstrap uCode: %d\n", rc);
4854 continue;
4855 }
4856
4857 /* start card; "initialize" will load runtime ucode */
4858 iwl3945_nic_start(priv);
4859
4860 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
4861
4862 return 0;
4863 }
4864
4865 set_bit(STATUS_EXIT_PENDING, &priv->status);
4866 __iwl3945_down(priv);
4867 clear_bit(STATUS_EXIT_PENDING, &priv->status);
4868
4869 /* We tried to restart and configure the device for as long as our
4870 * patience could withstand. */
4871 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
4872 return -EIO;
4873 }
4874
4875
4876 /*****************************************************************************
4877 *
4878 * Workqueue callbacks
4879 *
4880 *****************************************************************************/
4881
4882 static void iwl3945_bg_init_alive_start(struct work_struct *data)
4883 {
4884 struct iwl_priv *priv =
4885 container_of(data, struct iwl_priv, init_alive_start.work);
4886
4887 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
4888 return;
4889
4890 mutex_lock(&priv->mutex);
4891 iwl3945_init_alive_start(priv);
4892 mutex_unlock(&priv->mutex);
4893 }
4894
4895 static void iwl3945_bg_alive_start(struct work_struct *data)
4896 {
4897 struct iwl_priv *priv =
4898 container_of(data, struct iwl_priv, alive_start.work);
4899
4900 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
4901 return;
4902
4903 mutex_lock(&priv->mutex);
4904 iwl3945_alive_start(priv);
4905 mutex_unlock(&priv->mutex);
4906 }
4907
4908 static void iwl3945_rfkill_poll(struct work_struct *data)
4909 {
4910 struct iwl_priv *priv =
4911 container_of(data, struct iwl_priv, rfkill_poll.work);
4912 unsigned long status = priv->status;
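/* Snapshot the previous status so a change in the HW RF-kill bit can be
 * detected after polling the GP_CNTRL register below. */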
4913
4914 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
4915 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4916 else
4917 set_bit(STATUS_RF_KILL_HW, &priv->status);
4918
4919 if (test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status))
4920 queue_work(priv->workqueue, &priv->rf_kill);
4921
4922 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
4923 round_jiffies_relative(2 * HZ));
4924
4925 }
4926
4927 #define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
4928 static void iwl3945_bg_request_scan(struct work_struct *data)
4929 {
4930 struct iwl_priv *priv =
4931 container_of(data, struct iwl_priv, request_scan);
4932 struct iwl_host_cmd cmd = {
4933 .id = REPLY_SCAN_CMD,
4934 .len = sizeof(struct iwl3945_scan_cmd),
4935 .meta.flags = CMD_SIZE_HUGE,
4936 };
4937 int rc = 0;
4938 struct iwl3945_scan_cmd *scan;
4939 struct ieee80211_conf *conf = NULL;
4940 u8 n_probes = 2;
4941 enum ieee80211_band band;
4942 DECLARE_SSID_BUF(ssid);
4943
4944 conf = ieee80211_get_hw_conf(priv->hw);
4945
4946 mutex_lock(&priv->mutex);
4947
4948 if (!iwl_is_ready(priv)) {
4949 IWL_WARN(priv, "request scan called when driver not ready.\n");
4950 goto done;
4951 }
4952
4953 /* Make sure the scan wasn't canceled before this queued work
4954 * was given the chance to run... */
4955 if (!test_bit(STATUS_SCANNING, &priv->status))
4956 goto done;
4957
4958 /* This should never be called or scheduled if there is currently
4959 * a scan active in the hardware. */
4960 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
4961 IWL_DEBUG_INFO("Multiple concurrent scan requests. "
4962 "Ignoring second request.\n");
4963 rc = -EIO;
4964 goto done;
4965 }
4966
4967 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4968 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
4969 goto done;
4970 }
4971
4972 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4973 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
4974 goto done;
4975 }
4976
4977 if (iwl_is_rfkill(priv)) {
4978 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
4979 goto done;
4980 }
4981
4982 if (!test_bit(STATUS_READY, &priv->status)) {
4983 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
4984 goto done;
4985 }
4986
4987 if (!priv->scan_bands) {
4988 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
4989 goto done;
4990 }
4991
4992 if (!priv->scan) {
4993 priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
4994 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
4995 if (!priv->scan) {
4996 rc = -ENOMEM;
4997 goto done;
4998 }
4999 }
5000 scan = priv->scan;
5001 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
5002
5003 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
5004 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
5005
5006 if (iwl3945_is_associated(priv)) {
5007 u16 interval = 0;
5008 u32 extra;
5009 u32 suspend_time = 100;
5010 u32 scan_suspend_time = 100;
5011 unsigned long flags;
5012
5013 IWL_DEBUG_INFO("Scanning while associated...\n");
5014
5015 spin_lock_irqsave(&priv->lock, flags);
5016 interval = priv->beacon_int;
5017 spin_unlock_irqrestore(&priv->lock, flags);
5018
5019 scan->suspend_time = 0;
5020 scan->max_out_time = cpu_to_le32(200 * 1024);
5021 if (!interval)
5022 interval = suspend_time;
5023 /*
5024 * suspend time format:
5025 * 0-19: beacon interval in usec (time before exec.)
5026 * 20-23: 0
5027 * 24-31: number of beacons (suspend between channels)
5028 */
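/*
 * Example: with suspend_time = 100 and a beacon interval of 100,
 * extra = (100 / 100) << 24 = 0x01000000 and the remainder term is 0,
 * so scan_suspend_time = 0x01000000, i.e. suspend for one beacon.
 */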
5029
5030 extra = (suspend_time / interval) << 24;
5031 scan_suspend_time = 0xFF0FFFFF &
5032 (extra | ((suspend_time % interval) * 1024));
5033
5034 scan->suspend_time = cpu_to_le32(scan_suspend_time);
5035 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
5036 scan_suspend_time, interval);
5037 }
5038
5039 /* We should add the ability for the user to lock to PASSIVE ONLY */
5040 if (priv->one_direct_scan) {
5041 IWL_DEBUG_SCAN
5042 ("Kicking off one direct scan for '%s'\n",
5043 print_ssid(ssid, priv->direct_ssid,
5044 priv->direct_ssid_len));
5045 scan->direct_scan[0].id = WLAN_EID_SSID;
5046 scan->direct_scan[0].len = priv->direct_ssid_len;
5047 memcpy(scan->direct_scan[0].ssid,
5048 priv->direct_ssid, priv->direct_ssid_len);
5049 n_probes++;
5050 } else
5051 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
5052
5053 /* We don't build a direct scan probe request; the uCode will do
5054 * that based on the direct_mask added to each channel entry */
5055 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
5056 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
5057 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
5058
5059 /* flags + rate selection */
5060
5061 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
5062 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
5063 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
5064 scan->good_CRC_th = 0;
5065 band = IEEE80211_BAND_2GHZ;
5066 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
5067 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
5068 scan->good_CRC_th = IWL_GOOD_CRC_TH;
5069 band = IEEE80211_BAND_5GHZ;
5070 } else {
5071 IWL_WARN(priv, "Invalid scan band count\n");
5072 goto done;
5073 }
5074
5075 scan->tx_cmd.len = cpu_to_le16(
5076 iwl_fill_probe_req(priv, band,
5077 (struct ieee80211_mgmt *)scan->data,
5078 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
5079
5080 /* select Rx antennas */
5081 scan->flags |= iwl3945_get_antenna_flags(priv);
5082
5083 if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
5084 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
5085
5086 scan->channel_count =
5087 iwl3945_get_channels_for_scan(priv, band, 1, /* active */
5088 n_probes,
5089 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
5090
5091 if (scan->channel_count == 0) {
5092 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
5093 goto done;
5094 }
5095
5096 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
5097 scan->channel_count * sizeof(struct iwl3945_scan_channel);
5098 cmd.data = scan;
5099 scan->len = cpu_to_le16(cmd.len);
5100
5101 set_bit(STATUS_SCAN_HW, &priv->status);
5102 rc = iwl_send_cmd_sync(priv, &cmd);
5103 if (rc)
5104 goto done;
5105
5106 queue_delayed_work(priv->workqueue, &priv->scan_check,
5107 IWL_SCAN_CHECK_WATCHDOG);
5108
5109 mutex_unlock(&priv->mutex);
5110 return;
5111
5112 done:
5113 /* Cannot perform the scan. Make sure we clear the scanning
5114 * bits from status so the next scan request can be performed;
5115 * if we don't clear them here, all subsequent scans will fail.
5116 */
5118 clear_bit(STATUS_SCAN_HW, &priv->status);
5119 clear_bit(STATUS_SCANNING, &priv->status);
5120
5121 /* inform mac80211 scan aborted */
5122 queue_work(priv->workqueue, &priv->scan_completed);
5123 mutex_unlock(&priv->mutex);
5124 }
5125
5126 static void iwl3945_bg_up(struct work_struct *data)
5127 {
5128 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
5129
5130 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5131 return;
5132
5133 mutex_lock(&priv->mutex);
5134 __iwl3945_up(priv);
5135 mutex_unlock(&priv->mutex);
5136 iwl_rfkill_set_hw_state(priv);
5137 }
5138
5139 static void iwl3945_bg_restart(struct work_struct *data)
5140 {
5141 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
5142
5143 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5144 return;
5145
5146 iwl3945_down(priv);
5147 queue_work(priv->workqueue, &priv->up);
5148 }
5149
5150 static void iwl3945_bg_rx_replenish(struct work_struct *data)
5151 {
5152 struct iwl_priv *priv =
5153 container_of(data, struct iwl_priv, rx_replenish);
5154
5155 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5156 return;
5157
5158 mutex_lock(&priv->mutex);
5159 iwl3945_rx_replenish(priv);
5160 mutex_unlock(&priv->mutex);
5161 }
5162
5163 #define IWL_DELAY_NEXT_SCAN (HZ*2)
5164
5165 static void iwl3945_post_associate(struct iwl_priv *priv)
5166 {
5167 int rc = 0;
5168 struct ieee80211_conf *conf = NULL;
5169
5170 if (priv->iw_mode == NL80211_IFTYPE_AP) {
5171 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
5172 return;
5173 }
5174
5175
5176 IWL_DEBUG_ASSOC("Associated as %d to: %pM\n",
5177 priv->assoc_id, priv->active39_rxon.bssid_addr);
5178
5179 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5180 return;
5181
5182 if (!priv->vif || !priv->is_open)
5183 return;
5184
5185 iwl_scan_cancel_timeout(priv, 200);
5186
5187 conf = ieee80211_get_hw_conf(priv->hw);
5188
5189 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5190 iwl3945_commit_rxon(priv);
5191
5192 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
5193 iwl3945_setup_rxon_timing(priv);
5194 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
5195 sizeof(priv->rxon_timing), &priv->rxon_timing);
5196 if (rc)
5197 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
5198 "Attempting to continue.\n");
5199
5200 priv->staging39_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
5201
5202 priv->staging39_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
5203
5204 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
5205 priv->assoc_id, priv->beacon_int);
5206
5207 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
5208 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
5209 else
5210 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
5211
5212 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) {
5213 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
5214 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
5215 else
5216 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
5217
5218 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
5219 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
5220
5221 }
5222
5223 iwl3945_commit_rxon(priv);
5224
5225 switch (priv->iw_mode) {
5226 case NL80211_IFTYPE_STATION:
5227 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
5228 break;
5229
5230 case NL80211_IFTYPE_ADHOC:
5231
5232 priv->assoc_id = 1;
5233 iwl3945_add_station(priv, priv->bssid, 0, 0);
5234 iwl3945_sync_sta(priv, IWL_STA_ID,
5235 (priv->band == IEEE80211_BAND_5GHZ) ?
5236 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
5237 CMD_ASYNC);
5238 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
5239 iwl3945_send_beacon_cmd(priv);
5240
5241 break;
5242
5243 default:
5244 IWL_ERR(priv, "%s Should not be called in %d mode\n",
5245 __func__, priv->iw_mode);
5246 break;
5247 }
5248
5249 iwl3945_activate_qos(priv, 0);
5250
5251 /* we have just associated, don't start scan too early */
5252 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
5253 }
5254
5255 static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed);
5256
5257 /*****************************************************************************
5258 *
5259 * mac80211 entry point functions
5260 *
5261 *****************************************************************************/
5262
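/* How long the mac80211 start callback waits for the ucode ALIVE
 * (STATUS_READY) notification before giving up. */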
5263 #define UCODE_READY_TIMEOUT (2 * HZ)
5264
5265 static int iwl3945_mac_start(struct ieee80211_hw *hw)
5266 {
5267 struct iwl_priv *priv = hw->priv;
5268 int ret;
5269
5270 IWL_DEBUG_MAC80211("enter\n");
5271
5272 /* we should be verifying the device is ready to be opened */
5273 mutex_lock(&priv->mutex);
5274
5275 memset(&priv->staging39_rxon, 0, sizeof(struct iwl3945_rxon_cmd));
5276 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
5277 * ucode filename and max sizes are card-specific. */
5278
5279 if (!priv->ucode_code.len) {
5280 ret = iwl3945_read_ucode(priv);
5281 if (ret) {
5282 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
5283 mutex_unlock(&priv->mutex);
5284 goto out_release_irq;
5285 }
5286 }
5287
5288 ret = __iwl3945_up(priv);
5289
5290 mutex_unlock(&priv->mutex);
5291
5292 iwl_rfkill_set_hw_state(priv);
5293
5294 if (ret)
5295 goto out_release_irq;
5296
5297 IWL_DEBUG_INFO("Start UP work.\n");
5298
5299 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
5300 return 0;
5301
5302 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
5303 * mac80211 will not be run successfully. */
5304 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
5305 test_bit(STATUS_READY, &priv->status),
5306 UCODE_READY_TIMEOUT);
5307 if (!ret) {
5308 if (!test_bit(STATUS_READY, &priv->status)) {
5309 IWL_ERR(priv,
5310 "Wait for START_ALIVE timeout after %dms.\n",
5311 jiffies_to_msecs(UCODE_READY_TIMEOUT));
5312 ret = -ETIMEDOUT;
5313 goto out_release_irq;
5314 }
5315 }
5316
5317 /* ucode is running and will send rfkill notifications,
5318 * no need to poll the killswitch state anymore */
5319 cancel_delayed_work(&priv->rfkill_poll);
5320
5321 priv->is_open = 1;
5322 IWL_DEBUG_MAC80211("leave\n");
5323 return 0;
5324
5325 out_release_irq:
5326 priv->is_open = 0;
5327 IWL_DEBUG_MAC80211("leave - failed\n");
5328 return ret;
5329 }
5330
5331 static void iwl3945_mac_stop(struct ieee80211_hw *hw)
5332 {
5333 struct iwl_priv *priv = hw->priv;
5334
5335 IWL_DEBUG_MAC80211("enter\n");
5336
5337 if (!priv->is_open) {
5338 IWL_DEBUG_MAC80211("leave - skip\n");
5339 return;
5340 }
5341
5342 priv->is_open = 0;
5343
5344 if (iwl_is_ready_rf(priv)) {
5345 /* stop mac, cancel any scan request and clear
5346 * RXON_FILTER_ASSOC_MSK BIT
5347 */
5348 mutex_lock(&priv->mutex);
5349 iwl_scan_cancel_timeout(priv, 100);
5350 mutex_unlock(&priv->mutex);
5351 }
5352
5353 iwl3945_down(priv);
5354
5355 flush_workqueue(priv->workqueue);
5356
5357 /* start polling the killswitch state again */
5358 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
5359 round_jiffies_relative(2 * HZ));
5360
5361 IWL_DEBUG_MAC80211("leave\n");
5362 }
5363
5364 static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
5365 {
5366 struct iwl_priv *priv = hw->priv;
5367
5368 IWL_DEBUG_MAC80211("enter\n");
5369
5370 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
5371 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
5372
5373 if (iwl3945_tx_skb(priv, skb))
5374 dev_kfree_skb_any(skb);
5375
5376 IWL_DEBUG_MAC80211("leave\n");
5377 return NETDEV_TX_OK;
5378 }
5379
5380 static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
5381 struct ieee80211_if_init_conf *conf)
5382 {
5383 struct iwl_priv *priv = hw->priv;
5384 unsigned long flags;
5385
5386 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
5387
5388 if (priv->vif) {
5389 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
5390 return -EOPNOTSUPP;
5391 }
5392
5393 spin_lock_irqsave(&priv->lock, flags);
5394 priv->vif = conf->vif;
5395 priv->iw_mode = conf->type;
5396
5397 spin_unlock_irqrestore(&priv->lock, flags);
5398
5399 mutex_lock(&priv->mutex);
5400
5401 if (conf->mac_addr) {
5402 IWL_DEBUG_MAC80211("Set: %pM\n", conf->mac_addr);
5403 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
5404 }
5405
5406 if (iwl_is_ready(priv))
5407 iwl3945_set_mode(priv, conf->type);
5408
5409 mutex_unlock(&priv->mutex);
5410
5411 IWL_DEBUG_MAC80211("leave\n");
5412 return 0;
5413 }
5414
5415 /**
5416 * iwl3945_mac_config - mac80211 config callback
5417 *
5418 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
5419 * be set inappropriately and the driver currently sets the hardware up to
5420 * use it whenever needed.
5421 */
5422 static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
5423 {
5424 struct iwl_priv *priv = hw->priv;
5425 const struct iwl_channel_info *ch_info;
5426 struct ieee80211_conf *conf = &hw->conf;
5427 unsigned long flags;
5428 int ret = 0;
5429
5430 mutex_lock(&priv->mutex);
5431 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
5432
5433 if (!iwl_is_ready(priv)) {
5434 IWL_DEBUG_MAC80211("leave - not ready\n");
5435 ret = -EIO;
5436 goto out;
5437 }
5438
5439 if (unlikely(!iwl3945_mod_params.disable_hw_scan &&
5440 test_bit(STATUS_SCANNING, &priv->status))) {
5441 IWL_DEBUG_MAC80211("leave - scanning\n");
5442 set_bit(STATUS_CONF_PENDING, &priv->status);
5443 mutex_unlock(&priv->mutex);
5444 return 0;
5445 }
5446
5447 spin_lock_irqsave(&priv->lock, flags);
5448
5449 ch_info = iwl3945_get_channel_info(priv, conf->channel->band,
5450 conf->channel->hw_value);
5451 if (!is_channel_valid(ch_info)) {
5452 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n",
5453 conf->channel->hw_value, conf->channel->band);
5454 IWL_DEBUG_MAC80211("leave - invalid channel\n");
5455 spin_unlock_irqrestore(&priv->lock, flags);
5456 ret = -EINVAL;
5457 goto out;
5458 }
5459
5460 iwl3945_set_rxon_channel(priv, conf->channel->band, conf->channel->hw_value);
5461
5462 iwl3945_set_flags_for_phymode(priv, conf->channel->band);
5463
5464 /* The list of supported rates and rate mask can be different
5465 * for each phymode; since the phymode may have changed, reset
5466 * the rate mask to what mac80211 lists */
5467 iwl3945_set_rate(priv);
5468
5469 spin_unlock_irqrestore(&priv->lock, flags);
5470
5471 #ifdef IEEE80211_CONF_CHANNEL_SWITCH
5472 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
5473 iwl3945_hw_channel_switch(priv, conf->channel);
5474 goto out;
5475 }
5476 #endif
5477
5478 iwl3945_radio_kill_sw(priv, !conf->radio_enabled);
5479
5480 if (!conf->radio_enabled) {
5481 IWL_DEBUG_MAC80211("leave - radio disabled\n");
5482 goto out;
5483 }
5484
5485 if (iwl_is_rfkill(priv)) {
5486 IWL_DEBUG_MAC80211("leave - RF kill\n");
5487 ret = -EIO;
5488 goto out;
5489 }
5490
5491 iwl3945_set_rate(priv);
5492
5493 if (memcmp(&priv->active39_rxon,
5494 &priv->staging39_rxon, sizeof(priv->staging39_rxon)))
5495 iwl3945_commit_rxon(priv);
5496 else
5497 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
5498
5499 IWL_DEBUG_MAC80211("leave\n");
5500
5501 out:
5502 clear_bit(STATUS_CONF_PENDING, &priv->status);
5503 mutex_unlock(&priv->mutex);
5504 return ret;
5505 }
5506
5507 static void iwl3945_config_ap(struct iwl_priv *priv)
5508 {
5509 int rc = 0;
5510
5511 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5512 return;
5513
5514 /* The following should be done only at AP bring up */
5515 if (!(iwl3945_is_associated(priv))) {
5516
5517 /* RXON - unassoc (to set timing command) */
5518 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5519 iwl3945_commit_rxon(priv);
5520
5521 /* RXON Timing */
5522 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
5523 iwl3945_setup_rxon_timing(priv);
5524 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
5525 sizeof(priv->rxon_timing),
5526 &priv->rxon_timing);
5527 if (rc)
5528 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
5529 "Attempting to continue.\n");
5530
5531 /* FIXME: what should be the assoc_id for AP? */
5532 priv->staging39_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
5533 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
5534 priv->staging39_rxon.flags |=
5535 RXON_FLG_SHORT_PREAMBLE_MSK;
5536 else
5537 priv->staging39_rxon.flags &=
5538 ~RXON_FLG_SHORT_PREAMBLE_MSK;
5539
5540 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) {
5541 if (priv->assoc_capability &
5542 WLAN_CAPABILITY_SHORT_SLOT_TIME)
5543 priv->staging39_rxon.flags |=
5544 RXON_FLG_SHORT_SLOT_MSK;
5545 else
5546 priv->staging39_rxon.flags &=
5547 ~RXON_FLG_SHORT_SLOT_MSK;
5548
5549 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
5550 priv->staging39_rxon.flags &=
5551 ~RXON_FLG_SHORT_SLOT_MSK;
5552 }
5553 /* restore RXON assoc */
5554 priv->staging39_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
5555 iwl3945_commit_rxon(priv);
5556 iwl3945_add_station(priv, iwl_bcast_addr, 0, 0);
5557 }
5558 iwl3945_send_beacon_cmd(priv);
5559
5560 /* FIXME - we need to add code here to detect a totally new
5561 * configuration, reset the AP, unassoc, rxon timing, assoc,
5562 * clear sta table, add BCAST sta... */
5563 }
5564
5565 static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
5566 struct ieee80211_vif *vif,
5567 struct ieee80211_if_conf *conf)
5568 {
5569 struct iwl_priv *priv = hw->priv;
5570 int rc;
5571
5572 if (conf == NULL)
5573 return -EIO;
5574
5575 if (priv->vif != vif) {
5576 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
5577 return 0;
5578 }
5579
5580 /* handle this temporarily here */
5581 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
5582 conf->changed & IEEE80211_IFCC_BEACON) {
5583 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
5584 if (!beacon)
5585 return -ENOMEM;
5586 mutex_lock(&priv->mutex);
5587 rc = iwl3945_mac_beacon_update(hw, beacon);
5588 mutex_unlock(&priv->mutex);
5589 if (rc)
5590 return rc;
5591 }
5592
5593 if (!iwl_is_alive(priv))
5594 return -EAGAIN;
5595
5596 mutex_lock(&priv->mutex);
5597
5598 if (conf->bssid)
5599 IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid);
5600
5601 /*
5602 * very dubious code was here; the probe filtering flag is never set:
5603 *
5604 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
5605 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
5606 */
5607
5608 if (priv->iw_mode == NL80211_IFTYPE_AP) {
5609 if (!conf->bssid) {
5610 conf->bssid = priv->mac_addr;
5611 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
5612 IWL_DEBUG_MAC80211("bssid was set to: %pM\n",
5613 conf->bssid);
5614 }
5615 if (priv->ibss_beacon)
5616 dev_kfree_skb(priv->ibss_beacon);
5617
5618 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
5619 }
5620
5621 if (iwl_is_rfkill(priv))
5622 goto done;
5623
5624 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
5625 !is_multicast_ether_addr(conf->bssid)) {
5626 /* If there is currently a HW scan going on in the background
5627 * then we need to cancel it, or else the RXON below will fail. */
5628 if (iwl_scan_cancel_timeout(priv, 100)) {
5629 IWL_WARN(priv, "Aborted scan still in progress "
5630 "after 100ms\n");
5631 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
5632 mutex_unlock(&priv->mutex);
5633 return -EAGAIN;
5634 }
5635 memcpy(priv->staging39_rxon.bssid_addr, conf->bssid, ETH_ALEN);
5636
5637 /* TODO: Audit driver for usage of these members and see
5638 * if mac80211 deprecates them (priv->bssid looks like it
5639 * shouldn't be there, but I haven't scanned the IBSS code
5640 * to verify) - jpk */
5641 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
5642
5643 if (priv->iw_mode == NL80211_IFTYPE_AP)
5644 iwl3945_config_ap(priv);
5645 else {
5646 rc = iwl3945_commit_rxon(priv);
5647 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
5648 iwl3945_add_station(priv,
5649 priv->active39_rxon.bssid_addr, 1, 0);
5650 }
5651
5652 } else {
5653 iwl_scan_cancel_timeout(priv, 100);
5654 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5655 iwl3945_commit_rxon(priv);
5656 }
5657
5658 done:
5659 IWL_DEBUG_MAC80211("leave\n");
5660 mutex_unlock(&priv->mutex);
5661
5662 return 0;
5663 }
5664
5665 static void iwl3945_configure_filter(struct ieee80211_hw *hw,
5666 unsigned int changed_flags,
5667 unsigned int *total_flags,
5668 int mc_count, struct dev_addr_list *mc_list)
5669 {
5670 struct iwl_priv *priv = hw->priv;
5671 __le32 *filter_flags = &priv->staging39_rxon.filter_flags;
5672
5673 IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
5674 changed_flags, *total_flags);
5675
5676 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
5677 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
5678 *filter_flags |= RXON_FILTER_PROMISC_MSK;
5679 else
5680 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
5681 }
5682 if (changed_flags & FIF_ALLMULTI) {
5683 if (*total_flags & FIF_ALLMULTI)
5684 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
5685 else
5686 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
5687 }
5688 if (changed_flags & FIF_CONTROL) {
5689 if (*total_flags & FIF_CONTROL)
5690 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
5691 else
5692 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
5693 }
5694 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
5695 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
5696 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
5697 else
5698 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
5699 }
5700
5701 /* We deliberately do not call iwl_commit_rxon here to commit the new
5702 * filter flags, since mac80211 will call ieee80211_hw_config immediately
5703 * afterwards (mc_list is not supported at this time). Otherwise we would
5704 * have to queue a background iwl_commit_rxon work item.
5705 */
5706
5707 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
5708 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
5709 }
5710
5711 static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
5712 struct ieee80211_if_init_conf *conf)
5713 {
5714 struct iwl_priv *priv = hw->priv;
5715
5716 IWL_DEBUG_MAC80211("enter\n");
5717
5718 mutex_lock(&priv->mutex);
5719
5720 if (iwl_is_ready_rf(priv)) {
5721 iwl_scan_cancel_timeout(priv, 100);
5722 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5723 iwl3945_commit_rxon(priv);
5724 }
5725 if (priv->vif == conf->vif) {
5726 priv->vif = NULL;
5727 memset(priv->bssid, 0, ETH_ALEN);
5728 }
5729 mutex_unlock(&priv->mutex);
5730
5731 IWL_DEBUG_MAC80211("leave\n");
5732 }
5733
5734 #define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
5735
5736 static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
5737 struct ieee80211_vif *vif,
5738 struct ieee80211_bss_conf *bss_conf,
5739 u32 changes)
5740 {
5741 struct iwl_priv *priv = hw->priv;
5742
5743 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
5744
5745 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
5746 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
5747 bss_conf->use_short_preamble);
5748 if (bss_conf->use_short_preamble)
5749 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
5750 else
5751 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
5752 }
5753
5754 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
5755 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
5756 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
5757 priv->staging39_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
5758 else
5759 priv->staging39_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
5760 }
5761
5762 if (changes & BSS_CHANGED_ASSOC) {
5763 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
5764 /* This should never happen as this function should
5765 * never be called from interrupt context. */
5766 if (WARN_ON_ONCE(in_interrupt()))
5767 return;
5768 if (bss_conf->assoc) {
5769 priv->assoc_id = bss_conf->aid;
5770 priv->beacon_int = bss_conf->beacon_int;
5771 priv->timestamp = bss_conf->timestamp;
5772 priv->assoc_capability = bss_conf->assoc_capability;
5773 priv->power_data.dtim_period = bss_conf->dtim_period;
5774 priv->next_scan_jiffies = jiffies +
5775 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
5776 mutex_lock(&priv->mutex);
5777 iwl3945_post_associate(priv);
5778 mutex_unlock(&priv->mutex);
5779 } else {
5780 priv->assoc_id = 0;
5781 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
5782 }
5783 } else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) {
5784 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
5785 iwl3945_send_rxon_assoc(priv);
5786 }
5787
5788 }
5789
5790 static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
5791 {
5792 int rc = 0;
5793 unsigned long flags;
5794 struct iwl_priv *priv = hw->priv;
5795 DECLARE_SSID_BUF(ssid_buf);
5796
5797 IWL_DEBUG_MAC80211("enter\n");
5798
5799 mutex_lock(&priv->mutex);
5800 spin_lock_irqsave(&priv->lock, flags);
5801
5802 if (!iwl_is_ready_rf(priv)) {
5803 rc = -EIO;
5804 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
5805 goto out_unlock;
5806 }
5807
5808 /* don't schedule a scan within the next_scan_jiffies period */
5809 if (priv->next_scan_jiffies &&
5810 time_after(priv->next_scan_jiffies, jiffies)) {
5811 rc = -EAGAIN;
5812 goto out_unlock;
5813 }
5814 /* if we just finished a scan, delay the next broadcast scan */
5815 if ((len == 0) && priv->last_scan_jiffies &&
5816 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
5817 jiffies)) {
5818 rc = -EAGAIN;
5819 goto out_unlock;
5820 }
5821 if (len) {
5822 IWL_DEBUG_SCAN("direct scan for %s [%d]\n",
5823 print_ssid(ssid_buf, ssid, len), (int)len);
5824
5825 priv->one_direct_scan = 1;
5826 priv->direct_ssid_len = (u8)
5827 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
5828 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
5829 } else
5830 priv->one_direct_scan = 0;
5831
5832 rc = iwl3945_scan_initiate(priv);
5833
5834 IWL_DEBUG_MAC80211("leave\n");
5835
5836 out_unlock:
5837 spin_unlock_irqrestore(&priv->lock, flags);
5838 mutex_unlock(&priv->mutex);
5839
5840 return rc;
5841 }
5842
5843 static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5844 struct ieee80211_vif *vif,
5845 struct ieee80211_sta *sta,
5846 struct ieee80211_key_conf *key)
5847 {
5848 struct iwl_priv *priv = hw->priv;
5849 const u8 *addr;
5850 int ret;
5851 u8 sta_id;
5852
5853 IWL_DEBUG_MAC80211("enter\n");
5854
5855 if (iwl3945_mod_params.sw_crypto) {
5856 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
5857 return -EOPNOTSUPP;
5858 }
5859
5860 addr = sta ? sta->addr : iwl_bcast_addr;
5861 sta_id = iwl3945_hw_find_station(priv, addr);
5862 if (sta_id == IWL_INVALID_STATION) {
5863 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n",
5864 addr);
5865 return -EINVAL;
5866 }
5867
5868 mutex_lock(&priv->mutex);
5869
5870 iwl_scan_cancel_timeout(priv, 100);
5871
5872 switch (cmd) {
5873 case SET_KEY:
5874 ret = iwl3945_update_sta_key_info(priv, key, sta_id);
5875 if (!ret) {
5876 iwl3945_set_rxon_hwcrypto(priv, 1);
5877 iwl3945_commit_rxon(priv);
5878 key->hw_key_idx = sta_id;
5879 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
5880 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
5881 }
5882 break;
5883 case DISABLE_KEY:
5884 ret = iwl3945_clear_sta_key_info(priv, sta_id);
5885 if (!ret) {
5886 iwl3945_set_rxon_hwcrypto(priv, 0);
5887 iwl3945_commit_rxon(priv);
5888 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
5889 }
5890 break;
5891 default:
5892 ret = -EINVAL;
5893 }
5894
5895 IWL_DEBUG_MAC80211("leave\n");
5896 mutex_unlock(&priv->mutex);
5897
5898 return ret;
5899 }
5900
5901 static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
5902 const struct ieee80211_tx_queue_params *params)
5903 {
5904 struct iwl_priv *priv = hw->priv;
5905 unsigned long flags;
5906 int q;
5907
5908 IWL_DEBUG_MAC80211("enter\n");
5909
5910 if (!iwl_is_ready_rf(priv)) {
5911 IWL_DEBUG_MAC80211("leave - RF not ready\n");
5912 return -EIO;
5913 }
5914
5915 if (queue >= AC_NUM) {
5916 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
5917 return 0;
5918 }
5919
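/* mac80211 numbers queues from highest to lowest priority, while the
 * device's EDCA parameter table is indexed the other way around, so
 * invert the index. Note that mac80211 reports txop in units of
 * 32 usec, hence the * 32 conversion below. */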
5920 q = AC_NUM - 1 - queue;
5921
5922 spin_lock_irqsave(&priv->lock, flags);
5923
5924 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
5925 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
5926 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
5927 priv->qos_data.def_qos_parm.ac[q].edca_txop =
5928 cpu_to_le16((params->txop * 32));
5929
5930 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
5931 priv->qos_data.qos_active = 1;
5932
5933 spin_unlock_irqrestore(&priv->lock, flags);
5934
5935 mutex_lock(&priv->mutex);
5936 if (priv->iw_mode == NL80211_IFTYPE_AP)
5937 iwl3945_activate_qos(priv, 1);
5938 else if (priv->assoc_id && iwl3945_is_associated(priv))
5939 iwl3945_activate_qos(priv, 0);
5940
5941 mutex_unlock(&priv->mutex);
5942
5943 IWL_DEBUG_MAC80211("leave\n");
5944 return 0;
5945 }
5946
5947 static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
5948 struct ieee80211_tx_queue_stats *stats)
5949 {
5950 struct iwl_priv *priv = hw->priv;
5951 int i, avail;
5952 struct iwl_tx_queue *txq;
5953 struct iwl_queue *q;
5954 unsigned long flags;
5955
5956 IWL_DEBUG_MAC80211("enter\n");
5957
5958 if (!iwl_is_ready_rf(priv)) {
5959 IWL_DEBUG_MAC80211("leave - RF not ready\n");
5960 return -EIO;
5961 }
5962
5963 spin_lock_irqsave(&priv->lock, flags);
5964
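/* Derive per-AC queue occupancy and limits from the driver's circular
 * buffer read/write state. */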
5965 for (i = 0; i < AC_NUM; i++) {
5966 txq = &priv->txq[i];
5967 q = &txq->q;
5968 avail = iwl_queue_space(q);
5969
5970 stats[i].len = q->n_window - avail;
5971 stats[i].limit = q->n_window - q->high_mark;
5972 stats[i].count = q->n_window;
5973
5974 }
5975 spin_unlock_irqrestore(&priv->lock, flags);
5976
5977 IWL_DEBUG_MAC80211("leave\n");
5978
5979 return 0;
5980 }
5981
5982 static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
5983 {
5984 struct iwl_priv *priv = hw->priv;
5985 unsigned long flags;
5986
5987 mutex_lock(&priv->mutex);
5988 IWL_DEBUG_MAC80211("enter\n");
5989
5990 iwl_reset_qos(priv);
5991
5992 spin_lock_irqsave(&priv->lock, flags);
5993 priv->assoc_id = 0;
5994 priv->assoc_capability = 0;
5995
5996 /* new association - get rid of the old IBSS beacon skb */
5997 if (priv->ibss_beacon)
5998 dev_kfree_skb(priv->ibss_beacon);
5999
6000 priv->ibss_beacon = NULL;
6001
6002 priv->beacon_int = priv->hw->conf.beacon_int;
6003 priv->timestamp = 0;
6004 if (priv->iw_mode == NL80211_IFTYPE_STATION)
6005 priv->beacon_int = 0;
6006
6007 spin_unlock_irqrestore(&priv->lock, flags);
6008
6009 if (!iwl_is_ready_rf(priv)) {
6010 IWL_DEBUG_MAC80211("leave - not ready\n");
6011 mutex_unlock(&priv->mutex);
6012 return;
6013 }
6014
6015 /* we are restarting association process
6016 * clear RXON_FILTER_ASSOC_MSK bit
6017 */
6018 if (priv->iw_mode != NL80211_IFTYPE_AP) {
6019 iwl_scan_cancel_timeout(priv, 100);
6020 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6021 iwl3945_commit_rxon(priv);
6022 }
6023
6024 /* Per mac80211.h: This is only used in IBSS mode... */
6025 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
6026
6027 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
6028 mutex_unlock(&priv->mutex);
6029 return;
6030 }
6031
6032 iwl3945_set_rate(priv);
6033
6034 mutex_unlock(&priv->mutex);
6035
6036 IWL_DEBUG_MAC80211("leave\n");
6037
6038 }
6039
6040 static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
6041 {
6042 struct iwl_priv *priv = hw->priv;
6043 unsigned long flags;
6044
6045 IWL_DEBUG_MAC80211("enter\n");
6046
6047 if (!iwl_is_ready_rf(priv)) {
6048 IWL_DEBUG_MAC80211("leave - RF not ready\n");
6049 return -EIO;
6050 }
6051
6052 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
6053 IWL_DEBUG_MAC80211("leave - not IBSS\n");
6054 return -EIO;
6055 }
6056
6057 spin_lock_irqsave(&priv->lock, flags);
6058
6059 if (priv->ibss_beacon)
6060 dev_kfree_skb(priv->ibss_beacon);
6061
6062 priv->ibss_beacon = skb;
6063
6064 priv->assoc_id = 0;
6065
6066 IWL_DEBUG_MAC80211("leave\n");
6067 spin_unlock_irqrestore(&priv->lock, flags);
6068
6069 iwl_reset_qos(priv);
6070
6071 iwl3945_post_associate(priv);
6072
6073
6074 return 0;
6075 }
6076
6077 /*****************************************************************************
6078 *
6079 * sysfs attributes
6080 *
6081 *****************************************************************************/
6082
6083 #ifdef CONFIG_IWL3945_DEBUG
6084
6085 /*
6086 * The following adds a new attribute to the sysfs representation
6087 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
6088 * used for controlling the debug level.
6089 *
6090 * See the level definitions in iwl for details.
6091 */
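/*
 * Illustrative usage from userspace (the sysfs path and mask value below
 * are examples only; the actual path depends on where the PCI device is
 * bound):
 *
 *   # cat /sys/bus/pci/drivers/iwl3945/0000:03:00.0/debug_level
 *   # echo 0x43fff > /sys/bus/pci/drivers/iwl3945/0000:03:00.0/debug_level
 *
 * Writes accept hex or decimal, as parsed by store_debug_level() below.
 */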
6092 static ssize_t show_debug_level(struct device *d,
6093 struct device_attribute *attr, char *buf)
6094 {
6095 struct iwl_priv *priv = d->driver_data;
6096
6097 return sprintf(buf, "0x%08X\n", priv->debug_level);
6098 }
6099 static ssize_t store_debug_level(struct device *d,
6100 struct device_attribute *attr,
6101 const char *buf, size_t count)
6102 {
6103 struct iwl_priv *priv = d->driver_data;
6104 unsigned long val;
6105 int ret;
6106
6107 ret = strict_strtoul(buf, 0, &val);
6108 if (ret)
6109 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
6110 else
6111 priv->debug_level = val;
6112
6113 return strnlen(buf, count);
6114 }
6115
6116 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
6117 show_debug_level, store_debug_level);
6118
6119 #endif /* CONFIG_IWL3945_DEBUG */
6120
6121 static ssize_t show_temperature(struct device *d,
6122 struct device_attribute *attr, char *buf)
6123 {
6124 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
6125
6126 if (!iwl_is_alive(priv))
6127 return -EAGAIN;
6128
6129 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
6130 }
6131
6132 static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
6133
6134 static ssize_t show_tx_power(struct device *d,
6135 struct device_attribute *attr, char *buf)
6136 {
6137 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
6138 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
6139 }
6140
6141 static ssize_t store_tx_power(struct device *d,
6142 struct device_attribute *attr,
6143 const char *buf, size_t count)
6144 {
6145 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
6146 char *p = (char *)buf;
6147 u32 val;
6148
6149 val = simple_strtoul(p, &p, 10);
6150 if (p == buf)
6151 IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
6152 else
6153 iwl3945_hw_reg_set_txpower(priv, val);
6154
6155 return count;
6156 }
6157
6158 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
6159
6160 static ssize_t show_flags(struct device *d,
6161 struct device_attribute *attr, char *buf)
6162 {
6163 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
6164
6165 return sprintf(buf, "0x%04X\n", priv->active39_rxon.flags);
6166 }
6167
6168 static ssize_t store_flags(struct device *d,
6169 struct device_attribute *attr,
6170 const char *buf, size_t count)
6171 {
6172 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
6173 u32 flags = simple_strtoul(buf, NULL, 0);
6174
6175 mutex_lock(&priv->mutex);
6176 if (le32_to_cpu(priv->staging39_rxon.flags) != flags) {
6177 /* Cancel any currently running scans... */
6178 if (iwl_scan_cancel_timeout(priv, 100))
6179 IWL_WARN(priv, "Could not cancel scan.\n");
6180 else {
6181 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
6182 flags);
6183 priv->staging39_rxon.flags = cpu_to_le32(flags);
6184 iwl3945_commit_rxon(priv);
6185 }
6186 }
6187 mutex_unlock(&priv->mutex);
6188
6189 return count;
6190 }
6191
6192 static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
6193
6194 static ssize_t show_filter_flags(struct device *d,
6195 struct device_attribute *attr, char *buf)
6196 {
6197 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
6198
6199 return sprintf(buf, "0x%04X\n",
6200 le32_to_cpu(priv->active39_rxon.filter_flags));
6201 }
6202
6203 static ssize_t store_filter_flags(struct device *d,
6204 struct device_attribute *attr,
6205 const char *buf, size_t count)
6206 {
6207 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
6208 u32 filter_flags = simple_strtoul(buf, NULL, 0);
6209
6210 mutex_lock(&priv->mutex);
6211 if (le32_to_cpu(priv->staging39_rxon.filter_flags) != filter_flags) {
6212 /* Cancel any currently running scans... */
6213 if (iwl_scan_cancel_timeout(priv, 100))
6214 IWL_WARN(priv, "Could not cancel scan.\n");
6215 else {
6216 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
6217 "0x%04X\n", filter_flags);
6218 priv->staging39_rxon.filter_flags =
6219 cpu_to_le32(filter_flags);
6220 iwl3945_commit_rxon(priv);
6221 }
6222 }
6223 mutex_unlock(&priv->mutex);
6224
6225 return count;
6226 }
6227
6228 static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
6229 store_filter_flags);
6230
6231 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
6232
6233 static ssize_t show_measurement(struct device *d,
6234 struct device_attribute *attr, char *buf)
6235 {
6236 struct iwl_priv *priv = dev_get_drvdata(d);
6237 struct iwl_spectrum_notification measure_report;
6238 u32 size = sizeof(measure_report), len = 0, ofs = 0;
6239 u8 *data = (u8 *)&measure_report;
6240 unsigned long flags;
6241
6242 spin_lock_irqsave(&priv->lock, flags);
6243 if (!(priv->measurement_status & MEASUREMENT_READY)) {
6244 spin_unlock_irqrestore(&priv->lock, flags);
6245 return 0;
6246 }
6247 memcpy(&measure_report, &priv->measure_report, size);
6248 priv->measurement_status = 0;
6249 spin_unlock_irqrestore(&priv->lock, flags);
6250
6251 while (size && (PAGE_SIZE - len)) {
6252 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
6253 PAGE_SIZE - len, 1);
6254 len = strlen(buf);
6255 if (PAGE_SIZE - len)
6256 buf[len++] = '\n';
6257
6258 ofs += 16;
6259 size -= min(size, 16U);
6260 }
6261
6262 return len;
6263 }
6264
6265 static ssize_t store_measurement(struct device *d,
6266 struct device_attribute *attr,
6267 const char *buf, size_t count)
6268 {
6269 struct iwl_priv *priv = dev_get_drvdata(d);
6270 struct ieee80211_measurement_params params = {
6271 .channel = le16_to_cpu(priv->active39_rxon.channel),
6272 .start_time = cpu_to_le64(priv->last_tsf),
6273 .duration = cpu_to_le16(1),
6274 };
6275 u8 type = IWL_MEASURE_BASIC;
6276 u8 buffer[32] = {0};
6277 u8 channel;
6278 
6279 if (count) {
6280 char *p = buffer;
6281 strncpy(buffer, buf, min(sizeof(buffer) - 1, count));
6282 channel = simple_strtoul(p, NULL, 0);
6283 if (channel)
6284 params.channel = channel;
6285
6286 p = buffer;
6287 while (*p && *p != ' ')
6288 p++;
6289 if (*p)
6290 type = simple_strtoul(p + 1, NULL, 0);
6291 }
6292
6293 IWL_DEBUG_INFO("Invoking measurement of type %d on "
6294 "channel %d (for '%s')\n", type, params.channel, buf);
6295 iwl3945_get_measurement(priv, &params, type);
6296
6297 return count;
6298 }
6299
6300 static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
6301 show_measurement, store_measurement);
6302 #endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
6303
6304 static ssize_t store_retry_rate(struct device *d,
6305 struct device_attribute *attr,
6306 const char *buf, size_t count)
6307 {
6308 struct iwl_priv *priv = dev_get_drvdata(d);
6309
6310 priv->retry_rate = simple_strtoul(buf, NULL, 0);
6311 if (priv->retry_rate <= 0)
6312 priv->retry_rate = 1;
6313
6314 return count;
6315 }
6316
6317 static ssize_t show_retry_rate(struct device *d,
6318 struct device_attribute *attr, char *buf)
6319 {
6320 struct iwl_priv *priv = dev_get_drvdata(d);
6321 return sprintf(buf, "%d", priv->retry_rate);
6322 }
6323
6324 static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
6325 store_retry_rate);
6326
6327 static ssize_t store_power_level(struct device *d,
6328 struct device_attribute *attr,
6329 const char *buf, size_t count)
6330 {
6331 struct iwl_priv *priv = dev_get_drvdata(d);
6332 int rc;
6333 int mode;
6334
6335 mode = simple_strtoul(buf, NULL, 0);
6336 mutex_lock(&priv->mutex);
6337
6338 if (!iwl_is_ready(priv)) {
6339 rc = -EAGAIN;
6340 goto out;
6341 }
6342
6343 if ((mode < 1) || (mode > IWL39_POWER_LIMIT) ||
6344 (mode == IWL39_POWER_AC))
6345 mode = IWL39_POWER_AC;
6346 else
6347 mode |= IWL_POWER_ENABLED;
6348
6349 if (mode != priv->power_mode) {
6350 rc = iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(mode));
6351 if (rc) {
6352 IWL_DEBUG_MAC80211("failed setting power mode.\n");
6353 goto out;
6354 }
6355 priv->power_mode = mode;
6356 }
6357
6358 rc = count;
6359
6360 out:
6361 mutex_unlock(&priv->mutex);
6362 return rc;
6363 }
6364
6365 #define MAX_WX_STRING 80
6366
6367 /* Values are in microseconds, indexed by (power level - 1) */
6368 static const s32 timeout_duration[] = {
6369 350000,
6370 250000,
6371 75000,
6372 37000,
6373 25000,
6374 };
6375 static const s32 period_duration[] = {
6376 400000,
6377 700000,
6378 1000000,
6379 1000000,
6380 1000000
6381 };
6382
6383 static ssize_t show_power_level(struct device *d,
6384 struct device_attribute *attr, char *buf)
6385 {
6386 struct iwl_priv *priv = dev_get_drvdata(d);
6387 int level = IWL_POWER_LEVEL(priv->power_mode);
6388 char *p = buf;
6389
6390 p += sprintf(p, "%d ", level);
6391 switch (level) {
6392 case IWL_POWER_MODE_CAM:
6393 case IWL39_POWER_AC:
6394 p += sprintf(p, "(AC)");
6395 break;
6396 case IWL39_POWER_BATTERY:
6397 p += sprintf(p, "(BATTERY)");
6398 break;
6399 default:
6400 p += sprintf(p,
6401 "(Timeout %dms, Period %dms)",
6402 timeout_duration[level - 1] / 1000,
6403 period_duration[level - 1] / 1000);
6404 }
6405
6406 if (!(priv->power_mode & IWL_POWER_ENABLED))
6407 p += sprintf(p, " OFF\n");
6408 else
6409 p += sprintf(p, " \n");
6410
6411 return p - buf + 1;
6412
6413 }
6414
6415 static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
6416 store_power_level);
6417
6418 static ssize_t show_channels(struct device *d,
6419 struct device_attribute *attr, char *buf)
6420 {
6421 /* all this shit doesn't belong in sysfs anyway */
6422 return 0;
6423 }
6424
6425 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
6426
6427 static ssize_t show_statistics(struct device *d,
6428 struct device_attribute *attr, char *buf)
6429 {
6430 struct iwl_priv *priv = dev_get_drvdata(d);
6431 u32 size = sizeof(struct iwl3945_notif_statistics);
6432 u32 len = 0, ofs = 0;
6433 u8 *data = (u8 *)&priv->statistics_39;
6434 int rc = 0;
6435
6436 if (!iwl_is_alive(priv))
6437 return -EAGAIN;
6438
6439 mutex_lock(&priv->mutex);
6440 rc = iwl3945_send_statistics_request(priv);
6441 mutex_unlock(&priv->mutex);
6442
6443 if (rc) {
6444 len = sprintf(buf,
6445 "Error sending statistics request: 0x%08X\n", rc);
6446 return len;
6447 }
6448
6449 while (size && (PAGE_SIZE - len)) {
6450 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
6451 PAGE_SIZE - len, 1);
6452 len = strlen(buf);
6453 if (PAGE_SIZE - len)
6454 buf[len++] = '\n';
6455
6456 ofs += 16;
6457 size -= min(size, 16U);
6458 }
6459
6460 return len;
6461 }
6462
6463 static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
6464
6465 static ssize_t show_antenna(struct device *d,
6466 struct device_attribute *attr, char *buf)
6467 {
6468 struct iwl_priv *priv = dev_get_drvdata(d);
6469
6470 if (!iwl_is_alive(priv))
6471 return -EAGAIN;
6472
6473 return sprintf(buf, "%d\n", priv->antenna);
6474 }
6475
6476 static ssize_t store_antenna(struct device *d,
6477 struct device_attribute *attr,
6478 const char *buf, size_t count)
6479 {
6480 int ant;
6481 struct iwl_priv *priv = dev_get_drvdata(d);
6482
6483 if (count == 0)
6484 return 0;
6485
6486 if (sscanf(buf, "%1i", &ant) != 1) {
6487 IWL_DEBUG_INFO("not in hex or decimal form.\n");
6488 return count;
6489 }
6490
6491 if ((ant >= 0) && (ant <= 2)) {
6492 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
6493 priv->antenna = (enum iwl3945_antenna)ant;
6494 } else
6495 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
6496
6497
6498 return count;
6499 }
6500
6501 static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
6502
6503 static ssize_t show_status(struct device *d,
6504 struct device_attribute *attr, char *buf)
6505 {
6506 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
6507 if (!iwl_is_alive(priv))
6508 return -EAGAIN;
6509 return sprintf(buf, "0x%08x\n", (int)priv->status);
6510 }
6511
6512 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
6513
6514 static ssize_t dump_error_log(struct device *d,
6515 struct device_attribute *attr,
6516 const char *buf, size_t count)
6517 {
6518 char *p = (char *)buf;
6519
6520 if (p[0] == '1')
6521 iwl3945_dump_nic_error_log((struct iwl_priv *)d->driver_data);
6522
6523 return strnlen(buf, count);
6524 }
6525
6526 static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
6527
6528 static ssize_t dump_event_log(struct device *d,
6529 struct device_attribute *attr,
6530 const char *buf, size_t count)
6531 {
6532 char *p = (char *)buf;
6533
6534 if (p[0] == '1')
6535 iwl3945_dump_nic_event_log((struct iwl_priv *)d->driver_data);
6536
6537 return strnlen(buf, count);
6538 }
6539
6540 static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
6541
6542 /*****************************************************************************
6543 *
6544 * driver setup and tear down
6545 *
6546 *****************************************************************************/
6547
6548 static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
6549 {
6550 priv->workqueue = create_workqueue(DRV_NAME);
6551
6552 init_waitqueue_head(&priv->wait_command_queue);
6553
6554 INIT_WORK(&priv->up, iwl3945_bg_up);
6555 INIT_WORK(&priv->restart, iwl3945_bg_restart);
6556 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
6557 INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
6558 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
6559 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
6560 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
6561 INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
6562 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
6563 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
6564 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
6565 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
6566
6567 iwl3945_hw_setup_deferred_work(priv);
6568
6569 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6570 iwl3945_irq_tasklet, (unsigned long)priv);
6571 }
6572
6573 static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
6574 {
6575 iwl3945_hw_cancel_deferred_work(priv);
6576
6577 cancel_delayed_work_sync(&priv->init_alive_start);
6578 cancel_delayed_work(&priv->scan_check);
6579 cancel_delayed_work(&priv->alive_start);
6580 cancel_work_sync(&priv->beacon_update);
6581 }
6582
6583 static struct attribute *iwl3945_sysfs_entries[] = {
6584 &dev_attr_antenna.attr,
6585 &dev_attr_channels.attr,
6586 &dev_attr_dump_errors.attr,
6587 &dev_attr_dump_events.attr,
6588 &dev_attr_flags.attr,
6589 &dev_attr_filter_flags.attr,
6590 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
6591 &dev_attr_measurement.attr,
6592 #endif
6593 &dev_attr_power_level.attr,
6594 &dev_attr_retry_rate.attr,
6595 &dev_attr_statistics.attr,
6596 &dev_attr_status.attr,
6597 &dev_attr_temperature.attr,
6598 &dev_attr_tx_power.attr,
6599 #ifdef CONFIG_IWL3945_DEBUG
6600 &dev_attr_debug_level.attr,
6601 #endif
6602 NULL
6603 };
6604
6605 static struct attribute_group iwl3945_attribute_group = {
6606 .name = NULL, /* put in device directory */
6607 .attrs = iwl3945_sysfs_entries,
6608 };
6609
6610 static struct ieee80211_ops iwl3945_hw_ops = {
6611 .tx = iwl3945_mac_tx,
6612 .start = iwl3945_mac_start,
6613 .stop = iwl3945_mac_stop,
6614 .add_interface = iwl3945_mac_add_interface,
6615 .remove_interface = iwl3945_mac_remove_interface,
6616 .config = iwl3945_mac_config,
6617 .config_interface = iwl3945_mac_config_interface,
6618 .configure_filter = iwl3945_configure_filter,
6619 .set_key = iwl3945_mac_set_key,
6620 .get_tx_stats = iwl3945_mac_get_tx_stats,
6621 .conf_tx = iwl3945_mac_conf_tx,
6622 .reset_tsf = iwl3945_mac_reset_tsf,
6623 .bss_info_changed = iwl3945_bss_info_changed,
6624 .hw_scan = iwl3945_mac_hw_scan
6625 };
6626
6627 static int iwl3945_init_drv(struct iwl_priv *priv)
6628 {
6629 int ret;
6630
6631 priv->retry_rate = 1;
6632 priv->ibss_beacon = NULL;
6633
6634 spin_lock_init(&priv->lock);
6635 spin_lock_init(&priv->power_data.lock);
6636 spin_lock_init(&priv->sta_lock);
6637 spin_lock_init(&priv->hcmd_lock);
6638
6639 INIT_LIST_HEAD(&priv->free_frames);
6640
6641 mutex_init(&priv->mutex);
6642
6643 /* Clear the driver's (not device's) station table */
6644 iwl3945_clear_stations_table(priv);
6645
6646 priv->data_retry_limit = -1;
6647 priv->ieee_channels = NULL;
6648 priv->ieee_rates = NULL;
6649 priv->band = IEEE80211_BAND_2GHZ;
6650
6651 priv->iw_mode = NL80211_IFTYPE_STATION;
6652
6653 iwl_reset_qos(priv);
6654
6655 priv->qos_data.qos_active = 0;
6656 priv->qos_data.qos_cap.val = 0;
6657
6658 priv->rates_mask = IWL_RATES_MASK;
6659 /* If power management is turned on, default to AC mode */
6660 priv->power_mode = IWL39_POWER_AC;
6661 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
6662
6663 ret = iwl3945_init_channel_map(priv);
6664 if (ret) {
6665 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
6666 goto err;
6667 }
6668
6669 ret = iwl3945_init_geos(priv);
6670 if (ret) {
6671 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
6672 goto err_free_channel_map;
6673 }
6674
6675 return 0;
6676
6677 err_free_channel_map:
6678 iwl3945_free_channel_map(priv);
6679 err:
6680 return ret;
6681 }
6682
6683 static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6684 {
6685 int err = 0;
6686 struct iwl_priv *priv;
6687 struct ieee80211_hw *hw;
6688 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
6689 unsigned long flags;
6690
6691 /***********************
6692 * 1. Allocating HW data
6693 * ********************/
6694
6695 /* mac80211 allocates memory for this device instance, including
6696 * space for this driver's private structure */
6697 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
6698 if (hw == NULL) {
6699 printk(KERN_ERR DRV_NAME "Can not allocate network device\n");
6700 err = -ENOMEM;
6701 goto out;
6702 }
6703 priv = hw->priv;
6704 SET_IEEE80211_DEV(hw, &pdev->dev);
6705
6706 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) ||
6707 (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) {
6708 IWL_ERR(priv,
6709 "invalid queues_num, should be between %d and %d\n",
6710 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
6711 err = -EINVAL;
6712 goto out_ieee80211_free_hw;
6713 }
6714
6715 /*
6716 * Disabling hardware scan means that mac80211 will perform scans
6717 * "the hard way", rather than using device's scan.
6718 */
6719 if (iwl3945_mod_params.disable_hw_scan) {
6720 IWL_DEBUG_INFO("Disabling hw_scan\n");
6721 iwl3945_hw_ops.hw_scan = NULL;
6722 }
6723
6724
6725 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
6726 priv->cfg = cfg;
6727 priv->pci_dev = pdev;
6728
6729 #ifdef CONFIG_IWL3945_DEBUG
6730 priv->debug_level = iwl3945_mod_params.debug;
6731 atomic_set(&priv->restrict_refcnt, 0);
6732 #endif
6733 hw->rate_control_algorithm = "iwl-3945-rs";
6734 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
6735
6736 /* Select antenna (may be helpful if only one antenna is connected) */
6737 priv->antenna = (enum iwl3945_antenna)iwl3945_mod_params.antenna;
6738
6739 /* Tell mac80211 our characteristics */
6740 hw->flags = IEEE80211_HW_SIGNAL_DBM |
6741 IEEE80211_HW_NOISE_DBM;
6742
6743 hw->wiphy->interface_modes =
6744 BIT(NL80211_IFTYPE_STATION) |
6745 BIT(NL80211_IFTYPE_ADHOC);
6746
6747 hw->wiphy->custom_regulatory = true;
6748
6749 /* 4 EDCA QOS priorities */
6750 hw->queues = 4;
6751
6752 /***************************
6753 * 2. Initializing PCI bus
6754 * *************************/
6755 if (pci_enable_device(pdev)) {
6756 err = -ENODEV;
6757 goto out_ieee80211_free_hw;
6758 }
6759
6760 pci_set_master(pdev);
6761
6762 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6763 if (!err)
6764 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
6765 if (err) {
6766 IWL_WARN(priv, "No suitable DMA available.\n");
6767 goto out_pci_disable_device;
6768 }
6769
6770 pci_set_drvdata(pdev, priv);
6771 err = pci_request_regions(pdev, DRV_NAME);
6772 if (err)
6773 goto out_pci_disable_device;
6774
6775 /***********************
6776 * 3. Read REV Register
6777 * ********************/
6778 priv->hw_base = pci_iomap(pdev, 0, 0);
6779 if (!priv->hw_base) {
6780 err = -ENODEV;
6781 goto out_pci_release_regions;
6782 }
6783
6784 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
6785 (unsigned long long) pci_resource_len(pdev, 0));
6786 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
6787
6788 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6789 * PCI Tx retries from interfering with C3 CPU state */
6790 pci_write_config_byte(pdev, 0x41, 0x00);
6791
6792 /* apm init */
6793 err = priv->cfg->ops->lib->apm_ops.init(priv);
6794 if (err < 0) {
6795 IWL_DEBUG_INFO("Failed to init APMG\n");
6796 goto out_iounmap;
6797 }
6798
6799 /***********************
6800 * 4. Read EEPROM
6801 * ********************/
6802
6803 /* Read the EEPROM */
6804 err = iwl3945_eeprom_init(priv);
6805 if (err) {
6806 IWL_ERR(priv, "Unable to init EEPROM\n");
6807 goto out_iounmap;
6808 }
6809 /* MAC Address location in EEPROM same for 3945/4965 */
6810 get_eeprom_mac(priv, priv->mac_addr);
6811 IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr);
6812 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
6813
6814 /***********************
6815 * 5. Setup HW Constants
6816 * ********************/
6817 /* Device-specific setup */
6818 if (iwl3945_hw_set_hw_params(priv)) {
6819 IWL_ERR(priv, "failed to set hw settings\n");
6820 goto out_iounmap;
6821 }
6822
6823 /***********************
6824 * 6. Setup priv
6825 * ********************/
6826
6827 err = iwl3945_init_drv(priv);
6828 if (err) {
6829 IWL_ERR(priv, "initializing driver failed\n");
6830 goto out_free_geos;
6831 }
6832
6833 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
6834 priv->cfg->name);
6835
6836 /***********************************
6837 * 7. Initialize Module Parameters
6838 * **********************************/
6839
6840 /* Initialize module parameter values here */
6841 /* Disable radio (SW RF KILL) via parameter when loading driver */
6842 if (iwl3945_mod_params.disable) {
6843 set_bit(STATUS_RF_KILL_SW, &priv->status);
6844 IWL_DEBUG_INFO("Radio disabled.\n");
6845 }
6846
6847
6848 /***********************
6849 * 8. Setup Services
6850 * ********************/
6851
6852 spin_lock_irqsave(&priv->lock, flags);
6853 iwl3945_disable_interrupts(priv);
6854 spin_unlock_irqrestore(&priv->lock, flags);
6855
6856 pci_enable_msi(priv->pci_dev);
6857
6858 err = request_irq(priv->pci_dev->irq, iwl3945_isr, IRQF_SHARED,
6859 DRV_NAME, priv);
6860 if (err) {
6861 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
6862 goto out_disable_msi;
6863 }
6864
6865 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
6866 if (err) {
6867 IWL_ERR(priv, "failed to create sysfs device attributes\n");
6868 goto out_release_irq;
6869 }
6870
6871 iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
6872 iwl3945_setup_deferred_work(priv);
6873 iwl3945_setup_rx_handlers(priv);
6874
6875 /*********************************
6876 * 9. Setup and Register mac80211
6877 * *******************************/
6878
6879 err = ieee80211_register_hw(priv->hw);
6880 if (err) {
6881 IWL_ERR(priv, "Failed to register network device: %d\n", err);
6882 goto out_remove_sysfs;
6883 }
6884
6885 priv->hw->conf.beacon_int = 100;
6886 priv->mac80211_registered = 1;
6887
6888 err = iwl_rfkill_init(priv);
6889 if (err)
6890 IWL_ERR(priv, "Unable to initialize RFKILL system. "
6891 "Ignoring error: %d\n", err);
6892
6893 /* Start monitoring the killswitch */
6894 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
6895 2 * HZ);
6896
6897 return 0;
6898
6899 out_remove_sysfs:
6900 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
6901 out_free_geos:
6902 iwl3945_free_geos(priv);
6903
6904 out_release_irq:
6905 free_irq(priv->pci_dev->irq, priv);
6906 destroy_workqueue(priv->workqueue);
6907 priv->workqueue = NULL;
6908 iwl3945_unset_hw_params(priv);
6909 out_disable_msi:
6910 pci_disable_msi(priv->pci_dev);
6911 out_iounmap:
6912 pci_iounmap(pdev, priv->hw_base);
6913 out_pci_release_regions:
6914 pci_release_regions(pdev);
6915 out_pci_disable_device:
6916 pci_disable_device(pdev);
6917 pci_set_drvdata(pdev, NULL);
6918 out_ieee80211_free_hw:
6919 ieee80211_free_hw(priv->hw);
6920 out:
6921 return err;
6922 }
6923
6924 static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
6925 {
6926 struct iwl_priv *priv = pci_get_drvdata(pdev);
6927 unsigned long flags;
6928
6929 if (!priv)
6930 return;
6931
6932 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
6933
6934 set_bit(STATUS_EXIT_PENDING, &priv->status);
6935
6936 if (priv->mac80211_registered) {
6937 ieee80211_unregister_hw(priv->hw);
6938 priv->mac80211_registered = 0;
6939 } else {
6940 iwl3945_down(priv);
6941 }
6942
6943 /* make sure we flush any pending irq or
6944 * tasklet for the driver
6945 */
6946 spin_lock_irqsave(&priv->lock, flags);
6947 iwl3945_disable_interrupts(priv);
6948 spin_unlock_irqrestore(&priv->lock, flags);
6949
6950 iwl_synchronize_irq(priv);
6951
6952 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
6953
6954 iwl_rfkill_unregister(priv);
6955 cancel_delayed_work(&priv->rfkill_poll);
6956
6957 iwl3945_dealloc_ucode_pci(priv);
6958
6959 if (priv->rxq.bd)
6960 iwl_rx_queue_free(priv, &priv->rxq);
6961 iwl3945_hw_txq_ctx_free(priv);
6962
6963 iwl3945_unset_hw_params(priv);
6964 iwl3945_clear_stations_table(priv);
6965
6966 /*netif_stop_queue(dev); */
6967 flush_workqueue(priv->workqueue);
6968
6969 /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
6970 * priv->workqueue... so we can't take down the workqueue
6971 * until now... */
6972 destroy_workqueue(priv->workqueue);
6973 priv->workqueue = NULL;
6974
6975 free_irq(pdev->irq, priv);
6976 pci_disable_msi(pdev);
6977
6978 pci_iounmap(pdev, priv->hw_base);
6979 pci_release_regions(pdev);
6980 pci_disable_device(pdev);
6981 pci_set_drvdata(pdev, NULL);
6982
6983 iwl3945_free_channel_map(priv);
6984 iwl3945_free_geos(priv);
6985 kfree(priv->scan);
6986 if (priv->ibss_beacon)
6987 dev_kfree_skb(priv->ibss_beacon);
6988
6989 ieee80211_free_hw(priv->hw);
6990 }
6991
6992 #ifdef CONFIG_PM
6993
6994 static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
6995 {
6996 struct iwl_priv *priv = pci_get_drvdata(pdev);
6997
6998 if (priv->is_open) {
6999 set_bit(STATUS_IN_SUSPEND, &priv->status);
7000 iwl3945_mac_stop(priv->hw);
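/* iwl3945_mac_stop() clears is_open; set it again so that resume
 * knows it has to restart the interface. */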
7001 priv->is_open = 1;
7002 }
7003 pci_save_state(pdev);
7004 pci_disable_device(pdev);
7005 pci_set_power_state(pdev, PCI_D3hot);
7006
7007 return 0;
7008 }
7009
7010 static int iwl3945_pci_resume(struct pci_dev *pdev)
7011 {
7012 struct iwl_priv *priv = pci_get_drvdata(pdev);
7013
7014 pci_set_power_state(pdev, PCI_D0);
7015 pci_enable_device(pdev);
7016 pci_restore_state(pdev);
7017
7018 if (priv->is_open)
7019 iwl3945_mac_start(priv->hw);
7020
7021 clear_bit(STATUS_IN_SUSPEND, &priv->status);
7022 return 0;
7023 }
7024
7025 #endif /* CONFIG_PM */
7026
7027 /*****************************************************************************
7028 *
7029 * driver and module entry point
7030 *
7031 *****************************************************************************/
7032
7033 static struct pci_driver iwl3945_driver = {
7034 .name = DRV_NAME,
7035 .id_table = iwl3945_hw_card_ids,
7036 .probe = iwl3945_pci_probe,
7037 .remove = __devexit_p(iwl3945_pci_remove),
7038 #ifdef CONFIG_PM
7039 .suspend = iwl3945_pci_suspend,
7040 .resume = iwl3945_pci_resume,
7041 #endif
7042 };
7043
7044 static int __init iwl3945_init(void)
7045 {
7046
7047 int ret;
7048 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
7049 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
7050
7051 ret = iwl3945_rate_control_register();
7052 if (ret) {
7053 printk(KERN_ERR DRV_NAME
7054 "Unable to register rate control algorithm: %d\n", ret);
7055 return ret;
7056 }
7057
7058 ret = pci_register_driver(&iwl3945_driver);
7059 if (ret) {
7060 printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n");
7061 goto error_register;
7062 }
7063
7064 return ret;
7065
7066 error_register:
7067 iwl3945_rate_control_unregister();
7068 return ret;
7069 }
7070
7071 static void __exit iwl3945_exit(void)
7072 {
7073 pci_unregister_driver(&iwl3945_driver);
7074 iwl3945_rate_control_unregister();
7075 }
7076
7077 MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
7078
7079 module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444);
7080 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
7081 module_param_named(disable, iwl3945_mod_params.disable, int, 0444);
7082 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
7083 module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444);
7084 MODULE_PARM_DESC(swcrypto,
7085 "using software crypto (default 1 [software])\n");
7086 module_param_named(debug, iwl3945_mod_params.debug, uint, 0444);
7087 MODULE_PARM_DESC(debug, "debug output mask");
7088 module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444);
7089 MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
7090
7091 module_param_named(queues_num, iwl3945_mod_params.num_of_queues, int, 0444);
7092 MODULE_PARM_DESC(queues_num, "number of hw queues.");
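/*
 * Illustrative module load (parameter values are examples, not
 * recommendations):
 *
 *   # modprobe iwl3945 disable_hw_scan=1 swcrypto=0 debug=0x43fff
 *
 * With disable_hw_scan=1, mac80211 performs software scanning and the
 * hw_scan callback is cleared in iwl3945_pci_probe().
 */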
7093
7094 module_exit(iwl3945_exit);
7095 module_init(iwl3945_init);