drivers/net/wireless/ipw2x00/ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37
38
39 #ifndef KBUILD_EXTMOD
40 #define VK "k"
41 #else
42 #define VK
43 #endif
44
45 #ifdef CONFIG_IPW2200_DEBUG
46 #define VD "d"
47 #else
48 #define VD
49 #endif
50
51 #ifdef CONFIG_IPW2200_MONITOR
52 #define VM "m"
53 #else
54 #define VM
55 #endif
56
57 #ifdef CONFIG_IPW2200_PROMISCUOUS
58 #define VP "p"
59 #else
60 #define VP
61 #endif
62
63 #ifdef CONFIG_IPW2200_RADIOTAP
64 #define VR "r"
65 #else
66 #define VR
67 #endif
68
69 #ifdef CONFIG_IPW2200_QOS
70 #define VQ "q"
71 #else
72 #define VQ
73 #endif
74
75 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
76 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
77 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
78 #define DRV_VERSION IPW2200_VERSION
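/*
 * Editor's note (illustration, not from the original source): the version
 * string is the base "1.2.2" followed by one letter per build option
 * enabled above.  For example, an in-tree build (KBUILD_EXTMOD unset)
 * with CONFIG_IPW2200_DEBUG and CONFIG_IPW2200_QOS selected reports
 * "1.2.2kdq" through MODULE_VERSION() below.
 */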
79
80 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
81
82 MODULE_DESCRIPTION(DRV_DESCRIPTION);
83 MODULE_VERSION(DRV_VERSION);
84 MODULE_AUTHOR(DRV_COPYRIGHT);
85 MODULE_LICENSE("GPL");
86 MODULE_FIRMWARE("ipw2200-ibss.fw");
87 #ifdef CONFIG_IPW2200_MONITOR
88 MODULE_FIRMWARE("ipw2200-sniffer.fw");
89 #endif
90 MODULE_FIRMWARE("ipw2200-bss.fw");
91
92 static int cmdlog = 0;
93 static int debug = 0;
94 static int default_channel = 0;
95 static int network_mode = 0;
96
97 static u32 ipw_debug_level;
98 static int associate;
99 static int auto_create = 1;
100 static int led_support = 1;
101 static int disable = 0;
102 static int bt_coexist = 0;
103 static int hwcrypto = 0;
104 static int roaming = 1;
105 static const char ipw_modes[] = {
106 'a', 'b', 'g', '?'
107 };
108 static int antenna = CFG_SYS_ANTENNA_BOTH;
109
110 #ifdef CONFIG_IPW2200_PROMISCUOUS
111 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
112 #endif
113
114 static struct ieee80211_rate ipw2200_rates[] = {
115 { .bitrate = 10 },
116 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
117 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 { .bitrate = 60 },
120 { .bitrate = 90 },
121 { .bitrate = 120 },
122 { .bitrate = 180 },
123 { .bitrate = 240 },
124 { .bitrate = 360 },
125 { .bitrate = 480 },
126 { .bitrate = 540 }
127 };
128
129 #define ipw2200_a_rates (ipw2200_rates + 4)
130 #define ipw2200_num_a_rates 8
131 #define ipw2200_bg_rates (ipw2200_rates + 0)
132 #define ipw2200_num_bg_rates 12
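/*
 * Editor's note (assumes the usual cfg80211 convention for struct
 * ieee80211_rate): .bitrate is in units of 100 kbit/s, so the table above
 * covers the 1/2/5.5/11 Mbps CCK rates followed by the 6-54 Mbps OFDM
 * rates.  ipw2200_a_rates simply skips the four CCK entries, which is why
 * it starts at offset 4 and holds 8 rates.
 */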
133
134 /* Ugly macro to convert literal channel numbers into their MHz equivalents.
135  * There are certainly some conditions that will break this (like feeding it '30'),
136  * but they shouldn't arise since nothing talks on channel 30. */
137 #define ieee80211chan2mhz(x) \
138 (((x) <= 14) ? \
139 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
140 ((x) + 1000) * 5)
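/*
 * Worked examples (editor's illustration): channel 1 maps to
 * (1 * 5) + 2407 = 2412 MHz, channel 14 is special-cased to 2484 MHz, and
 * a 5 GHz channel such as 36 maps to (36 + 1000) * 5 = 5180 MHz.
 */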
141
142 #ifdef CONFIG_IPW2200_QOS
143 static int qos_enable = 0;
144 static int qos_burst_enable = 0;
145 static int qos_no_ack_mask = 0;
146 static int burst_duration_CCK = 0;
147 static int burst_duration_OFDM = 0;
148
149 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
150 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
151 QOS_TX3_CW_MIN_OFDM},
152 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
153 QOS_TX3_CW_MAX_OFDM},
154 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
155 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
156 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
157 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
158 };
159
160 static struct libipw_qos_parameters def_qos_parameters_CCK = {
161 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
162 QOS_TX3_CW_MIN_CCK},
163 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
164 QOS_TX3_CW_MAX_CCK},
165 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
166 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
167 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
168 QOS_TX3_TXOP_LIMIT_CCK}
169 };
170
171 static struct libipw_qos_parameters def_parameters_OFDM = {
172 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
173 DEF_TX3_CW_MIN_OFDM},
174 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
175 DEF_TX3_CW_MAX_OFDM},
176 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
177 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
178 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
179 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
180 };
181
182 static struct libipw_qos_parameters def_parameters_CCK = {
183 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
184 DEF_TX3_CW_MIN_CCK},
185 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
186 DEF_TX3_CW_MAX_CCK},
187 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
188 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
189 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
190 DEF_TX3_TXOP_LIMIT_CCK}
191 };
192
193 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
194
195 static int from_priority_to_tx_queue[] = {
196 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
197 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
198 };
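/*
 * Editor's note: the table above maps the eight 802.11e user priorities
 * (0-7) onto the four TX queues following the usual WMM access-category
 * grouping: priorities 1-2 (background) -> IPW_TX_QUEUE_2, 0 and 3 (best
 * effort) -> IPW_TX_QUEUE_1, 4-5 (video) -> IPW_TX_QUEUE_3, and 6-7
 * (voice) -> IPW_TX_QUEUE_4.
 */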
199
200 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
201
202 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
203 *qos_param);
204 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
205 *qos_param);
206 #endif /* CONFIG_IPW2200_QOS */
207
208 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
209 static void ipw_remove_current_network(struct ipw_priv *priv);
210 static void ipw_rx(struct ipw_priv *priv);
211 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
212 struct clx2_tx_queue *txq, int qindex);
213 static int ipw_queue_reset(struct ipw_priv *priv);
214
215 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
216 int len, int sync);
217
218 static void ipw_tx_queue_free(struct ipw_priv *);
219
220 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
221 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
222 static void ipw_rx_queue_replenish(void *);
223 static int ipw_up(struct ipw_priv *);
224 static void ipw_bg_up(struct work_struct *work);
225 static void ipw_down(struct ipw_priv *);
226 static void ipw_bg_down(struct work_struct *work);
227 static int ipw_config(struct ipw_priv *);
228 static int init_supported_rates(struct ipw_priv *priv,
229 struct ipw_supported_rates *prates);
230 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
231 static void ipw_send_wep_keys(struct ipw_priv *, int);
232
233 static int snprint_line(char *buf, size_t count,
234 const u8 * data, u32 len, u32 ofs)
235 {
236 int out, i, j, l;
237 char c;
238
239 out = snprintf(buf, count, "%08X", ofs);
240
241 for (l = 0, i = 0; i < 2; i++) {
242 out += snprintf(buf + out, count - out, " ");
243 for (j = 0; j < 8 && l < len; j++, l++)
244 out += snprintf(buf + out, count - out, "%02X ",
245 data[(i * 8 + j)]);
246 for (; j < 8; j++)
247 out += snprintf(buf + out, count - out, " ");
248 }
249
250 out += snprintf(buf + out, count - out, " ");
251 for (l = 0, i = 0; i < 2; i++) {
252 out += snprintf(buf + out, count - out, " ");
253 for (j = 0; j < 8 && l < len; j++, l++) {
254 c = data[(i * 8 + j)];
255 if (!isascii(c) || !isprint(c))
256 c = '.';
257
258 out += snprintf(buf + out, count - out, "%c", c);
259 }
260
261 for (; j < 8; j++)
262 out += snprintf(buf + out, count - out, " ");
263 }
264
265 return out;
266 }
267
268 static void printk_buf(int level, const u8 * data, u32 len)
269 {
270 char line[81];
271 u32 ofs = 0;
272 if (!(ipw_debug_level & level))
273 return;
274
275 while (len) {
276 snprint_line(line, sizeof(line), &data[ofs],
277 min(len, 16U), ofs);
278 printk(KERN_DEBUG "%s\n", line);
279 ofs += 16;
280 len -= min(len, 16U);
281 }
282 }
283
284 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
285 {
286 size_t out = size;
287 u32 ofs = 0;
288 int total = 0;
289
290 while (size && len) {
291 out = snprint_line(output, size, &data[ofs],
292 min_t(size_t, len, 16U), ofs);
293
294 ofs += 16;
295 output += out;
296 size -= out;
297 len -= min_t(size_t, len, 16U);
298 total += out;
299 }
300 return total;
301 }
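/*
 * Usage sketch (editor's illustration; data/len are placeholders and
 * IPW_DL_IO is one of the debug bits from ipw2200.h): the helpers above
 * produce hexdump-style lines of up to 16 bytes each: offset first, hex
 * bytes next, printable ASCII last.
 *
 *	printk_buf(IPW_DL_IO, data, len);
 *
 * printk_buf() only emits output when the given bit is set in
 * ipw_debug_level, while snprintk_buf() formats into a caller-supplied
 * buffer (used by the cmd_log attribute further down).
 */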
302
303 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
304 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
305 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
306
307 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
308 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
309 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
310
311 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
312 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
313 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
314 {
315 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
316 __LINE__, (u32) (b), (u32) (c));
317 _ipw_write_reg8(a, b, c);
318 }
319
320 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
321 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
322 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
323 {
324 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
325 __LINE__, (u32) (b), (u32) (c));
326 _ipw_write_reg16(a, b, c);
327 }
328
329 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
330 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
331 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
332 {
333 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
334 __LINE__, (u32) (b), (u32) (c));
335 _ipw_write_reg32(a, b, c);
336 }
337
338 /* 8-bit direct write (low 4K) */
339 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
340 u8 val)
341 {
342 writeb(val, ipw->hw_base + ofs);
343 }
344
345 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
346 #define ipw_write8(ipw, ofs, val) do { \
347 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
348 __LINE__, (u32)(ofs), (u32)(val)); \
349 _ipw_write8(ipw, ofs, val); \
350 } while (0)
351
352 /* 16-bit direct write (low 4K) */
353 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
354 u16 val)
355 {
356 writew(val, ipw->hw_base + ofs);
357 }
358
359 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
360 #define ipw_write16(ipw, ofs, val) do { \
361 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
362 __LINE__, (u32)(ofs), (u32)(val)); \
363 _ipw_write16(ipw, ofs, val); \
364 } while (0)
365
366 /* 32-bit direct write (low 4K) */
367 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
368 u32 val)
369 {
370 writel(val, ipw->hw_base + ofs);
371 }
372
373 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
374 #define ipw_write32(ipw, ofs, val) do { \
375 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
376 __LINE__, (u32)(ofs), (u32)(val)); \
377 _ipw_write32(ipw, ofs, val); \
378 } while (0)
379
380 /* 8-bit direct read (low 4K) */
381 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
382 {
383 return readb(ipw->hw_base + ofs);
384 }
385
386 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
387 #define ipw_read8(ipw, ofs) ({ \
388 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
389 (u32)(ofs)); \
390 _ipw_read8(ipw, ofs); \
391 })
392
393 /* 16-bit direct read (low 4K) */
394 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
395 {
396 return readw(ipw->hw_base + ofs);
397 }
398
399 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
400 #define ipw_read16(ipw, ofs) ({ \
401 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
402 (u32)(ofs)); \
403 _ipw_read16(ipw, ofs); \
404 })
405
406 /* 32-bit direct read (low 4K) */
407 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
408 {
409 return readl(ipw->hw_base + ofs);
410 }
411
412 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
413 #define ipw_read32(ipw, ofs) ({ \
414 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
415 (u32)(ofs)); \
416 _ipw_read32(ipw, ofs); \
417 })
418
419 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
420 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
421 #define ipw_read_indirect(a, b, c, d) ({ \
422 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
423 __LINE__, (u32)(b), (u32)(d)); \
424 _ipw_read_indirect(a, b, c, d); \
425 })
426
427 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
428 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
429 int num);
430 #define ipw_write_indirect(a, b, c, d) do { \
431 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
432 __LINE__, (u32)(b), (u32)(d)); \
433 _ipw_write_indirect(a, b, c, d); \
434 } while (0)
435
436 /* 32-bit indirect write (above 4K) */
437 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
438 {
439 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
440 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
441 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
442 }
443
444 /* 8-bit indirect write (above 4K) */
445 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
446 {
447 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
448 u32 dif_len = reg - aligned_addr;
449
450 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
451 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
452 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
453 }
454
455 /* 16-bit indirect write (above 4K) */
456 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
457 {
458 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
459 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
460
461 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
462 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
463 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
464 }
465
466 /* 8-bit indirect read (above 4K) */
467 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
468 {
469 u32 word;
470 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
471 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
472 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
473 return (word >> ((reg & 0x3) * 8)) & 0xff;
474 }
475
476 /* 32-bit indirect read (above 4K) */
477 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
478 {
479 u32 value;
480
481 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
482
483 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
484 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
485 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
486 return value;
487 }
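/*
 * Editor's sketch of the indirect access pattern implemented above: a
 * register outside the directly mapped low 4K is reached by latching its
 * address into IPW_INDIRECT_ADDR and moving the data through
 * IPW_INDIRECT_DATA.  A typical read-modify-write through the wrappers
 * (the same pattern the LED code below uses on IPW_EVENT_REG) is:
 *
 *	u32 reg = ipw_read_reg32(priv, IPW_EVENT_REG);
 *	reg |= some_bit;		(some_bit is a placeholder)
 *	ipw_write_reg32(priv, IPW_EVENT_REG, reg);
 */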
488
489 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
490 /* for area above 1st 4K of SRAM/reg space */
491 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
492 int num)
493 {
494 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
495 u32 dif_len = addr - aligned_addr;
496 u32 i;
497
498 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
499
500 if (num <= 0) {
501 return;
502 }
503
504 /* Read the first dword (or portion) byte by byte */
505 if (unlikely(dif_len)) {
506 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
507 /* Start reading at aligned_addr + dif_len */
508 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
509 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
510 aligned_addr += 4;
511 }
512
513 /* Read all of the middle dwords as dwords, with auto-increment */
514 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
515 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
516 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
517
518 /* Read the last dword (or portion) byte by byte */
519 if (unlikely(num)) {
520 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
521 for (i = 0; num > 0; i++, num--)
522 			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
523 }
524 }
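/*
 * Worked example (editor's illustration): a 10-byte read starting at an
 * address ending in ...2 first fetches the two bytes at offsets 2-3 of
 * the leading dword through IPW_INDIRECT_DATA, then pulls the remaining
 * eight bytes as two aligned dwords through the auto-incrementing
 * IPW_AUTOINC_DATA port, leaving no unaligned tail to read byte by byte.
 */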
525
526 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
527 /* for area above 1st 4K of SRAM/reg space */
528 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
529 int num)
530 {
531 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
532 u32 dif_len = addr - aligned_addr;
533 u32 i;
534
535 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
536
537 if (num <= 0) {
538 return;
539 }
540
541 /* Write the first dword (or portion) byte by byte */
542 if (unlikely(dif_len)) {
543 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
544 /* Start writing at aligned_addr + dif_len */
545 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
546 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
547 aligned_addr += 4;
548 }
549
550 /* Write all of the middle dwords as dwords, with auto-increment */
551 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
552 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
553 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
554
555 /* Write the last dword (or portion) byte by byte */
556 if (unlikely(num)) {
557 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
558 for (i = 0; num > 0; i++, num--, buf++)
559 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
560 }
561 }
562
563 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
564 /* for 1st 4K of SRAM/regs space */
565 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
566 int num)
567 {
568 memcpy_toio((priv->hw_base + addr), buf, num);
569 }
570
571 /* Set bit(s) in low 4K of SRAM/regs */
572 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
573 {
574 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
575 }
576
577 /* Clear bit(s) in low 4K of SRAM/regs */
578 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
579 {
580 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
581 }
582
583 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
584 {
585 if (priv->status & STATUS_INT_ENABLED)
586 return;
587 priv->status |= STATUS_INT_ENABLED;
588 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
589 }
590
591 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
592 {
593 if (!(priv->status & STATUS_INT_ENABLED))
594 return;
595 priv->status &= ~STATUS_INT_ENABLED;
596 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
597 }
598
599 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
600 {
601 unsigned long flags;
602
603 spin_lock_irqsave(&priv->irq_lock, flags);
604 __ipw_enable_interrupts(priv);
605 spin_unlock_irqrestore(&priv->irq_lock, flags);
606 }
607
608 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
609 {
610 unsigned long flags;
611
612 spin_lock_irqsave(&priv->irq_lock, flags);
613 __ipw_disable_interrupts(priv);
614 spin_unlock_irqrestore(&priv->irq_lock, flags);
615 }
616
617 static char *ipw_error_desc(u32 val)
618 {
619 switch (val) {
620 case IPW_FW_ERROR_OK:
621 return "ERROR_OK";
622 case IPW_FW_ERROR_FAIL:
623 return "ERROR_FAIL";
624 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
625 return "MEMORY_UNDERFLOW";
626 case IPW_FW_ERROR_MEMORY_OVERFLOW:
627 return "MEMORY_OVERFLOW";
628 case IPW_FW_ERROR_BAD_PARAM:
629 return "BAD_PARAM";
630 case IPW_FW_ERROR_BAD_CHECKSUM:
631 return "BAD_CHECKSUM";
632 case IPW_FW_ERROR_NMI_INTERRUPT:
633 return "NMI_INTERRUPT";
634 case IPW_FW_ERROR_BAD_DATABASE:
635 return "BAD_DATABASE";
636 case IPW_FW_ERROR_ALLOC_FAIL:
637 return "ALLOC_FAIL";
638 case IPW_FW_ERROR_DMA_UNDERRUN:
639 return "DMA_UNDERRUN";
640 case IPW_FW_ERROR_DMA_STATUS:
641 return "DMA_STATUS";
642 case IPW_FW_ERROR_DINO_ERROR:
643 return "DINO_ERROR";
644 case IPW_FW_ERROR_EEPROM_ERROR:
645 return "EEPROM_ERROR";
646 case IPW_FW_ERROR_SYSASSERT:
647 return "SYSASSERT";
648 case IPW_FW_ERROR_FATAL_ERROR:
649 return "FATAL_ERROR";
650 default:
651 return "UNKNOWN_ERROR";
652 }
653 }
654
655 static void ipw_dump_error_log(struct ipw_priv *priv,
656 struct ipw_fw_error *error)
657 {
658 u32 i;
659
660 if (!error) {
661 IPW_ERROR("Error allocating and capturing error log. "
662 "Nothing to dump.\n");
663 return;
664 }
665
666 IPW_ERROR("Start IPW Error Log Dump:\n");
667 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
668 error->status, error->config);
669
670 for (i = 0; i < error->elem_len; i++)
671 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
672 ipw_error_desc(error->elem[i].desc),
673 error->elem[i].time,
674 error->elem[i].blink1,
675 error->elem[i].blink2,
676 error->elem[i].link1,
677 error->elem[i].link2, error->elem[i].data);
678 for (i = 0; i < error->log_len; i++)
679 IPW_ERROR("%i\t0x%08x\t%i\n",
680 error->log[i].time,
681 error->log[i].data, error->log[i].event);
682 }
683
684 static inline int ipw_is_init(struct ipw_priv *priv)
685 {
686 return (priv->status & STATUS_INIT) ? 1 : 0;
687 }
688
689 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
690 {
691 u32 addr, field_info, field_len, field_count, total_len;
692
693 IPW_DEBUG_ORD("ordinal = %i\n", ord);
694
695 if (!priv || !val || !len) {
696 IPW_DEBUG_ORD("Invalid argument\n");
697 return -EINVAL;
698 }
699
700 /* verify device ordinal tables have been initialized */
701 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
702 IPW_DEBUG_ORD("Access ordinals before initialization\n");
703 return -EINVAL;
704 }
705
706 switch (IPW_ORD_TABLE_ID_MASK & ord) {
707 case IPW_ORD_TABLE_0_MASK:
708 /*
709 * TABLE 0: Direct access to a table of 32 bit values
710 *
711 * This is a very simple table with the data directly
712 * read from the table
713 */
714
715 /* remove the table id from the ordinal */
716 ord &= IPW_ORD_TABLE_VALUE_MASK;
717
718 /* boundary check */
719 if (ord > priv->table0_len) {
720 IPW_DEBUG_ORD("ordinal value (%i) longer then "
721 "max (%i)\n", ord, priv->table0_len);
722 return -EINVAL;
723 }
724
725 /* verify we have enough room to store the value */
726 if (*len < sizeof(u32)) {
727 IPW_DEBUG_ORD("ordinal buffer length too small, "
728 "need %zd\n", sizeof(u32));
729 return -EINVAL;
730 }
731
732 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
733 ord, priv->table0_addr + (ord << 2));
734
735 *len = sizeof(u32);
736 ord <<= 2;
737 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
738 break;
739
740 case IPW_ORD_TABLE_1_MASK:
741 /*
742 * TABLE 1: Indirect access to a table of 32 bit values
743 *
744 * This is a fairly large table of u32 values each
745 * representing starting addr for the data (which is
746 * also a u32)
747 */
748
749 /* remove the table id from the ordinal */
750 ord &= IPW_ORD_TABLE_VALUE_MASK;
751
752 /* boundary check */
753 if (ord > priv->table1_len) {
754 IPW_DEBUG_ORD("ordinal value too long\n");
755 return -EINVAL;
756 }
757
758 /* verify we have enough room to store the value */
759 if (*len < sizeof(u32)) {
760 IPW_DEBUG_ORD("ordinal buffer length too small, "
761 "need %zd\n", sizeof(u32));
762 return -EINVAL;
763 }
764
765 *((u32 *) val) =
766 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
767 *len = sizeof(u32);
768 break;
769
770 case IPW_ORD_TABLE_2_MASK:
771 /*
772 * TABLE 2: Indirect access to a table of variable sized values
773 *
774 		 * This table consists of six values, each containing
775 		 *     - dword containing the starting offset of the data
776 		 *     - dword containing the length in the first 16 bits
777 		 *       and the count in the second 16 bits
778 */
779
780 /* remove the table id from the ordinal */
781 ord &= IPW_ORD_TABLE_VALUE_MASK;
782
783 /* boundary check */
784 if (ord > priv->table2_len) {
785 IPW_DEBUG_ORD("ordinal value too long\n");
786 return -EINVAL;
787 }
788
789 /* get the address of statistic */
790 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
791
792 /* get the second DW of statistics ;
793 * two 16-bit words - first is length, second is count */
794 field_info =
795 ipw_read_reg32(priv,
796 priv->table2_addr + (ord << 3) +
797 sizeof(u32));
798
799 /* get each entry length */
800 field_len = *((u16 *) & field_info);
801
802 /* get number of entries */
803 field_count = *(((u16 *) & field_info) + 1);
804
805 /* abort if not enough memory */
806 total_len = field_len * field_count;
807 if (total_len > *len) {
808 *len = total_len;
809 return -EINVAL;
810 }
811
812 *len = total_len;
813 if (!total_len)
814 return 0;
815
816 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
817 "field_info = 0x%08x\n",
818 addr, total_len, field_info);
819 ipw_read_indirect(priv, addr, val, total_len);
820 break;
821
822 default:
823 IPW_DEBUG_ORD("Invalid ordinal!\n");
824 return -EINVAL;
825
826 }
827
828 return 0;
829 }
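/*
 * Caller sketch (editor's illustration mirroring show_ucode_version()
 * later in this file): the caller passes the buffer size in *len and gets
 * the number of bytes actually written back in *len.
 *
 *	u32 tmp = 0, len = sizeof(u32);
 *	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *		IPW_DEBUG_ORD("ordinal read failed\n");
 */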
830
831 static void ipw_init_ordinals(struct ipw_priv *priv)
832 {
833 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
834 priv->table0_len = ipw_read32(priv, priv->table0_addr);
835
836 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
837 priv->table0_addr, priv->table0_len);
838
839 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
840 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
841
842 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
843 priv->table1_addr, priv->table1_len);
844
845 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
846 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
847 priv->table2_len &= 0x0000ffff; /* use first two bytes */
848
849 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
850 priv->table2_addr, priv->table2_len);
851
852 }
853
854 static u32 ipw_register_toggle(u32 reg)
855 {
856 reg &= ~IPW_START_STANDBY;
857 if (reg & IPW_GATE_ODMA)
858 reg &= ~IPW_GATE_ODMA;
859 if (reg & IPW_GATE_IDMA)
860 reg &= ~IPW_GATE_IDMA;
861 if (reg & IPW_GATE_ADMA)
862 reg &= ~IPW_GATE_ADMA;
863 return reg;
864 }
865
866 /*
867 * LED behavior:
868  * - On radio ON, turn on any LEDs that need to be on during start
869 * - On initialization, start unassociated blink
870 * - On association, disable unassociated blink
871 * - On disassociation, start unassociated blink
872 * - On radio OFF, turn off any LEDs started during radio on
873 *
874 */
875 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
876 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
877 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
878
879 static void ipw_led_link_on(struct ipw_priv *priv)
880 {
881 unsigned long flags;
882 u32 led;
883
884 /* If configured to not use LEDs, or nic_type is 1,
885 * then we don't toggle a LINK led */
886 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
887 return;
888
889 spin_lock_irqsave(&priv->lock, flags);
890
891 if (!(priv->status & STATUS_RF_KILL_MASK) &&
892 !(priv->status & STATUS_LED_LINK_ON)) {
893 IPW_DEBUG_LED("Link LED On\n");
894 led = ipw_read_reg32(priv, IPW_EVENT_REG);
895 led |= priv->led_association_on;
896
897 led = ipw_register_toggle(led);
898
899 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
900 ipw_write_reg32(priv, IPW_EVENT_REG, led);
901
902 priv->status |= STATUS_LED_LINK_ON;
903
904 /* If we aren't associated, schedule turning the LED off */
905 if (!(priv->status & STATUS_ASSOCIATED))
906 schedule_delayed_work(&priv->led_link_off,
907 LD_TIME_LINK_ON);
908 }
909
910 spin_unlock_irqrestore(&priv->lock, flags);
911 }
912
913 static void ipw_bg_led_link_on(struct work_struct *work)
914 {
915 struct ipw_priv *priv =
916 container_of(work, struct ipw_priv, led_link_on.work);
917 mutex_lock(&priv->mutex);
918 ipw_led_link_on(priv);
919 mutex_unlock(&priv->mutex);
920 }
921
922 static void ipw_led_link_off(struct ipw_priv *priv)
923 {
924 unsigned long flags;
925 u32 led;
926
927 /* If configured not to use LEDs, or nic type is 1,
928 	 * then we don't toggle the LINK led. */
929 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
930 return;
931
932 spin_lock_irqsave(&priv->lock, flags);
933
934 if (priv->status & STATUS_LED_LINK_ON) {
935 led = ipw_read_reg32(priv, IPW_EVENT_REG);
936 led &= priv->led_association_off;
937 led = ipw_register_toggle(led);
938
939 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
940 ipw_write_reg32(priv, IPW_EVENT_REG, led);
941
942 IPW_DEBUG_LED("Link LED Off\n");
943
944 priv->status &= ~STATUS_LED_LINK_ON;
945
946 /* If we aren't associated and the radio is on, schedule
947 * turning the LED on (blink while unassociated) */
948 if (!(priv->status & STATUS_RF_KILL_MASK) &&
949 !(priv->status & STATUS_ASSOCIATED))
950 schedule_delayed_work(&priv->led_link_on,
951 LD_TIME_LINK_OFF);
952
953 }
954
955 spin_unlock_irqrestore(&priv->lock, flags);
956 }
957
958 static void ipw_bg_led_link_off(struct work_struct *work)
959 {
960 struct ipw_priv *priv =
961 container_of(work, struct ipw_priv, led_link_off.work);
962 mutex_lock(&priv->mutex);
963 ipw_led_link_off(priv);
964 mutex_unlock(&priv->mutex);
965 }
966
967 static void __ipw_led_activity_on(struct ipw_priv *priv)
968 {
969 u32 led;
970
971 if (priv->config & CFG_NO_LED)
972 return;
973
974 if (priv->status & STATUS_RF_KILL_MASK)
975 return;
976
977 if (!(priv->status & STATUS_LED_ACT_ON)) {
978 led = ipw_read_reg32(priv, IPW_EVENT_REG);
979 led |= priv->led_activity_on;
980
981 led = ipw_register_toggle(led);
982
983 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
984 ipw_write_reg32(priv, IPW_EVENT_REG, led);
985
986 IPW_DEBUG_LED("Activity LED On\n");
987
988 priv->status |= STATUS_LED_ACT_ON;
989
990 cancel_delayed_work(&priv->led_act_off);
991 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
992 } else {
993 /* Reschedule LED off for full time period */
994 cancel_delayed_work(&priv->led_act_off);
995 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
996 }
997 }
998
999 #if 0
1000 void ipw_led_activity_on(struct ipw_priv *priv)
1001 {
1002 unsigned long flags;
1003 spin_lock_irqsave(&priv->lock, flags);
1004 __ipw_led_activity_on(priv);
1005 spin_unlock_irqrestore(&priv->lock, flags);
1006 }
1007 #endif /* 0 */
1008
1009 static void ipw_led_activity_off(struct ipw_priv *priv)
1010 {
1011 unsigned long flags;
1012 u32 led;
1013
1014 if (priv->config & CFG_NO_LED)
1015 return;
1016
1017 spin_lock_irqsave(&priv->lock, flags);
1018
1019 if (priv->status & STATUS_LED_ACT_ON) {
1020 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1021 led &= priv->led_activity_off;
1022
1023 led = ipw_register_toggle(led);
1024
1025 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1026 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1027
1028 IPW_DEBUG_LED("Activity LED Off\n");
1029
1030 priv->status &= ~STATUS_LED_ACT_ON;
1031 }
1032
1033 spin_unlock_irqrestore(&priv->lock, flags);
1034 }
1035
1036 static void ipw_bg_led_activity_off(struct work_struct *work)
1037 {
1038 struct ipw_priv *priv =
1039 container_of(work, struct ipw_priv, led_act_off.work);
1040 mutex_lock(&priv->mutex);
1041 ipw_led_activity_off(priv);
1042 mutex_unlock(&priv->mutex);
1043 }
1044
1045 static void ipw_led_band_on(struct ipw_priv *priv)
1046 {
1047 unsigned long flags;
1048 u32 led;
1049
1050 /* Only nic type 1 supports mode LEDs */
1051 if (priv->config & CFG_NO_LED ||
1052 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1053 return;
1054
1055 spin_lock_irqsave(&priv->lock, flags);
1056
1057 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1058 if (priv->assoc_network->mode == IEEE_A) {
1059 led |= priv->led_ofdm_on;
1060 led &= priv->led_association_off;
1061 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1062 } else if (priv->assoc_network->mode == IEEE_G) {
1063 led |= priv->led_ofdm_on;
1064 led |= priv->led_association_on;
1065 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1066 } else {
1067 led &= priv->led_ofdm_off;
1068 led |= priv->led_association_on;
1069 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1070 }
1071
1072 led = ipw_register_toggle(led);
1073
1074 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1075 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1076
1077 spin_unlock_irqrestore(&priv->lock, flags);
1078 }
1079
1080 static void ipw_led_band_off(struct ipw_priv *priv)
1081 {
1082 unsigned long flags;
1083 u32 led;
1084
1085 /* Only nic type 1 supports mode LEDs */
1086 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1087 return;
1088
1089 spin_lock_irqsave(&priv->lock, flags);
1090
1091 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1092 led &= priv->led_ofdm_off;
1093 led &= priv->led_association_off;
1094
1095 led = ipw_register_toggle(led);
1096
1097 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1098 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1099
1100 spin_unlock_irqrestore(&priv->lock, flags);
1101 }
1102
1103 static void ipw_led_radio_on(struct ipw_priv *priv)
1104 {
1105 ipw_led_link_on(priv);
1106 }
1107
1108 static void ipw_led_radio_off(struct ipw_priv *priv)
1109 {
1110 ipw_led_activity_off(priv);
1111 ipw_led_link_off(priv);
1112 }
1113
1114 static void ipw_led_link_up(struct ipw_priv *priv)
1115 {
1116 /* Set the Link Led on for all nic types */
1117 ipw_led_link_on(priv);
1118 }
1119
1120 static void ipw_led_link_down(struct ipw_priv *priv)
1121 {
1122 ipw_led_activity_off(priv);
1123 ipw_led_link_off(priv);
1124
1125 if (priv->status & STATUS_RF_KILL_MASK)
1126 ipw_led_radio_off(priv);
1127 }
1128
1129 static void ipw_led_init(struct ipw_priv *priv)
1130 {
1131 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1132
1133 /* Set the default PINs for the link and activity leds */
1134 priv->led_activity_on = IPW_ACTIVITY_LED;
1135 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1136
1137 priv->led_association_on = IPW_ASSOCIATED_LED;
1138 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1139
1140 /* Set the default PINs for the OFDM leds */
1141 priv->led_ofdm_on = IPW_OFDM_LED;
1142 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1143
1144 switch (priv->nic_type) {
1145 case EEPROM_NIC_TYPE_1:
1146 /* In this NIC type, the LEDs are reversed.... */
1147 priv->led_activity_on = IPW_ASSOCIATED_LED;
1148 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1149 priv->led_association_on = IPW_ACTIVITY_LED;
1150 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1151
1152 if (!(priv->config & CFG_NO_LED))
1153 ipw_led_band_on(priv);
1154
1155 /* And we don't blink link LEDs for this nic, so
1156 * just return here */
1157 return;
1158
1159 case EEPROM_NIC_TYPE_3:
1160 case EEPROM_NIC_TYPE_2:
1161 case EEPROM_NIC_TYPE_4:
1162 case EEPROM_NIC_TYPE_0:
1163 break;
1164
1165 default:
1166 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1167 priv->nic_type);
1168 priv->nic_type = EEPROM_NIC_TYPE_0;
1169 break;
1170 }
1171
1172 if (!(priv->config & CFG_NO_LED)) {
1173 if (priv->status & STATUS_ASSOCIATED)
1174 ipw_led_link_on(priv);
1175 else
1176 ipw_led_link_off(priv);
1177 }
1178 }
1179
1180 static void ipw_led_shutdown(struct ipw_priv *priv)
1181 {
1182 ipw_led_activity_off(priv);
1183 ipw_led_link_off(priv);
1184 ipw_led_band_off(priv);
1185 cancel_delayed_work(&priv->led_link_on);
1186 cancel_delayed_work(&priv->led_link_off);
1187 cancel_delayed_work(&priv->led_act_off);
1188 }
1189
1190 /*
1191 * The following adds a new attribute to the sysfs representation
1192 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1193 * used for controlling the debug level.
1194 *
1195 * See the level definitions in ipw for details.
1196 */
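/*
 * Example usage from a shell (editor's illustration, using the directory
 * named in the comment above):
 *
 *	cat /sys/bus/pci/drivers/ipw/debug_level
 *	echo 0x43fff > /sys/bus/pci/drivers/ipw/debug_level
 *
 * The store handler below accepts either hex (with a leading 0x or x) or
 * decimal input.
 */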
1197 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1198 {
1199 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1200 }
1201
1202 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1203 size_t count)
1204 {
1205 char *p = (char *)buf;
1206 u32 val;
1207
1208 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1209 p++;
1210 if (p[0] == 'x' || p[0] == 'X')
1211 p++;
1212 val = simple_strtoul(p, &p, 16);
1213 } else
1214 val = simple_strtoul(p, &p, 10);
1215 if (p == buf)
1216 printk(KERN_INFO DRV_NAME
1217 ": %s is not in hex or decimal form.\n", buf);
1218 else
1219 ipw_debug_level = val;
1220
1221 return strnlen(buf, count);
1222 }
1223
1224 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1225 show_debug_level, store_debug_level);
1226
1227 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1228 {
1229 /* length = 1st dword in log */
1230 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1231 }
1232
1233 static void ipw_capture_event_log(struct ipw_priv *priv,
1234 u32 log_len, struct ipw_event *log)
1235 {
1236 u32 base;
1237
1238 if (log_len) {
1239 base = ipw_read32(priv, IPW_EVENT_LOG);
1240 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1241 (u8 *) log, sizeof(*log) * log_len);
1242 }
1243 }
1244
1245 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1246 {
1247 struct ipw_fw_error *error;
1248 u32 log_len = ipw_get_event_log_len(priv);
1249 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1250 u32 elem_len = ipw_read_reg32(priv, base);
1251
1252 error = kmalloc(sizeof(*error) +
1253 sizeof(*error->elem) * elem_len +
1254 sizeof(*error->log) * log_len, GFP_ATOMIC);
1255 if (!error) {
1256 IPW_ERROR("Memory allocation for firmware error log "
1257 "failed.\n");
1258 return NULL;
1259 }
1260 error->jiffies = jiffies;
1261 error->status = priv->status;
1262 error->config = priv->config;
1263 error->elem_len = elem_len;
1264 error->log_len = log_len;
1265 error->elem = (struct ipw_error_elem *)error->payload;
1266 error->log = (struct ipw_event *)(error->elem + elem_len);
1267
1268 ipw_capture_event_log(priv, log_len, error->log);
1269
1270 if (elem_len)
1271 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1272 sizeof(*error->elem) * elem_len);
1273
1274 return error;
1275 }
1276
1277 static ssize_t show_event_log(struct device *d,
1278 struct device_attribute *attr, char *buf)
1279 {
1280 struct ipw_priv *priv = dev_get_drvdata(d);
1281 u32 log_len = ipw_get_event_log_len(priv);
1282 u32 log_size;
1283 struct ipw_event *log;
1284 u32 len = 0, i;
1285
1286 /* not using min() because of its strict type checking */
1287 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1288 sizeof(*log) * log_len : PAGE_SIZE;
1289 log = kzalloc(log_size, GFP_KERNEL);
1290 if (!log) {
1291 IPW_ERROR("Unable to allocate memory for log\n");
1292 return 0;
1293 }
1294 log_len = log_size / sizeof(*log);
1295 ipw_capture_event_log(priv, log_len, log);
1296
1297 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1298 for (i = 0; i < log_len; i++)
1299 len += snprintf(buf + len, PAGE_SIZE - len,
1300 "\n%08X%08X%08X",
1301 log[i].time, log[i].event, log[i].data);
1302 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1303 kfree(log);
1304 return len;
1305 }
1306
1307 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1308
1309 static ssize_t show_error(struct device *d,
1310 struct device_attribute *attr, char *buf)
1311 {
1312 struct ipw_priv *priv = dev_get_drvdata(d);
1313 u32 len = 0, i;
1314 if (!priv->error)
1315 return 0;
1316 len += snprintf(buf + len, PAGE_SIZE - len,
1317 "%08lX%08X%08X%08X",
1318 priv->error->jiffies,
1319 priv->error->status,
1320 priv->error->config, priv->error->elem_len);
1321 for (i = 0; i < priv->error->elem_len; i++)
1322 len += snprintf(buf + len, PAGE_SIZE - len,
1323 "\n%08X%08X%08X%08X%08X%08X%08X",
1324 priv->error->elem[i].time,
1325 priv->error->elem[i].desc,
1326 priv->error->elem[i].blink1,
1327 priv->error->elem[i].blink2,
1328 priv->error->elem[i].link1,
1329 priv->error->elem[i].link2,
1330 priv->error->elem[i].data);
1331
1332 len += snprintf(buf + len, PAGE_SIZE - len,
1333 "\n%08X", priv->error->log_len);
1334 for (i = 0; i < priv->error->log_len; i++)
1335 len += snprintf(buf + len, PAGE_SIZE - len,
1336 "\n%08X%08X%08X",
1337 priv->error->log[i].time,
1338 priv->error->log[i].event,
1339 priv->error->log[i].data);
1340 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1341 return len;
1342 }
1343
1344 static ssize_t clear_error(struct device *d,
1345 struct device_attribute *attr,
1346 const char *buf, size_t count)
1347 {
1348 struct ipw_priv *priv = dev_get_drvdata(d);
1349
1350 kfree(priv->error);
1351 priv->error = NULL;
1352 return count;
1353 }
1354
1355 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1356
1357 static ssize_t show_cmd_log(struct device *d,
1358 struct device_attribute *attr, char *buf)
1359 {
1360 struct ipw_priv *priv = dev_get_drvdata(d);
1361 u32 len = 0, i;
1362 if (!priv->cmdlog)
1363 return 0;
1364 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1365 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1366 i = (i + 1) % priv->cmdlog_len) {
1367 len +=
1368 snprintf(buf + len, PAGE_SIZE - len,
1369 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1370 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1371 priv->cmdlog[i].cmd.len);
1372 len +=
1373 snprintk_buf(buf + len, PAGE_SIZE - len,
1374 (u8 *) priv->cmdlog[i].cmd.param,
1375 priv->cmdlog[i].cmd.len);
1376 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1377 }
1378 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1379 return len;
1380 }
1381
1382 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1383
1384 #ifdef CONFIG_IPW2200_PROMISCUOUS
1385 static void ipw_prom_free(struct ipw_priv *priv);
1386 static int ipw_prom_alloc(struct ipw_priv *priv);
1387 static ssize_t store_rtap_iface(struct device *d,
1388 struct device_attribute *attr,
1389 const char *buf, size_t count)
1390 {
1391 struct ipw_priv *priv = dev_get_drvdata(d);
1392 int rc = 0;
1393
1394 if (count < 1)
1395 return -EINVAL;
1396
1397 switch (buf[0]) {
1398 case '0':
1399 if (!rtap_iface)
1400 return count;
1401
1402 if (netif_running(priv->prom_net_dev)) {
1403 IPW_WARNING("Interface is up. Cannot unregister.\n");
1404 return count;
1405 }
1406
1407 ipw_prom_free(priv);
1408 rtap_iface = 0;
1409 break;
1410
1411 case '1':
1412 if (rtap_iface)
1413 return count;
1414
1415 rc = ipw_prom_alloc(priv);
1416 if (!rc)
1417 rtap_iface = 1;
1418 break;
1419
1420 default:
1421 return -EINVAL;
1422 }
1423
1424 if (rc) {
1425 IPW_ERROR("Failed to register promiscuous network "
1426 "device (error %d).\n", rc);
1427 }
1428
1429 return count;
1430 }
1431
1432 static ssize_t show_rtap_iface(struct device *d,
1433 struct device_attribute *attr,
1434 char *buf)
1435 {
1436 struct ipw_priv *priv = dev_get_drvdata(d);
1437 if (rtap_iface)
1438 return sprintf(buf, "%s", priv->prom_net_dev->name);
1439 else {
1440 buf[0] = '-';
1441 buf[1] = '1';
1442 buf[2] = '\0';
1443 return 3;
1444 }
1445 }
1446
1447 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1448 store_rtap_iface);
1449
1450 static ssize_t store_rtap_filter(struct device *d,
1451 struct device_attribute *attr,
1452 const char *buf, size_t count)
1453 {
1454 struct ipw_priv *priv = dev_get_drvdata(d);
1455
1456 if (!priv->prom_priv) {
1457 IPW_ERROR("Attempting to set filter without "
1458 "rtap_iface enabled.\n");
1459 return -EPERM;
1460 }
1461
1462 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1463
1464 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1465 BIT_ARG16(priv->prom_priv->filter));
1466
1467 return count;
1468 }
1469
1470 static ssize_t show_rtap_filter(struct device *d,
1471 struct device_attribute *attr,
1472 char *buf)
1473 {
1474 struct ipw_priv *priv = dev_get_drvdata(d);
1475 return sprintf(buf, "0x%04X",
1476 priv->prom_priv ? priv->prom_priv->filter : 0);
1477 }
1478
1479 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1480 store_rtap_filter);
1481 #endif
1482
1483 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1484 char *buf)
1485 {
1486 struct ipw_priv *priv = dev_get_drvdata(d);
1487 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1488 }
1489
1490 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1491 const char *buf, size_t count)
1492 {
1493 struct ipw_priv *priv = dev_get_drvdata(d);
1494 struct net_device *dev = priv->net_dev;
1495 char buffer[] = "00000000";
1496 unsigned long len =
1497 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1498 unsigned long val;
1499 char *p = buffer;
1500
1501 IPW_DEBUG_INFO("enter\n");
1502
1503 strncpy(buffer, buf, len);
1504 buffer[len] = 0;
1505
1506 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1507 p++;
1508 if (p[0] == 'x' || p[0] == 'X')
1509 p++;
1510 val = simple_strtoul(p, &p, 16);
1511 } else
1512 val = simple_strtoul(p, &p, 10);
1513 if (p == buffer) {
1514 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1515 } else {
1516 priv->ieee->scan_age = val;
1517 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1518 }
1519
1520 IPW_DEBUG_INFO("exit\n");
1521 return len;
1522 }
1523
1524 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1525
1526 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1527 char *buf)
1528 {
1529 struct ipw_priv *priv = dev_get_drvdata(d);
1530 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1531 }
1532
1533 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1534 const char *buf, size_t count)
1535 {
1536 struct ipw_priv *priv = dev_get_drvdata(d);
1537
1538 IPW_DEBUG_INFO("enter\n");
1539
1540 if (count == 0)
1541 return 0;
1542
1543 if (*buf == 0) {
1544 IPW_DEBUG_LED("Disabling LED control.\n");
1545 priv->config |= CFG_NO_LED;
1546 ipw_led_shutdown(priv);
1547 } else {
1548 IPW_DEBUG_LED("Enabling LED control.\n");
1549 priv->config &= ~CFG_NO_LED;
1550 ipw_led_init(priv);
1551 }
1552
1553 IPW_DEBUG_INFO("exit\n");
1554 return count;
1555 }
1556
1557 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1558
1559 static ssize_t show_status(struct device *d,
1560 struct device_attribute *attr, char *buf)
1561 {
1562 struct ipw_priv *p = dev_get_drvdata(d);
1563 return sprintf(buf, "0x%08x\n", (int)p->status);
1564 }
1565
1566 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1567
1568 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1569 char *buf)
1570 {
1571 struct ipw_priv *p = dev_get_drvdata(d);
1572 return sprintf(buf, "0x%08x\n", (int)p->config);
1573 }
1574
1575 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1576
1577 static ssize_t show_nic_type(struct device *d,
1578 struct device_attribute *attr, char *buf)
1579 {
1580 struct ipw_priv *priv = dev_get_drvdata(d);
1581 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1582 }
1583
1584 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1585
1586 static ssize_t show_ucode_version(struct device *d,
1587 struct device_attribute *attr, char *buf)
1588 {
1589 u32 len = sizeof(u32), tmp = 0;
1590 struct ipw_priv *p = dev_get_drvdata(d);
1591
1592 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1593 return 0;
1594
1595 return sprintf(buf, "0x%08x\n", tmp);
1596 }
1597
1598 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1599
1600 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1601 char *buf)
1602 {
1603 u32 len = sizeof(u32), tmp = 0;
1604 struct ipw_priv *p = dev_get_drvdata(d);
1605
1606 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1607 return 0;
1608
1609 return sprintf(buf, "0x%08x\n", tmp);
1610 }
1611
1612 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1613
1614 /*
1615 * Add a device attribute to view/control the delay between eeprom
1616 * operations.
1617 */
1618 static ssize_t show_eeprom_delay(struct device *d,
1619 struct device_attribute *attr, char *buf)
1620 {
1621 struct ipw_priv *p = dev_get_drvdata(d);
1622 int n = p->eeprom_delay;
1623 return sprintf(buf, "%i\n", n);
1624 }
1625 static ssize_t store_eeprom_delay(struct device *d,
1626 struct device_attribute *attr,
1627 const char *buf, size_t count)
1628 {
1629 struct ipw_priv *p = dev_get_drvdata(d);
1630 sscanf(buf, "%i", &p->eeprom_delay);
1631 return strnlen(buf, count);
1632 }
1633
1634 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1635 show_eeprom_delay, store_eeprom_delay);
1636
1637 static ssize_t show_command_event_reg(struct device *d,
1638 struct device_attribute *attr, char *buf)
1639 {
1640 u32 reg = 0;
1641 struct ipw_priv *p = dev_get_drvdata(d);
1642
1643 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1644 return sprintf(buf, "0x%08x\n", reg);
1645 }
1646 static ssize_t store_command_event_reg(struct device *d,
1647 struct device_attribute *attr,
1648 const char *buf, size_t count)
1649 {
1650 u32 reg;
1651 struct ipw_priv *p = dev_get_drvdata(d);
1652
1653 sscanf(buf, "%x", &reg);
1654 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1655 return strnlen(buf, count);
1656 }
1657
1658 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1659 show_command_event_reg, store_command_event_reg);
1660
1661 static ssize_t show_mem_gpio_reg(struct device *d,
1662 struct device_attribute *attr, char *buf)
1663 {
1664 u32 reg = 0;
1665 struct ipw_priv *p = dev_get_drvdata(d);
1666
1667 reg = ipw_read_reg32(p, 0x301100);
1668 return sprintf(buf, "0x%08x\n", reg);
1669 }
1670 static ssize_t store_mem_gpio_reg(struct device *d,
1671 struct device_attribute *attr,
1672 const char *buf, size_t count)
1673 {
1674 u32 reg;
1675 struct ipw_priv *p = dev_get_drvdata(d);
1676
1677 sscanf(buf, "%x", &reg);
1678 ipw_write_reg32(p, 0x301100, reg);
1679 return strnlen(buf, count);
1680 }
1681
1682 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1683 show_mem_gpio_reg, store_mem_gpio_reg);
1684
1685 static ssize_t show_indirect_dword(struct device *d,
1686 struct device_attribute *attr, char *buf)
1687 {
1688 u32 reg = 0;
1689 struct ipw_priv *priv = dev_get_drvdata(d);
1690
1691 if (priv->status & STATUS_INDIRECT_DWORD)
1692 reg = ipw_read_reg32(priv, priv->indirect_dword);
1693 else
1694 reg = 0;
1695
1696 return sprintf(buf, "0x%08x\n", reg);
1697 }
1698 static ssize_t store_indirect_dword(struct device *d,
1699 struct device_attribute *attr,
1700 const char *buf, size_t count)
1701 {
1702 struct ipw_priv *priv = dev_get_drvdata(d);
1703
1704 sscanf(buf, "%x", &priv->indirect_dword);
1705 priv->status |= STATUS_INDIRECT_DWORD;
1706 return strnlen(buf, count);
1707 }
1708
1709 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1710 show_indirect_dword, store_indirect_dword);
1711
1712 static ssize_t show_indirect_byte(struct device *d,
1713 struct device_attribute *attr, char *buf)
1714 {
1715 u8 reg = 0;
1716 struct ipw_priv *priv = dev_get_drvdata(d);
1717
1718 if (priv->status & STATUS_INDIRECT_BYTE)
1719 reg = ipw_read_reg8(priv, priv->indirect_byte);
1720 else
1721 reg = 0;
1722
1723 return sprintf(buf, "0x%02x\n", reg);
1724 }
1725 static ssize_t store_indirect_byte(struct device *d,
1726 struct device_attribute *attr,
1727 const char *buf, size_t count)
1728 {
1729 struct ipw_priv *priv = dev_get_drvdata(d);
1730
1731 sscanf(buf, "%x", &priv->indirect_byte);
1732 priv->status |= STATUS_INDIRECT_BYTE;
1733 return strnlen(buf, count);
1734 }
1735
1736 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1737 show_indirect_byte, store_indirect_byte);
1738
1739 static ssize_t show_direct_dword(struct device *d,
1740 struct device_attribute *attr, char *buf)
1741 {
1742 u32 reg = 0;
1743 struct ipw_priv *priv = dev_get_drvdata(d);
1744
1745 if (priv->status & STATUS_DIRECT_DWORD)
1746 reg = ipw_read32(priv, priv->direct_dword);
1747 else
1748 reg = 0;
1749
1750 return sprintf(buf, "0x%08x\n", reg);
1751 }
1752 static ssize_t store_direct_dword(struct device *d,
1753 struct device_attribute *attr,
1754 const char *buf, size_t count)
1755 {
1756 struct ipw_priv *priv = dev_get_drvdata(d);
1757
1758 sscanf(buf, "%x", &priv->direct_dword);
1759 priv->status |= STATUS_DIRECT_DWORD;
1760 return strnlen(buf, count);
1761 }
1762
1763 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1764 show_direct_dword, store_direct_dword);
1765
1766 static int rf_kill_active(struct ipw_priv *priv)
1767 {
1768 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1769 priv->status |= STATUS_RF_KILL_HW;
1770 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1771 } else {
1772 priv->status &= ~STATUS_RF_KILL_HW;
1773 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1774 }
1775
1776 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1777 }
1778
1779 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1780 char *buf)
1781 {
1782 /* 0 - RF kill not enabled
1783 1 - SW based RF kill active (sysfs)
1784 2 - HW based RF kill active
1785 	   3 - Both HW and SW based RF kill active */
1786 struct ipw_priv *priv = dev_get_drvdata(d);
1787 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1788 (rf_kill_active(priv) ? 0x2 : 0x0);
1789 return sprintf(buf, "%i\n", val);
1790 }
1791
1792 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1793 {
1794 if ((disable_radio ? 1 : 0) ==
1795 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1796 return 0;
1797
1798 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1799 disable_radio ? "OFF" : "ON");
1800
1801 if (disable_radio) {
1802 priv->status |= STATUS_RF_KILL_SW;
1803
1804 cancel_delayed_work(&priv->request_scan);
1805 cancel_delayed_work(&priv->request_direct_scan);
1806 cancel_delayed_work(&priv->request_passive_scan);
1807 cancel_delayed_work(&priv->scan_event);
1808 schedule_work(&priv->down);
1809 } else {
1810 priv->status &= ~STATUS_RF_KILL_SW;
1811 if (rf_kill_active(priv)) {
1812 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1813 "disabled by HW switch\n");
1814 /* Make sure the RF_KILL check timer is running */
1815 cancel_delayed_work(&priv->rf_kill);
1816 schedule_delayed_work(&priv->rf_kill,
1817 round_jiffies_relative(2 * HZ));
1818 } else
1819 schedule_work(&priv->up);
1820 }
1821
1822 return 1;
1823 }
1824
1825 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1826 const char *buf, size_t count)
1827 {
1828 struct ipw_priv *priv = dev_get_drvdata(d);
1829
1830 ipw_radio_kill_sw(priv, buf[0] == '1');
1831
1832 return count;
1833 }
1834
1835 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1836
1837 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1838 char *buf)
1839 {
1840 struct ipw_priv *priv = dev_get_drvdata(d);
1841 int pos = 0, len = 0;
1842 if (priv->config & CFG_SPEED_SCAN) {
1843 while (priv->speed_scan[pos] != 0)
1844 len += sprintf(&buf[len], "%d ",
1845 priv->speed_scan[pos++]);
1846 return len + sprintf(&buf[len], "\n");
1847 }
1848
1849 return sprintf(buf, "0\n");
1850 }
1851
1852 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1853 const char *buf, size_t count)
1854 {
1855 struct ipw_priv *priv = dev_get_drvdata(d);
1856 int channel, pos = 0;
1857 const char *p = buf;
1858
1859 /* list of space separated channels to scan, optionally ending with 0 */
1860 while ((channel = simple_strtol(p, NULL, 0))) {
1861 if (pos == MAX_SPEED_SCAN - 1) {
1862 priv->speed_scan[pos] = 0;
1863 break;
1864 }
1865
1866 if (libipw_is_valid_channel(priv->ieee, channel))
1867 priv->speed_scan[pos++] = channel;
1868 else
1869 IPW_WARNING("Skipping invalid channel request: %d\n",
1870 channel);
1871 p = strchr(p, ' ');
1872 if (!p)
1873 break;
1874 while (*p == ' ' || *p == '\t')
1875 p++;
1876 }
1877
1878 if (pos == 0)
1879 priv->config &= ~CFG_SPEED_SCAN;
1880 else {
1881 priv->speed_scan_pos = 0;
1882 priv->config |= CFG_SPEED_SCAN;
1883 }
1884
1885 return count;
1886 }
1887
1888 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1889 store_speed_scan);
1890
1891 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1892 char *buf)
1893 {
1894 struct ipw_priv *priv = dev_get_drvdata(d);
1895 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1896 }
1897
1898 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1899 const char *buf, size_t count)
1900 {
1901 struct ipw_priv *priv = dev_get_drvdata(d);
1902 if (buf[0] == '1')
1903 priv->config |= CFG_NET_STATS;
1904 else
1905 priv->config &= ~CFG_NET_STATS;
1906
1907 return count;
1908 }
1909
1910 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1911 show_net_stats, store_net_stats);
1912
1913 static ssize_t show_channels(struct device *d,
1914 struct device_attribute *attr,
1915 char *buf)
1916 {
1917 struct ipw_priv *priv = dev_get_drvdata(d);
1918 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1919 int len = 0, i;
1920
1921 len = sprintf(&buf[len],
1922 "Displaying %d channels in 2.4 GHz band "
1923 "(802.11bg):\n", geo->bg_channels);
1924
1925 for (i = 0; i < geo->bg_channels; i++) {
1926 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1927 geo->bg[i].channel,
1928 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1929 " (radar spectrum)" : "",
1930 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1931 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1932 ? "" : ", IBSS",
1933 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1934 "passive only" : "active/passive",
1935 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1936 "B" : "B/G");
1937 }
1938
1939 len += sprintf(&buf[len],
1940 "Displaying %d channels in 5.2 GHz band "
1941 "(802.11a):\n", geo->a_channels);
1942 for (i = 0; i < geo->a_channels; i++) {
1943 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1944 geo->a[i].channel,
1945 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1946 " (radar spectrum)" : "",
1947 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1948 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1949 ? "" : ", IBSS",
1950 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1951 "passive only" : "active/passive");
1952 }
1953
1954 return len;
1955 }
1956
1957 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1958
1959 static void notify_wx_assoc_event(struct ipw_priv *priv)
1960 {
1961 union iwreq_data wrqu;
1962 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1963 if (priv->status & STATUS_ASSOCIATED)
1964 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1965 else
1966 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1967 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1968 }
1969
1970 static void ipw_irq_tasklet(struct ipw_priv *priv)
1971 {
1972 u32 inta, inta_mask, handled = 0;
1973 unsigned long flags;
1974 int rc = 0;
1975
1976 spin_lock_irqsave(&priv->irq_lock, flags);
1977
1978 inta = ipw_read32(priv, IPW_INTA_RW);
1979 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1980
1981 if (inta == 0xFFFFFFFF) {
1982 /* Hardware disappeared */
1983 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1984 /* Only handle the cached INTA values */
1985 inta = 0;
1986 }
1987 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1988
1989 /* Add any cached INTA values that need to be handled */
1990 inta |= priv->isr_inta;
1991
1992 spin_unlock_irqrestore(&priv->irq_lock, flags);
1993
1994 spin_lock_irqsave(&priv->lock, flags);
1995
1996 /* handle all of the reasons for the interrupt */
1997 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1998 ipw_rx(priv);
1999 handled |= IPW_INTA_BIT_RX_TRANSFER;
2000 }
2001
2002 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2003 IPW_DEBUG_HC("Command completed.\n");
2004 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2005 priv->status &= ~STATUS_HCMD_ACTIVE;
2006 wake_up_interruptible(&priv->wait_command_queue);
2007 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2008 }
2009
2010 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2011 IPW_DEBUG_TX("TX_QUEUE_1\n");
2012 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2013 handled |= IPW_INTA_BIT_TX_QUEUE_1;
2014 }
2015
2016 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2017 IPW_DEBUG_TX("TX_QUEUE_2\n");
2018 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2019 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2020 }
2021
2022 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2023 IPW_DEBUG_TX("TX_QUEUE_3\n");
2024 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2025 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2026 }
2027
2028 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2029 IPW_DEBUG_TX("TX_QUEUE_4\n");
2030 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2031 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2032 }
2033
2034 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2035 IPW_WARNING("STATUS_CHANGE\n");
2036 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2037 }
2038
2039 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2040 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2041 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2042 }
2043
2044 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2045 IPW_WARNING("HOST_CMD_DONE\n");
2046 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2047 }
2048
2049 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2050 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2051 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2052 }
2053
2054 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2055 IPW_WARNING("PHY_OFF_DONE\n");
2056 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2057 }
2058
2059 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2060 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2061 priv->status |= STATUS_RF_KILL_HW;
2062 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2063 wake_up_interruptible(&priv->wait_command_queue);
2064 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2065 cancel_delayed_work(&priv->request_scan);
2066 cancel_delayed_work(&priv->request_direct_scan);
2067 cancel_delayed_work(&priv->request_passive_scan);
2068 cancel_delayed_work(&priv->scan_event);
2069 schedule_work(&priv->link_down);
2070 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2071 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2072 }
2073
2074 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2075 IPW_WARNING("Firmware error detected. Restarting.\n");
2076 if (priv->error) {
2077 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2078 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2079 struct ipw_fw_error *error =
2080 ipw_alloc_error_log(priv);
2081 ipw_dump_error_log(priv, error);
2082 kfree(error);
2083 }
2084 } else {
2085 priv->error = ipw_alloc_error_log(priv);
2086 if (priv->error)
2087 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2088 else
2089 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2090 "log.\n");
2091 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2092 ipw_dump_error_log(priv, priv->error);
2093 }
2094
2095 /* XXX: If hardware encryption is for WPA/WPA2,
2096 * we have to notify the supplicant. */
2097 if (priv->ieee->sec.encrypt) {
2098 priv->status &= ~STATUS_ASSOCIATED;
2099 notify_wx_assoc_event(priv);
2100 }
2101
2102 /* Keep the restart process from trying to send host
2103 * commands by clearing the INIT status bit */
2104 priv->status &= ~STATUS_INIT;
2105
2106 /* Cancel currently queued command. */
2107 priv->status &= ~STATUS_HCMD_ACTIVE;
2108 wake_up_interruptible(&priv->wait_command_queue);
2109
2110 schedule_work(&priv->adapter_restart);
2111 handled |= IPW_INTA_BIT_FATAL_ERROR;
2112 }
2113
2114 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2115 IPW_ERROR("Parity error\n");
2116 handled |= IPW_INTA_BIT_PARITY_ERROR;
2117 }
2118
2119 if (handled != inta) {
2120 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2121 }
2122
2123 spin_unlock_irqrestore(&priv->lock, flags);
2124
2125 /* enable all interrupts */
2126 ipw_enable_interrupts(priv);
2127 }
2128
2129 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
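/*
 * For reference, a line like IPW_CMD(SSID); inside the switch below
 * expands to:
 *
 *   case IPW_CMD_SSID : return "SSID";
 *
 * i.e. the token paste builds the enum label and the stringizing
 * operator supplies the printable name used in the log messages.
 */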
2130 static char *get_cmd_string(u8 cmd)
2131 {
2132 switch (cmd) {
2133 IPW_CMD(HOST_COMPLETE);
2134 IPW_CMD(POWER_DOWN);
2135 IPW_CMD(SYSTEM_CONFIG);
2136 IPW_CMD(MULTICAST_ADDRESS);
2137 IPW_CMD(SSID);
2138 IPW_CMD(ADAPTER_ADDRESS);
2139 IPW_CMD(PORT_TYPE);
2140 IPW_CMD(RTS_THRESHOLD);
2141 IPW_CMD(FRAG_THRESHOLD);
2142 IPW_CMD(POWER_MODE);
2143 IPW_CMD(WEP_KEY);
2144 IPW_CMD(TGI_TX_KEY);
2145 IPW_CMD(SCAN_REQUEST);
2146 IPW_CMD(SCAN_REQUEST_EXT);
2147 IPW_CMD(ASSOCIATE);
2148 IPW_CMD(SUPPORTED_RATES);
2149 IPW_CMD(SCAN_ABORT);
2150 IPW_CMD(TX_FLUSH);
2151 IPW_CMD(QOS_PARAMETERS);
2152 IPW_CMD(DINO_CONFIG);
2153 IPW_CMD(RSN_CAPABILITIES);
2154 IPW_CMD(RX_KEY);
2155 IPW_CMD(CARD_DISABLE);
2156 IPW_CMD(SEED_NUMBER);
2157 IPW_CMD(TX_POWER);
2158 IPW_CMD(COUNTRY_INFO);
2159 IPW_CMD(AIRONET_INFO);
2160 IPW_CMD(AP_TX_POWER);
2161 IPW_CMD(CCKM_INFO);
2162 IPW_CMD(CCX_VER_INFO);
2163 IPW_CMD(SET_CALIBRATION);
2164 IPW_CMD(SENSITIVITY_CALIB);
2165 IPW_CMD(RETRY_LIMIT);
2166 IPW_CMD(IPW_PRE_POWER_DOWN);
2167 IPW_CMD(VAP_BEACON_TEMPLATE);
2168 IPW_CMD(VAP_DTIM_PERIOD);
2169 IPW_CMD(EXT_SUPPORTED_RATES);
2170 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2171 IPW_CMD(VAP_QUIET_INTERVALS);
2172 IPW_CMD(VAP_CHANNEL_SWITCH);
2173 IPW_CMD(VAP_MANDATORY_CHANNELS);
2174 IPW_CMD(VAP_CELL_PWR_LIMIT);
2175 IPW_CMD(VAP_CF_PARAM_SET);
2176 IPW_CMD(VAP_SET_BEACONING_STATE);
2177 IPW_CMD(MEASUREMENT);
2178 IPW_CMD(POWER_CAPABILITY);
2179 IPW_CMD(SUPPORTED_CHANNELS);
2180 IPW_CMD(TPC_REPORT);
2181 IPW_CMD(WME_INFO);
2182 IPW_CMD(PRODUCTION_COMMAND);
2183 default:
2184 return "UNKNOWN";
2185 }
2186 }
2187
2188 #define HOST_COMPLETE_TIMEOUT HZ
2189
2190 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2191 {
2192 int rc = 0;
2193 unsigned long flags;
2194
2195 spin_lock_irqsave(&priv->lock, flags);
2196 if (priv->status & STATUS_HCMD_ACTIVE) {
2197 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2198 get_cmd_string(cmd->cmd));
2199 spin_unlock_irqrestore(&priv->lock, flags);
2200 return -EAGAIN;
2201 }
2202
2203 priv->status |= STATUS_HCMD_ACTIVE;
2204
2205 if (priv->cmdlog) {
2206 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2207 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2208 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2209 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2210 cmd->len);
2211 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2212 }
2213
2214 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2215 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2216 priv->status);
2217
2218 #ifndef DEBUG_CMD_WEP_KEY
2219 if (cmd->cmd == IPW_CMD_WEP_KEY)
2220 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2221 else
2222 #endif
2223 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2224
2225 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2226 if (rc) {
2227 priv->status &= ~STATUS_HCMD_ACTIVE;
2228 IPW_ERROR("Failed to send %s: Reason %d\n",
2229 get_cmd_string(cmd->cmd), rc);
2230 spin_unlock_irqrestore(&priv->lock, flags);
2231 goto exit;
2232 }
2233 spin_unlock_irqrestore(&priv->lock, flags);
2234
2235 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2236 !(priv->
2237 status & STATUS_HCMD_ACTIVE),
2238 HOST_COMPLETE_TIMEOUT);
2239 if (rc == 0) {
2240 spin_lock_irqsave(&priv->lock, flags);
2241 if (priv->status & STATUS_HCMD_ACTIVE) {
2242 IPW_ERROR("Failed to send %s: Command timed out.\n",
2243 get_cmd_string(cmd->cmd));
2244 priv->status &= ~STATUS_HCMD_ACTIVE;
2245 spin_unlock_irqrestore(&priv->lock, flags);
2246 rc = -EIO;
2247 goto exit;
2248 }
2249 spin_unlock_irqrestore(&priv->lock, flags);
2250 } else
2251 rc = 0;
2252
2253 if (priv->status & STATUS_RF_KILL_HW) {
2254 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2255 get_cmd_string(cmd->cmd));
2256 rc = -EIO;
2257 goto exit;
2258 }
2259
2260 exit:
2261 if (priv->cmdlog) {
2262 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2263 priv->cmdlog_pos %= priv->cmdlog_len;
2264 }
2265 return rc;
2266 }
2267
2268 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2269 {
2270 struct host_cmd cmd = {
2271 .cmd = command,
2272 };
2273
2274 return __ipw_send_cmd(priv, &cmd);
2275 }
2276
2277 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2278 void *data)
2279 {
2280 struct host_cmd cmd = {
2281 .cmd = command,
2282 .len = len,
2283 .param = data,
2284 };
2285
2286 return __ipw_send_cmd(priv, &cmd);
2287 }
2288
2289 static int ipw_send_host_complete(struct ipw_priv *priv)
2290 {
2291 if (!priv) {
2292 IPW_ERROR("Invalid args\n");
2293 return -1;
2294 }
2295
2296 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2297 }
2298
2299 static int ipw_send_system_config(struct ipw_priv *priv)
2300 {
2301 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2302 sizeof(priv->sys_config),
2303 &priv->sys_config);
2304 }
2305
2306 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2307 {
2308 if (!priv || !ssid) {
2309 IPW_ERROR("Invalid args\n");
2310 return -1;
2311 }
2312
2313 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2314 ssid);
2315 }
2316
2317 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2318 {
2319 if (!priv || !mac) {
2320 IPW_ERROR("Invalid args\n");
2321 return -1;
2322 }
2323
2324 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2325 priv->net_dev->name, mac);
2326
2327 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2328 }
2329
2330 static void ipw_adapter_restart(void *adapter)
2331 {
2332 struct ipw_priv *priv = adapter;
2333
2334 if (priv->status & STATUS_RF_KILL_MASK)
2335 return;
2336
2337 ipw_down(priv);
2338
2339 if (priv->assoc_network &&
2340 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2341 ipw_remove_current_network(priv);
2342
2343 if (ipw_up(priv)) {
2344 IPW_ERROR("Failed to up device\n");
2345 return;
2346 }
2347 }
2348
2349 static void ipw_bg_adapter_restart(struct work_struct *work)
2350 {
2351 struct ipw_priv *priv =
2352 container_of(work, struct ipw_priv, adapter_restart);
2353 mutex_lock(&priv->mutex);
2354 ipw_adapter_restart(priv);
2355 mutex_unlock(&priv->mutex);
2356 }
2357
2358 static void ipw_abort_scan(struct ipw_priv *priv);
2359
2360 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2361
2362 static void ipw_scan_check(void *data)
2363 {
2364 struct ipw_priv *priv = data;
2365
2366 if (priv->status & STATUS_SCAN_ABORTING) {
2367 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2368 "adapter after (%dms).\n",
2369 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2370 schedule_work(&priv->adapter_restart);
2371 } else if (priv->status & STATUS_SCANNING) {
2372 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2373 "after (%dms).\n",
2374 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2375 ipw_abort_scan(priv);
2376 schedule_delayed_work(&priv->scan_check, HZ);
2377 }
2378 }
2379
2380 static void ipw_bg_scan_check(struct work_struct *work)
2381 {
2382 struct ipw_priv *priv =
2383 container_of(work, struct ipw_priv, scan_check.work);
2384 mutex_lock(&priv->mutex);
2385 ipw_scan_check(priv);
2386 mutex_unlock(&priv->mutex);
2387 }
2388
2389 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2390 struct ipw_scan_request_ext *request)
2391 {
2392 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2393 sizeof(*request), request);
2394 }
2395
2396 static int ipw_send_scan_abort(struct ipw_priv *priv)
2397 {
2398 if (!priv) {
2399 IPW_ERROR("Invalid args\n");
2400 return -1;
2401 }
2402
2403 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2404 }
2405
2406 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2407 {
2408 struct ipw_sensitivity_calib calib = {
2409 .beacon_rssi_raw = cpu_to_le16(sens),
2410 };
2411
2412 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2413 &calib);
2414 }
2415
2416 static int ipw_send_associate(struct ipw_priv *priv,
2417 struct ipw_associate *associate)
2418 {
2419 if (!priv || !associate) {
2420 IPW_ERROR("Invalid args\n");
2421 return -1;
2422 }
2423
2424 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2425 associate);
2426 }
2427
2428 static int ipw_send_supported_rates(struct ipw_priv *priv,
2429 struct ipw_supported_rates *rates)
2430 {
2431 if (!priv || !rates) {
2432 IPW_ERROR("Invalid args\n");
2433 return -1;
2434 }
2435
2436 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2437 rates);
2438 }
2439
2440 static int ipw_set_random_seed(struct ipw_priv *priv)
2441 {
2442 u32 val;
2443
2444 if (!priv) {
2445 IPW_ERROR("Invalid args\n");
2446 return -1;
2447 }
2448
2449 get_random_bytes(&val, sizeof(val));
2450
2451 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2452 }
2453
2454 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2455 {
2456 __le32 v = cpu_to_le32(phy_off);
2457 if (!priv) {
2458 IPW_ERROR("Invalid args\n");
2459 return -1;
2460 }
2461
2462 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2463 }
2464
2465 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2466 {
2467 if (!priv || !power) {
2468 IPW_ERROR("Invalid args\n");
2469 return -1;
2470 }
2471
2472 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2473 }
2474
2475 static int ipw_set_tx_power(struct ipw_priv *priv)
2476 {
2477 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2478 struct ipw_tx_power tx_power;
2479 s8 max_power;
2480 int i;
2481
2482 memset(&tx_power, 0, sizeof(tx_power));
2483
2484 /* configure device for 'G' band */
2485 tx_power.ieee_mode = IPW_G_MODE;
2486 tx_power.num_channels = geo->bg_channels;
2487 for (i = 0; i < geo->bg_channels; i++) {
2488 max_power = geo->bg[i].max_power;
2489 tx_power.channels_tx_power[i].channel_number =
2490 geo->bg[i].channel;
2491 tx_power.channels_tx_power[i].tx_power = max_power ?
2492 min(max_power, priv->tx_power) : priv->tx_power;
2493 }
2494 if (ipw_send_tx_power(priv, &tx_power))
2495 return -EIO;
2496
2497 /* configure device to also handle 'B' band */
2498 tx_power.ieee_mode = IPW_B_MODE;
2499 if (ipw_send_tx_power(priv, &tx_power))
2500 return -EIO;
2501
2502 /* configure device to also handle 'A' band */
2503 if (priv->ieee->abg_true) {
2504 tx_power.ieee_mode = IPW_A_MODE;
2505 tx_power.num_channels = geo->a_channels;
2506 for (i = 0; i < tx_power.num_channels; i++) {
2507 max_power = geo->a[i].max_power;
2508 tx_power.channels_tx_power[i].channel_number =
2509 geo->a[i].channel;
2510 tx_power.channels_tx_power[i].tx_power = max_power ?
2511 min(max_power, priv->tx_power) : priv->tx_power;
2512 }
2513 if (ipw_send_tx_power(priv, &tx_power))
2514 return -EIO;
2515 }
2516 return 0;
2517 }
2518
2519 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2520 {
2521 struct ipw_rts_threshold rts_threshold = {
2522 .rts_threshold = cpu_to_le16(rts),
2523 };
2524
2525 if (!priv) {
2526 IPW_ERROR("Invalid args\n");
2527 return -1;
2528 }
2529
2530 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2531 sizeof(rts_threshold), &rts_threshold);
2532 }
2533
2534 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2535 {
2536 struct ipw_frag_threshold frag_threshold = {
2537 .frag_threshold = cpu_to_le16(frag),
2538 };
2539
2540 if (!priv) {
2541 IPW_ERROR("Invalid args\n");
2542 return -1;
2543 }
2544
2545 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2546 sizeof(frag_threshold), &frag_threshold);
2547 }
2548
2549 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2550 {
2551 __le32 param;
2552
2553 if (!priv) {
2554 IPW_ERROR("Invalid args\n");
2555 return -1;
2556 }
2557
2558 /* If on battery, set to 3, if AC set to CAM, else user
2559 * level */
2560 switch (mode) {
2561 case IPW_POWER_BATTERY:
2562 param = cpu_to_le32(IPW_POWER_INDEX_3);
2563 break;
2564 case IPW_POWER_AC:
2565 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2566 break;
2567 default:
2568 param = cpu_to_le32(mode);
2569 break;
2570 }
2571
2572 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2573 &param);
2574 }
2575
2576 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2577 {
2578 struct ipw_retry_limit retry_limit = {
2579 .short_retry_limit = slimit,
2580 .long_retry_limit = llimit
2581 };
2582
2583 if (!priv) {
2584 IPW_ERROR("Invalid args\n");
2585 return -1;
2586 }
2587
2588 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2589 &retry_limit);
2590 }
2591
2592 /*
2593 * The IPW device contains a Microwire compatible EEPROM that stores
2594 * various data like the MAC address. Usually the firmware has exclusive
2595 * access to the eeprom, but during device initialization (before the
2596 * device driver has sent the HostComplete command to the firmware) the
2597 * device driver has read access to the EEPROM by way of indirect addressing
2598 * through a couple of memory mapped registers.
2599 *
2600 * The following is a simplified implementation for pulling data out of the
2601 * eeprom, along with some helper functions to find information in
2602 * the per device private data's copy of the eeprom.
2603 *
2604 * NOTE: To better understand how these functions work (i.e. what is a chip
2605 * select and why we have to keep driving the eeprom clock?), read
2606 * just about any data sheet for a Microwire compatible EEPROM.
2607 */
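
/*
 * Minimal usage sketch of the helpers below (illustration only, kept out
 * of the build with #if 0).  It simply dumps the first few 16-bit EEPROM
 * words; the word addresses used here are arbitrary example values, and
 * the real driver instead caches all 128 words in ipw_eeprom_init_sram()
 * further down.
 */
#if 0
static void example_dump_eeprom_words(struct ipw_priv *priv)
{
	u8 addr;

	for (addr = 0; addr < 4; addr++)
		IPW_DEBUG_INFO("eeprom word 0x%02x = 0x%04x\n",
			       addr, eeprom_read_u16(priv, addr));
}
#endif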
2608
2609 /* write a 32 bit value into the indirect accessor register */
2610 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2611 {
2612 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2613
2614 /* the eeprom requires some time to complete the operation */
2615 udelay(p->eeprom_delay);
2616 }
2617
2618 /* perform a chip select operation */
2619 static void eeprom_cs(struct ipw_priv *priv)
2620 {
2621 eeprom_write_reg(priv, 0);
2622 eeprom_write_reg(priv, EEPROM_BIT_CS);
2623 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2624 eeprom_write_reg(priv, EEPROM_BIT_CS);
2625 }
2626
2627 /* perform a chip de-select operation */
2628 static void eeprom_disable_cs(struct ipw_priv *priv)
2629 {
2630 eeprom_write_reg(priv, EEPROM_BIT_CS);
2631 eeprom_write_reg(priv, 0);
2632 eeprom_write_reg(priv, EEPROM_BIT_SK);
2633 }
2634
2635 /* push a single bit down to the eeprom */
2636 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2637 {
2638 int d = (bit ? EEPROM_BIT_DI : 0);
2639 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2640 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2641 }
2642
2643 /* push an opcode followed by an address down to the eeprom */
2644 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2645 {
2646 int i;
2647
2648 eeprom_cs(priv);
2649 eeprom_write_bit(priv, 1);
2650 eeprom_write_bit(priv, op & 2);
2651 eeprom_write_bit(priv, op & 1);
2652 for (i = 7; i >= 0; i--) {
2653 eeprom_write_bit(priv, addr & (1 << i));
2654 }
2655 }
2656
2657 /* pull 16 bits off the eeprom, one bit at a time */
2658 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2659 {
2660 int i;
2661 u16 r = 0;
2662
2663 /* Send READ Opcode */
2664 eeprom_op(priv, EEPROM_CMD_READ, addr);
2665
2666 /* Send dummy bit */
2667 eeprom_write_reg(priv, EEPROM_BIT_CS);
2668
2669 /* Read the byte off the eeprom one bit at a time */
2670 for (i = 0; i < 16; i++) {
2671 u32 data = 0;
2672 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2673 eeprom_write_reg(priv, EEPROM_BIT_CS);
2674 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2675 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2676 }
2677
2678 /* Send another dummy bit */
2679 eeprom_write_reg(priv, 0);
2680 eeprom_disable_cs(priv);
2681
2682 return r;
2683 }
2684
2685 /* helper function for pulling the mac address out of the private */
2686 /* data's copy of the eeprom data */
2687 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2688 {
2689 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2690 }
2691
2692 /*
2693 * Either the device driver (i.e. the host) or the firmware can
2694 * load eeprom data into the designated region in SRAM. If neither
2695 * happens then the FW will shut down with a fatal error.
2696 *
2697 * In order to signal the FW to load the EEPROM itself, the
2698 * EEPROM_LOAD_DISABLE region of shared SRAM needs to be non-zero.
2699 */
2700 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2701 {
2702 int i;
2703 __le16 *eeprom = (__le16 *) priv->eeprom;
2704
2705 IPW_DEBUG_TRACE(">>\n");
2706
2707 /* read entire contents of eeprom into private buffer */
2708 for (i = 0; i < 128; i++)
2709 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2710
2711 /*
2712 If the data looks correct, then copy it to our private
2713 copy. Otherwise let the firmware know to perform the operation
2714 on its own.
2715 */
2716 if (priv->eeprom[EEPROM_VERSION] != 0) {
2717 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2718
2719 /* write the eeprom data to sram */
2720 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2721 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2722
2723 /* Do not load eeprom data on fatal error or suspend */
2724 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2725 } else {
2726 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2727
2728 /* Load eeprom data on fatal error or suspend */
2729 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2730 }
2731
2732 IPW_DEBUG_TRACE("<<\n");
2733 }
2734
2735 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2736 {
2737 count >>= 2;
2738 if (!count)
2739 return;
2740 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2741 while (count--)
2742 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2743 }
2744
2745 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2746 {
2747 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2748 CB_NUMBER_OF_ELEMENTS_SMALL *
2749 sizeof(struct command_block));
2750 }
2751
2752 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2753 { /* start dma engine but no transfers yet */
2754
2755 IPW_DEBUG_FW(">> :\n");
2756
2757 /* Start the dma */
2758 ipw_fw_dma_reset_command_blocks(priv);
2759
2760 /* Write CB base address */
2761 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2762
2763 IPW_DEBUG_FW("<< :\n");
2764 return 0;
2765 }
2766
2767 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2768 {
2769 u32 control = 0;
2770
2771 IPW_DEBUG_FW(">> :\n");
2772
2773 /* set the Stop and Abort bit */
2774 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2775 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2776 priv->sram_desc.last_cb_index = 0;
2777
2778 IPW_DEBUG_FW("<<\n");
2779 }
2780
2781 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2782 struct command_block *cb)
2783 {
2784 u32 address =
2785 IPW_SHARED_SRAM_DMA_CONTROL +
2786 (sizeof(struct command_block) * index);
2787 IPW_DEBUG_FW(">> :\n");
2788
2789 ipw_write_indirect(priv, address, (u8 *) cb,
2790 (int)sizeof(struct command_block));
2791
2792 IPW_DEBUG_FW("<< :\n");
2793 return 0;
2794
2795 }
2796
2797 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2798 {
2799 u32 control = 0;
2800 u32 index = 0;
2801
2802 IPW_DEBUG_FW(">> :\n");
2803
2804 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2805 ipw_fw_dma_write_command_block(priv, index,
2806 &priv->sram_desc.cb_list[index]);
2807
2808 /* Enable the DMA in the CSR register */
2809 ipw_clear_bit(priv, IPW_RESET_REG,
2810 IPW_RESET_REG_MASTER_DISABLED |
2811 IPW_RESET_REG_STOP_MASTER);
2812
2813 /* Set the Start bit. */
2814 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2815 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2816
2817 IPW_DEBUG_FW("<< :\n");
2818 return 0;
2819 }
2820
2821 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2822 {
2823 u32 address;
2824 u32 register_value = 0;
2825 u32 cb_fields_address = 0;
2826
2827 IPW_DEBUG_FW(">> :\n");
2828 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2829 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2830
2831 /* Read the DMA Control register */
2832 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2833 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2834
2835 /* Print the CB values */
2836 cb_fields_address = address;
2837 register_value = ipw_read_reg32(priv, cb_fields_address);
2838 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2839
2840 cb_fields_address += sizeof(u32);
2841 register_value = ipw_read_reg32(priv, cb_fields_address);
2842 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2843
2844 cb_fields_address += sizeof(u32);
2845 register_value = ipw_read_reg32(priv, cb_fields_address);
2846 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2847 register_value);
2848
2849 cb_fields_address += sizeof(u32);
2850 register_value = ipw_read_reg32(priv, cb_fields_address);
2851 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2852
2853 IPW_DEBUG_FW("<< :\n");
2854 }
2855
2856 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2857 {
2858 u32 current_cb_address = 0;
2859 u32 current_cb_index = 0;
2860
2861 IPW_DEBUG_FW(">> :\n");
2862 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2863
2864 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2865 sizeof(struct command_block);
2866
2867 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2868 current_cb_index, current_cb_address);
2869
2870 IPW_DEBUG_FW("<< :\n");
2871 return current_cb_index;
2872
2873 }
2874
2875 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2876 u32 src_address,
2877 u32 dest_address,
2878 u32 length,
2879 int interrupt_enabled, int is_last)
2880 {
2881
2882 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2883 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2884 CB_DEST_SIZE_LONG;
2885 struct command_block *cb;
2886 u32 last_cb_element = 0;
2887
2888 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2889 src_address, dest_address, length);
2890
2891 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2892 return -1;
2893
2894 last_cb_element = priv->sram_desc.last_cb_index;
2895 cb = &priv->sram_desc.cb_list[last_cb_element];
2896 priv->sram_desc.last_cb_index++;
2897
2898 /* Calculate the new CB control word */
2899 if (interrupt_enabled)
2900 control |= CB_INT_ENABLED;
2901
2902 if (is_last)
2903 control |= CB_LAST_VALID;
2904
2905 control |= length;
2906
2907 /* Calculate the CB Element's checksum value */
2908 cb->status = control ^ src_address ^ dest_address;
2909
2910 /* Copy the Source and Destination addresses */
2911 cb->dest_addr = dest_address;
2912 cb->source_addr = src_address;
2913
2914 /* Copy the Control Word last */
2915 cb->control = control;
2916
2917 return 0;
2918 }
2919
2920 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2921 int nr, u32 dest_address, u32 len)
2922 {
2923 int ret, i;
2924 u32 size;
2925
2926 IPW_DEBUG_FW(">>\n");
2927 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2928 nr, dest_address, len);
2929
2930 for (i = 0; i < nr; i++) {
2931 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2932 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2933 dest_address +
2934 i * CB_MAX_LENGTH, size,
2935 0, 0);
2936 if (ret) {
2937 IPW_DEBUG_FW_INFO(": Failed\n");
2938 return -1;
2939 } else
2940 IPW_DEBUG_FW_INFO(": Added new cb\n");
2941 }
2942
2943 IPW_DEBUG_FW("<<\n");
2944 return 0;
2945 }
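
/*
 * Worked example for the buffer-splitting arithmetic above and for the
 * matching "nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH" in
 * ipw_load_firmware(): assuming, purely for illustration, that
 * CB_MAX_LENGTH is 0x2000 bytes and a firmware chunk is 0x5000 bytes,
 * nr comes out as 3 and the three command blocks cover 0x2000, 0x2000
 * and 0x1000 bytes at consecutive destination addresses.
 */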
2946
2947 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2948 {
2949 u32 current_index = 0, previous_index;
2950 u32 watchdog = 0;
2951
2952 IPW_DEBUG_FW(">> :\n");
2953
2954 current_index = ipw_fw_dma_command_block_index(priv);
2955 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2956 (int)priv->sram_desc.last_cb_index);
2957
2958 while (current_index < priv->sram_desc.last_cb_index) {
2959 udelay(50);
2960 previous_index = current_index;
2961 current_index = ipw_fw_dma_command_block_index(priv);
2962
2963 if (previous_index < current_index) {
2964 watchdog = 0;
2965 continue;
2966 }
2967 if (++watchdog > 400) {
2968 IPW_DEBUG_FW_INFO("Timeout\n");
2969 ipw_fw_dma_dump_command_block(priv);
2970 ipw_fw_dma_abort(priv);
2971 return -1;
2972 }
2973 }
2974
2975 ipw_fw_dma_abort(priv);
2976
2977 /*Disable the DMA in the CSR register */
2978 ipw_set_bit(priv, IPW_RESET_REG,
2979 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2980
2981 IPW_DEBUG_FW("<< dmaWaitSync\n");
2982 return 0;
2983 }
2984
2985 static void ipw_remove_current_network(struct ipw_priv *priv)
2986 {
2987 struct list_head *element, *safe;
2988 struct libipw_network *network = NULL;
2989 unsigned long flags;
2990
2991 spin_lock_irqsave(&priv->ieee->lock, flags);
2992 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2993 network = list_entry(element, struct libipw_network, list);
2994 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2995 list_del(element);
2996 list_add_tail(&network->list,
2997 &priv->ieee->network_free_list);
2998 }
2999 }
3000 spin_unlock_irqrestore(&priv->ieee->lock, flags);
3001 }
3002
3003 /**
3004 * Check that card is still alive.
3005 * Reads debug register from domain0.
3006 * If card is present, pre-defined value should
3007 * be found there.
3008 *
3009 * @param priv
3010 * @return 1 if card is present, 0 otherwise
3011 */
3012 static inline int ipw_alive(struct ipw_priv *priv)
3013 {
3014 return ipw_read32(priv, 0x90) == 0xd55555d5;
3015 }
3016
3017 /* timeout in msec, attempted in 10-msec quanta */
3018 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3019 int timeout)
3020 {
3021 int i = 0;
3022
3023 do {
3024 if ((ipw_read32(priv, addr) & mask) == mask)
3025 return i;
3026 mdelay(10);
3027 i += 10;
3028 } while (i < timeout);
3029
3030 return -ETIME;
3031 }
3032
3033 /* These functions load the firmware and microcode needed to operate the
3034 * ipw hardware.  They assume the buffer holds the complete image and that
3035 * the caller handles memory allocation and cleanup.
3036 */
3037
3038 static int ipw_stop_master(struct ipw_priv *priv)
3039 {
3040 int rc;
3041
3042 IPW_DEBUG_TRACE(">>\n");
3043 /* stop master. typical delay - 0 */
3044 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3045
3046 /* timeout is in msec, polled in 10-msec quanta */
3047 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3048 IPW_RESET_REG_MASTER_DISABLED, 100);
3049 if (rc < 0) {
3050 IPW_ERROR("wait for stop master failed after 100ms\n");
3051 return -1;
3052 }
3053
3054 IPW_DEBUG_INFO("stop master %dms\n", rc);
3055
3056 return rc;
3057 }
3058
3059 static void ipw_arc_release(struct ipw_priv *priv)
3060 {
3061 IPW_DEBUG_TRACE(">>\n");
3062 mdelay(5);
3063
3064 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3065
3066 /* the required timing is unknown, so add some delay for safety */
3067 mdelay(5);
3068 }
3069
3070 struct fw_chunk {
3071 __le32 address;
3072 __le32 length;
3073 };
3074
3075 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3076 {
3077 int rc = 0, i, addr;
3078 u8 cr = 0;
3079 __le16 *image;
3080
3081 image = (__le16 *) data;
3082
3083 IPW_DEBUG_TRACE(">>\n");
3084
3085 rc = ipw_stop_master(priv);
3086
3087 if (rc < 0)
3088 return rc;
3089
3090 for (addr = IPW_SHARED_LOWER_BOUND;
3091 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3092 ipw_write32(priv, addr, 0);
3093 }
3094
3095 /* no ucode (yet) */
3096 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3097 /* destroy DMA queues */
3098 /* reset sequence */
3099
3100 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3101 ipw_arc_release(priv);
3102 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3103 mdelay(1);
3104
3105 /* reset PHY */
3106 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3107 mdelay(1);
3108
3109 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3110 mdelay(1);
3111
3112 /* enable ucode store */
3113 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3114 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3115 mdelay(1);
3116
3117 /* write ucode */
3118 /**
3119 * @bug
3120 * Do NOT set indirect address register once and then
3121 * store data to indirect data register in the loop.
3122 * It seems very reasonable, but in this case DINO does not
3123 * accept the ucode. It is essential to set the address each time.
3124 */
3125 /* load new ipw uCode */
3126 for (i = 0; i < len / 2; i++)
3127 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3128 le16_to_cpu(image[i]));
3129
3130 /* enable DINO */
3131 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3132 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3133
3134 /* this is where the igx / win driver deviates from the VAP driver. */
3135
3136 /* wait for alive response */
3137 for (i = 0; i < 100; i++) {
3138 /* poll for incoming data */
3139 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3140 if (cr & DINO_RXFIFO_DATA)
3141 break;
3142 mdelay(1);
3143 }
3144
3145 if (cr & DINO_RXFIFO_DATA) {
3146 /* the alive command response size is NOT a multiple of 4 */
3147 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3148
3149 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3150 response_buffer[i] =
3151 cpu_to_le32(ipw_read_reg32(priv,
3152 IPW_BASEBAND_RX_FIFO_READ));
3153 memcpy(&priv->dino_alive, response_buffer,
3154 sizeof(priv->dino_alive));
3155 if (priv->dino_alive.alive_command == 1
3156 && priv->dino_alive.ucode_valid == 1) {
3157 rc = 0;
3158 IPW_DEBUG_INFO
3159 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3160 "of %02d/%02d/%02d %02d:%02d\n",
3161 priv->dino_alive.software_revision,
3162 priv->dino_alive.software_revision,
3163 priv->dino_alive.device_identifier,
3164 priv->dino_alive.device_identifier,
3165 priv->dino_alive.time_stamp[0],
3166 priv->dino_alive.time_stamp[1],
3167 priv->dino_alive.time_stamp[2],
3168 priv->dino_alive.time_stamp[3],
3169 priv->dino_alive.time_stamp[4]);
3170 } else {
3171 IPW_DEBUG_INFO("Microcode is not alive\n");
3172 rc = -EINVAL;
3173 }
3174 } else {
3175 IPW_DEBUG_INFO("No alive response from DINO\n");
3176 rc = -ETIME;
3177 }
3178
3179 /* disable DINO, otherwise for some reason the
3180 firmware has problems getting the alive response. */
3181 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3182
3183 return rc;
3184 }
3185
3186 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3187 {
3188 int ret = -1;
3189 int offset = 0;
3190 struct fw_chunk *chunk;
3191 int total_nr = 0;
3192 int i;
3193 struct pci_pool *pool;
3194 void **virts;
3195 dma_addr_t *phys;
3196
3197 IPW_DEBUG_TRACE(">> :\n");
3198
3199 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3200 GFP_KERNEL);
3201 if (!virts)
3202 return -ENOMEM;
3203
3204 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3205 GFP_KERNEL);
3206 if (!phys) {
3207 kfree(virts);
3208 return -ENOMEM;
3209 }
3210 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3211 if (!pool) {
3212 IPW_ERROR("pci_pool_create failed\n");
3213 kfree(phys);
3214 kfree(virts);
3215 return -ENOMEM;
3216 }
3217
3218 /* Start the Dma */
3219 ret = ipw_fw_dma_enable(priv);
3220
3221 /* if the DMA were already active here, it would be a bug. */
3222 BUG_ON(priv->sram_desc.last_cb_index > 0);
3223
3224 do {
3225 u32 chunk_len;
3226 u8 *start;
3227 int size;
3228 int nr = 0;
3229
3230 chunk = (struct fw_chunk *)(data + offset);
3231 offset += sizeof(struct fw_chunk);
3232 chunk_len = le32_to_cpu(chunk->length);
3233 start = data + offset;
3234
3235 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3236 for (i = 0; i < nr; i++) {
3237 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3238 &phys[total_nr]);
3239 if (!virts[total_nr]) {
3240 ret = -ENOMEM;
3241 goto out;
3242 }
3243 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3244 CB_MAX_LENGTH);
3245 memcpy(virts[total_nr], start, size);
3246 start += size;
3247 total_nr++;
3248 /* We don't support fw chunk larger than 64*8K */
3249 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3250 }
3251
3252 /* build DMA packet and queue up for sending */
3253 /* DMA to chunk->address the chunk->length bytes from data +
3254 * offset */
3255 /* Dma loading */
3256 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3257 nr, le32_to_cpu(chunk->address),
3258 chunk_len);
3259 if (ret) {
3260 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3261 goto out;
3262 }
3263
3264 offset += chunk_len;
3265 } while (offset < len);
3266
3267 /* Run the DMA and wait for the answer */
3268 ret = ipw_fw_dma_kick(priv);
3269 if (ret) {
3270 IPW_ERROR("dmaKick Failed\n");
3271 goto out;
3272 }
3273
3274 ret = ipw_fw_dma_wait(priv);
3275 if (ret) {
3276 IPW_ERROR("dmaWaitSync Failed\n");
3277 goto out;
3278 }
3279 out:
3280 for (i = 0; i < total_nr; i++)
3281 pci_pool_free(pool, virts[i], phys[i]);
3282
3283 pci_pool_destroy(pool);
3284 kfree(phys);
3285 kfree(virts);
3286
3287 return ret;
3288 }
3289
3290 /* stop nic */
3291 static int ipw_stop_nic(struct ipw_priv *priv)
3292 {
3293 int rc = 0;
3294
3295 /* stop */
3296 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3297
3298 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3299 IPW_RESET_REG_MASTER_DISABLED, 500);
3300 if (rc < 0) {
3301 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3302 return rc;
3303 }
3304
3305 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3306
3307 return rc;
3308 }
3309
3310 static void ipw_start_nic(struct ipw_priv *priv)
3311 {
3312 IPW_DEBUG_TRACE(">>\n");
3313
3314 /* prvHwStartNic release ARC */
3315 ipw_clear_bit(priv, IPW_RESET_REG,
3316 IPW_RESET_REG_MASTER_DISABLED |
3317 IPW_RESET_REG_STOP_MASTER |
3318 CBD_RESET_REG_PRINCETON_RESET);
3319
3320 /* enable power management */
3321 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3322 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3323
3324 IPW_DEBUG_TRACE("<<\n");
3325 }
3326
3327 static int ipw_init_nic(struct ipw_priv *priv)
3328 {
3329 int rc;
3330
3331 IPW_DEBUG_TRACE(">>\n");
3332 /* reset */
3333 /*prvHwInitNic */
3334 /* set "initialization complete" bit to move adapter to D0 state */
3335 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3336
3337 /* low-level PLL activation */
3338 ipw_write32(priv, IPW_READ_INT_REGISTER,
3339 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3340
3341 /* wait for clock stabilization */
3342 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3343 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3344 if (rc < 0)
3345 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3346
3347 /* assert SW reset */
3348 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3349
3350 udelay(10);
3351
3352 /* set "initialization complete" bit to move adapter to D0 state */
3353 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3354
3355 IPW_DEBUG_TRACE("<<\n");
3356 return 0;
3357 }
3358
3359 /* Call this function from process context, it will sleep in request_firmware.
3360 * Probe is an ok place to call this from.
3361 */
3362 static int ipw_reset_nic(struct ipw_priv *priv)
3363 {
3364 int rc = 0;
3365 unsigned long flags;
3366
3367 IPW_DEBUG_TRACE(">>\n");
3368
3369 rc = ipw_init_nic(priv);
3370
3371 spin_lock_irqsave(&priv->lock, flags);
3372 /* Clear the 'host command active' bit... */
3373 priv->status &= ~STATUS_HCMD_ACTIVE;
3374 wake_up_interruptible(&priv->wait_command_queue);
3375 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3376 wake_up_interruptible(&priv->wait_state);
3377 spin_unlock_irqrestore(&priv->lock, flags);
3378
3379 IPW_DEBUG_TRACE("<<\n");
3380 return rc;
3381 }
3382
3383
3384 struct ipw_fw {
3385 __le32 ver;
3386 __le32 boot_size;
3387 __le32 ucode_size;
3388 __le32 fw_size;
3389 u8 data[0];
3390 };
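
/*
 * On-disk layout implied by the header above and by the way ipw_load()
 * slices the image further down:
 *
 *   [ ver | boot_size | ucode_size | fw_size ]   <- struct ipw_fw header
 *   [ boot image,  boot_size bytes  ]
 *   [ ucode image, ucode_size bytes ]
 *   [ fw image,    fw_size bytes    ]
 *
 * ipw_get_fw() below only sanity-checks that the file is at least this
 * big; the individual images are then located by simple offset arithmetic.
 */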
3391
3392 static int ipw_get_fw(struct ipw_priv *priv,
3393 const struct firmware **raw, const char *name)
3394 {
3395 struct ipw_fw *fw;
3396 int rc;
3397
3398 /* ask firmware_class module to get the boot firmware off disk */
3399 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3400 if (rc < 0) {
3401 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3402 return rc;
3403 }
3404
3405 if ((*raw)->size < sizeof(*fw)) {
3406 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3407 return -EINVAL;
3408 }
3409
3410 fw = (void *)(*raw)->data;
3411
3412 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3413 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3414 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3415 name, (*raw)->size);
3416 return -EINVAL;
3417 }
3418
3419 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3420 name,
3421 le32_to_cpu(fw->ver) >> 16,
3422 le32_to_cpu(fw->ver) & 0xff,
3423 (*raw)->size - sizeof(*fw));
3424 return 0;
3425 }
3426
3427 #define IPW_RX_BUF_SIZE (3000)
3428
3429 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3430 struct ipw_rx_queue *rxq)
3431 {
3432 unsigned long flags;
3433 int i;
3434
3435 spin_lock_irqsave(&rxq->lock, flags);
3436
3437 INIT_LIST_HEAD(&rxq->rx_free);
3438 INIT_LIST_HEAD(&rxq->rx_used);
3439
3440 /* Fill the rx_used queue with _all_ of the Rx buffers */
3441 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3442 /* In the reset function, these buffers may have been allocated
3443 * to an SKB, so we need to unmap and free potential storage */
3444 if (rxq->pool[i].skb != NULL) {
3445 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3446 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3447 dev_kfree_skb(rxq->pool[i].skb);
3448 rxq->pool[i].skb = NULL;
3449 }
3450 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3451 }
3452
3453 /* Set us so that we have processed and used all buffers, but have
3454 * not restocked the Rx queue with fresh buffers */
3455 rxq->read = rxq->write = 0;
3456 rxq->free_count = 0;
3457 spin_unlock_irqrestore(&rxq->lock, flags);
3458 }
3459
3460 #ifdef CONFIG_PM
3461 static int fw_loaded = 0;
3462 static const struct firmware *raw = NULL;
3463
3464 static void free_firmware(void)
3465 {
3466 if (fw_loaded) {
3467 release_firmware(raw);
3468 raw = NULL;
3469 fw_loaded = 0;
3470 }
3471 }
3472 #else
3473 #define free_firmware() do {} while (0)
3474 #endif
3475
3476 static int ipw_load(struct ipw_priv *priv)
3477 {
3478 #ifndef CONFIG_PM
3479 const struct firmware *raw = NULL;
3480 #endif
3481 struct ipw_fw *fw;
3482 u8 *boot_img, *ucode_img, *fw_img;
3483 u8 *name = NULL;
3484 int rc = 0, retries = 3;
3485
3486 switch (priv->ieee->iw_mode) {
3487 case IW_MODE_ADHOC:
3488 name = "ipw2200-ibss.fw";
3489 break;
3490 #ifdef CONFIG_IPW2200_MONITOR
3491 case IW_MODE_MONITOR:
3492 name = "ipw2200-sniffer.fw";
3493 break;
3494 #endif
3495 case IW_MODE_INFRA:
3496 name = "ipw2200-bss.fw";
3497 break;
3498 }
3499
3500 if (!name) {
3501 rc = -EINVAL;
3502 goto error;
3503 }
3504
3505 #ifdef CONFIG_PM
3506 if (!fw_loaded) {
3507 #endif
3508 rc = ipw_get_fw(priv, &raw, name);
3509 if (rc < 0)
3510 goto error;
3511 #ifdef CONFIG_PM
3512 }
3513 #endif
3514
3515 fw = (void *)raw->data;
3516 boot_img = &fw->data[0];
3517 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3518 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3519 le32_to_cpu(fw->ucode_size)];
3520
3521 if (rc < 0)
3522 goto error;
3523
3524 if (!priv->rxq)
3525 priv->rxq = ipw_rx_queue_alloc(priv);
3526 else
3527 ipw_rx_queue_reset(priv, priv->rxq);
3528 if (!priv->rxq) {
3529 IPW_ERROR("Unable to initialize Rx queue\n");
3530 goto error;
3531 }
3532
3533 retry:
3534 /* Ensure interrupts are disabled */
3535 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3536 priv->status &= ~STATUS_INT_ENABLED;
3537
3538 /* ack pending interrupts */
3539 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3540
3541 ipw_stop_nic(priv);
3542
3543 rc = ipw_reset_nic(priv);
3544 if (rc < 0) {
3545 IPW_ERROR("Unable to reset NIC\n");
3546 goto error;
3547 }
3548
3549 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3550 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3551
3552 /* DMA the initial boot firmware into the device */
3553 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3554 if (rc < 0) {
3555 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3556 goto error;
3557 }
3558
3559 /* kick start the device */
3560 ipw_start_nic(priv);
3561
3562 /* wait for the device to finish its initial startup sequence */
3563 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3564 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3565 if (rc < 0) {
3566 IPW_ERROR("device failed to boot initial fw image\n");
3567 goto error;
3568 }
3569 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3570
3571 /* ack fw init done interrupt */
3572 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3573
3574 /* DMA the ucode into the device */
3575 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3576 if (rc < 0) {
3577 IPW_ERROR("Unable to load ucode: %d\n", rc);
3578 goto error;
3579 }
3580
3581 /* stop nic */
3582 ipw_stop_nic(priv);
3583
3584 /* DMA bss firmware into the device */
3585 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3586 if (rc < 0) {
3587 IPW_ERROR("Unable to load firmware: %d\n", rc);
3588 goto error;
3589 }
3590 #ifdef CONFIG_PM
3591 fw_loaded = 1;
3592 #endif
3593
3594 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3595
3596 rc = ipw_queue_reset(priv);
3597 if (rc < 0) {
3598 IPW_ERROR("Unable to initialize queues\n");
3599 goto error;
3600 }
3601
3602 /* Ensure interrupts are disabled */
3603 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3604 /* ack pending interrupts */
3605 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3606
3607 /* kick start the device */
3608 ipw_start_nic(priv);
3609
3610 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3611 if (retries > 0) {
3612 IPW_WARNING("Parity error. Retrying init.\n");
3613 retries--;
3614 goto retry;
3615 }
3616
3617 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3618 rc = -EIO;
3619 goto error;
3620 }
3621
3622 /* wait for the device */
3623 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3624 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3625 if (rc < 0) {
3626 IPW_ERROR("device failed to start within 500ms\n");
3627 goto error;
3628 }
3629 IPW_DEBUG_INFO("device response after %dms\n", rc);
3630
3631 /* ack fw init done interrupt */
3632 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3633
3634 /* read eeprom data and initialize the eeprom region of sram */
3635 priv->eeprom_delay = 1;
3636 ipw_eeprom_init_sram(priv);
3637
3638 /* enable interrupts */
3639 ipw_enable_interrupts(priv);
3640
3641 /* Ensure our queue has valid packets */
3642 ipw_rx_queue_replenish(priv);
3643
3644 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3645
3646 /* ack pending interrupts */
3647 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3648
3649 #ifndef CONFIG_PM
3650 release_firmware(raw);
3651 #endif
3652 return 0;
3653
3654 error:
3655 if (priv->rxq) {
3656 ipw_rx_queue_free(priv, priv->rxq);
3657 priv->rxq = NULL;
3658 }
3659 ipw_tx_queue_free(priv);
3660 if (raw)
3661 release_firmware(raw);
3662 #ifdef CONFIG_PM
3663 fw_loaded = 0;
3664 raw = NULL;
3665 #endif
3666
3667 return rc;
3668 }
3669
3670 /**
3671 * DMA services
3672 *
3673 * Theory of operation
3674 *
3675 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3676 * Two empty entries are always kept in the buffer to protect against overflow.
3677 *
3678 * For the Tx queues there are low-mark and high-mark limits. If, after queuing
3679 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
3680 * When packets are reclaimed (on the 'tx done' IRQ) and the free space becomes
3681 * > high mark, the Tx queue is resumed.
3682 *
3683 * The IPW operates with six queues, one receive queue in the device's
3684 * sram, one transmit queue for sending commands to the device firmware,
3685 * and four transmit queues for data.
3686 *
3687 * The four transmit queues allow for performing quality of service (qos)
3688 * transmissions as per the 802.11 protocol. Currently Linux does not
3689 * provide a mechanism to the user for utilizing prioritized queues, so
3690 * we only utilize the first data transmit queue (queue1).
3691 */
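
/*
 * Worked example of the free-space rule above, as implemented by
 * ipw_rx_queue_space() and ipw_tx_queue_space() below: with a
 * hypothetical 32-entry ring, read = 3 and write = 30, the raw
 * difference is -27, wrapping adds 32 to give 5, and reserving the two
 * guard entries leaves 3 usable slots.
 */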
3692
3693 /**
3694 * Driver allocates buffers of this size for Rx
3695 */
3696
3697 /**
3698 * ipw_rx_queue_space - Return number of free slots available in queue.
3699 */
3700 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3701 {
3702 int s = q->read - q->write;
3703 if (s <= 0)
3704 s += RX_QUEUE_SIZE;
3705 /* keep some buffer to not confuse full and empty queue */
3706 s -= 2;
3707 if (s < 0)
3708 s = 0;
3709 return s;
3710 }
3711
3712 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3713 {
3714 int s = q->last_used - q->first_empty;
3715 if (s <= 0)
3716 s += q->n_bd;
3717 s -= 2; /* keep some reserve to not confuse empty and full situations */
3718 if (s < 0)
3719 s = 0;
3720 return s;
3721 }
3722
3723 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3724 {
3725 return (++index == n_bd) ? 0 : index;
3726 }
3727
3728 /**
3729 * Initialize common DMA queue structure
3730 *
3731 * @param q queue to init
3732 * @param count Number of BD's to allocate. Should be power of 2
3733 * @param read_register Address for 'read' register
3734 * (not offset within BAR, full address)
3735 * @param write_register Address for 'write' register
3736 * (not offset within BAR, full address)
3737 * @param base_register Address for 'base' register
3738 * (not offset within BAR, full address)
3739 * @param size Address for 'size' register
3740 * (not offset within BAR, full address)
3741 */
3742 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3743 int count, u32 read, u32 write, u32 base, u32 size)
3744 {
3745 q->n_bd = count;
3746
3747 q->low_mark = q->n_bd / 4;
3748 if (q->low_mark < 4)
3749 q->low_mark = 4;
3750
3751 q->high_mark = q->n_bd / 8;
3752 if (q->high_mark < 2)
3753 q->high_mark = 2;
3754
3755 q->first_empty = q->last_used = 0;
3756 q->reg_r = read;
3757 q->reg_w = write;
3758
3759 ipw_write32(priv, base, q->dma_addr);
3760 ipw_write32(priv, size, count);
3761 ipw_write32(priv, read, 0);
3762 ipw_write32(priv, write, 0);
3763
3764 _ipw_read32(priv, 0x90);
3765 }
3766
3767 static int ipw_queue_tx_init(struct ipw_priv *priv,
3768 struct clx2_tx_queue *q,
3769 int count, u32 read, u32 write, u32 base, u32 size)
3770 {
3771 struct pci_dev *dev = priv->pci_dev;
3772
3773 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3774 if (!q->txb) {
3775 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3776 return -ENOMEM;
3777 }
3778
3779 q->bd =
3780 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3781 if (!q->bd) {
3782 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3783 sizeof(q->bd[0]) * count);
3784 kfree(q->txb);
3785 q->txb = NULL;
3786 return -ENOMEM;
3787 }
3788
3789 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3790 return 0;
3791 }
3792
3793 /**
3794 * Free one TFD, the one at index [txq->q.last_used].
3795 * Do NOT advance any indexes
3796 *
3797 * @param dev
3798 * @param txq
3799 */
3800 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3801 struct clx2_tx_queue *txq)
3802 {
3803 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3804 struct pci_dev *dev = priv->pci_dev;
3805 int i;
3806
3807 /* classify bd */
3808 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3809 /* nothing to cleanup after for host commands */
3810 return;
3811
3812 /* sanity check */
3813 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3814 IPW_ERROR("Too many chunks: %i\n",
3815 le32_to_cpu(bd->u.data.num_chunks));
3816 /** @todo issue a fatal error, it is quite a serious situation */
3817 return;
3818 }
3819
3820 /* unmap chunks if any */
3821 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3822 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3823 le16_to_cpu(bd->u.data.chunk_len[i]),
3824 PCI_DMA_TODEVICE);
3825 if (txq->txb[txq->q.last_used]) {
3826 libipw_txb_free(txq->txb[txq->q.last_used]);
3827 txq->txb[txq->q.last_used] = NULL;
3828 }
3829 }
3830 }
3831
3832 /**
3833 * Deallocate DMA queue.
3834 *
3835 * Empty queue by removing and destroying all BD's.
3836 * Free all buffers.
3837 *
3838 * @param dev
3839 * @param q
3840 */
3841 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3842 {
3843 struct clx2_queue *q = &txq->q;
3844 struct pci_dev *dev = priv->pci_dev;
3845
3846 if (q->n_bd == 0)
3847 return;
3848
3849 /* first, empty all BD's */
3850 for (; q->first_empty != q->last_used;
3851 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3852 ipw_queue_tx_free_tfd(priv, txq);
3853 }
3854
3855 /* free buffers belonging to queue itself */
3856 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3857 q->dma_addr);
3858 kfree(txq->txb);
3859
3860 /* 0 fill whole structure */
3861 memset(txq, 0, sizeof(*txq));
3862 }
3863
3864 /**
3865 * Destroy all DMA queues and structures
3866 *
3867 * @param priv
3868 */
3869 static void ipw_tx_queue_free(struct ipw_priv *priv)
3870 {
3871 /* Tx CMD queue */
3872 ipw_queue_tx_free(priv, &priv->txq_cmd);
3873
3874 /* Tx queues */
3875 ipw_queue_tx_free(priv, &priv->txq[0]);
3876 ipw_queue_tx_free(priv, &priv->txq[1]);
3877 ipw_queue_tx_free(priv, &priv->txq[2]);
3878 ipw_queue_tx_free(priv, &priv->txq[3]);
3879 }
3880
3881 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3882 {
3883 	/* First 3 bytes are the manufacturer's OUI */
3884 bssid[0] = priv->mac_addr[0];
3885 bssid[1] = priv->mac_addr[1];
3886 bssid[2] = priv->mac_addr[2];
3887
3888 /* Last bytes are random */
3889 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3890
3891 bssid[0] &= 0xfe; /* clear multicast bit */
3892 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3893 }
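/* Illustration of the bit manipulation above (MAC octets are made up):
 * a device MAC of 00:11:22:aa:bb:cc plus random bytes r3:r4:r5 yields
 * the BSSID 02:11:22:r3:r4:r5 -- bit 0 of the first octet (the I/G bit)
 * is cleared so the address stays unicast, and bit 1 (the U/L bit) is
 * set to mark it as locally administered. */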
3894
3895 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3896 {
3897 struct ipw_station_entry entry;
3898 int i;
3899
3900 for (i = 0; i < priv->num_stations; i++) {
3901 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3902 /* Another node is active in network */
3903 priv->missed_adhoc_beacons = 0;
3904 if (!(priv->config & CFG_STATIC_CHANNEL))
3905 /* when other nodes drop out, we drop out */
3906 priv->config &= ~CFG_ADHOC_PERSIST;
3907
3908 return i;
3909 }
3910 }
3911
3912 if (i == MAX_STATIONS)
3913 return IPW_INVALID_STATION;
3914
3915 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3916
3917 entry.reserved = 0;
3918 entry.support_mode = 0;
3919 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3920 memcpy(priv->stations[i], bssid, ETH_ALEN);
3921 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3922 &entry, sizeof(entry));
3923 priv->num_stations++;
3924
3925 return i;
3926 }
3927
3928 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3929 {
3930 int i;
3931
3932 for (i = 0; i < priv->num_stations; i++)
3933 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3934 return i;
3935
3936 return IPW_INVALID_STATION;
3937 }
3938
3939 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3940 {
3941 int err;
3942
3943 if (priv->status & STATUS_ASSOCIATING) {
3944 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3945 schedule_work(&priv->disassociate);
3946 return;
3947 }
3948
3949 if (!(priv->status & STATUS_ASSOCIATED)) {
3950 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3951 return;
3952 }
3953
3954 IPW_DEBUG_ASSOC("Disassocation attempt from %pM "
3955 "on channel %d.\n",
3956 priv->assoc_request.bssid,
3957 priv->assoc_request.channel);
3958
3959 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3960 priv->status |= STATUS_DISASSOCIATING;
3961
3962 if (quiet)
3963 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3964 else
3965 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3966
3967 err = ipw_send_associate(priv, &priv->assoc_request);
3968 if (err) {
3969 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3970 "failed.\n");
3971 return;
3972 }
3973
3974 }
3975
3976 static int ipw_disassociate(void *data)
3977 {
3978 struct ipw_priv *priv = data;
3979 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3980 return 0;
3981 ipw_send_disassociate(data, 0);
3982 netif_carrier_off(priv->net_dev);
3983 return 1;
3984 }
3985
3986 static void ipw_bg_disassociate(struct work_struct *work)
3987 {
3988 struct ipw_priv *priv =
3989 container_of(work, struct ipw_priv, disassociate);
3990 mutex_lock(&priv->mutex);
3991 ipw_disassociate(priv);
3992 mutex_unlock(&priv->mutex);
3993 }
3994
3995 static void ipw_system_config(struct work_struct *work)
3996 {
3997 struct ipw_priv *priv =
3998 container_of(work, struct ipw_priv, system_config);
3999
4000 #ifdef CONFIG_IPW2200_PROMISCUOUS
4001 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4002 priv->sys_config.accept_all_data_frames = 1;
4003 priv->sys_config.accept_non_directed_frames = 1;
4004 priv->sys_config.accept_all_mgmt_bcpr = 1;
4005 priv->sys_config.accept_all_mgmt_frames = 1;
4006 }
4007 #endif
4008
4009 ipw_send_system_config(priv);
4010 }
4011
4012 struct ipw_status_code {
4013 u16 status;
4014 const char *reason;
4015 };
4016
4017 static const struct ipw_status_code ipw_status_codes[] = {
4018 {0x00, "Successful"},
4019 {0x01, "Unspecified failure"},
4020 {0x0A, "Cannot support all requested capabilities in the "
4021 "Capability information field"},
4022 {0x0B, "Reassociation denied due to inability to confirm that "
4023 "association exists"},
4024 {0x0C, "Association denied due to reason outside the scope of this "
4025 "standard"},
4026 {0x0D,
4027 "Responding station does not support the specified authentication "
4028 "algorithm"},
4029 {0x0E,
4030 "Received an Authentication frame with authentication sequence "
4031 "transaction sequence number out of expected sequence"},
4032 {0x0F, "Authentication rejected because of challenge failure"},
4033 {0x10, "Authentication rejected due to timeout waiting for next "
4034 "frame in sequence"},
4035 {0x11, "Association denied because AP is unable to handle additional "
4036 "associated stations"},
4037 {0x12,
4038 "Association denied due to requesting station not supporting all "
4039 "of the datarates in the BSSBasicServiceSet Parameter"},
4040 {0x13,
4041 "Association denied due to requesting station not supporting "
4042 "short preamble operation"},
4043 {0x14,
4044 "Association denied due to requesting station not supporting "
4045 "PBCC encoding"},
4046 {0x15,
4047 "Association denied due to requesting station not supporting "
4048 "channel agility"},
4049 {0x19,
4050 "Association denied due to requesting station not supporting "
4051 "short slot operation"},
4052 {0x1A,
4053 "Association denied due to requesting station not supporting "
4054 "DSSS-OFDM operation"},
4055 {0x28, "Invalid Information Element"},
4056 {0x29, "Group Cipher is not valid"},
4057 {0x2A, "Pairwise Cipher is not valid"},
4058 {0x2B, "AKMP is not valid"},
4059 {0x2C, "Unsupported RSN IE version"},
4060 {0x2D, "Invalid RSN IE Capabilities"},
4061 {0x2E, "Cipher suite is rejected per security policy"},
4062 };
4063
4064 static const char *ipw_get_status_code(u16 status)
4065 {
4066 int i;
4067 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4068 if (ipw_status_codes[i].status == (status & 0xff))
4069 return ipw_status_codes[i].reason;
4070 return "Unknown status value.";
4071 }
4072
4073 static inline void average_init(struct average *avg)
4074 {
4075 memset(avg, 0, sizeof(*avg));
4076 }
4077
4078 #define DEPTH_RSSI 8
4079 #define DEPTH_NOISE 16
4080 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4081 {
4082 return ((depth-1)*prev_avg + val)/depth;
4083 }
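/* Worked example for exponential_average(): with depth = DEPTH_RSSI (8),
 * prev_avg = -60 and val = -40, the new average is
 * (7 * -60 + -40) / 8 = -460 / 8 = -57 (integer division), so one sample
 * moves the average roughly 1/depth of the way toward the new value;
 * DEPTH_NOISE (16) smooths twice as heavily. */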
4084
4085 static void average_add(struct average *avg, s16 val)
4086 {
4087 avg->sum -= avg->entries[avg->pos];
4088 avg->sum += val;
4089 avg->entries[avg->pos++] = val;
4090 if (unlikely(avg->pos == AVG_ENTRIES)) {
4091 avg->init = 1;
4092 avg->pos = 0;
4093 }
4094 }
4095
4096 static s16 average_value(struct average *avg)
4097 {
4098 	if (unlikely(!avg->init)) {
4099 if (avg->pos)
4100 return avg->sum / avg->pos;
4101 return 0;
4102 }
4103
4104 return avg->sum / AVG_ENTRIES;
4105 }
4106
4107 static void ipw_reset_stats(struct ipw_priv *priv)
4108 {
4109 u32 len = sizeof(u32);
4110
4111 priv->quality = 0;
4112
4113 average_init(&priv->average_missed_beacons);
4114 priv->exp_avg_rssi = -60;
4115 priv->exp_avg_noise = -85 + 0x100;
4116
4117 priv->last_rate = 0;
4118 priv->last_missed_beacons = 0;
4119 priv->last_rx_packets = 0;
4120 priv->last_tx_packets = 0;
4121 priv->last_tx_failures = 0;
4122
4123 /* Firmware managed, reset only when NIC is restarted, so we have to
4124 * normalize on the current value */
4125 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4126 &priv->last_rx_err, &len);
4127 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4128 &priv->last_tx_failures, &len);
4129
4130 /* Driver managed, reset with each association */
4131 priv->missed_adhoc_beacons = 0;
4132 priv->missed_beacons = 0;
4133 priv->tx_packets = 0;
4134 priv->rx_packets = 0;
4135
4136 }
4137
4138 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4139 {
4140 u32 i = 0x80000000;
4141 u32 mask = priv->rates_mask;
4142 /* If currently associated in B mode, restrict the maximum
4143 * rate match to B rates */
4144 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4145 mask &= LIBIPW_CCK_RATES_MASK;
4146
4147 /* TODO: Verify that the rate is supported by the current rates
4148 * list. */
4149
4150 while (i && !(mask & i))
4151 i >>= 1;
4152 switch (i) {
4153 case LIBIPW_CCK_RATE_1MB_MASK:
4154 return 1000000;
4155 case LIBIPW_CCK_RATE_2MB_MASK:
4156 return 2000000;
4157 case LIBIPW_CCK_RATE_5MB_MASK:
4158 return 5500000;
4159 case LIBIPW_OFDM_RATE_6MB_MASK:
4160 return 6000000;
4161 case LIBIPW_OFDM_RATE_9MB_MASK:
4162 return 9000000;
4163 case LIBIPW_CCK_RATE_11MB_MASK:
4164 return 11000000;
4165 case LIBIPW_OFDM_RATE_12MB_MASK:
4166 return 12000000;
4167 case LIBIPW_OFDM_RATE_18MB_MASK:
4168 return 18000000;
4169 case LIBIPW_OFDM_RATE_24MB_MASK:
4170 return 24000000;
4171 case LIBIPW_OFDM_RATE_36MB_MASK:
4172 return 36000000;
4173 case LIBIPW_OFDM_RATE_48MB_MASK:
4174 return 48000000;
4175 case LIBIPW_OFDM_RATE_54MB_MASK:
4176 return 54000000;
4177 }
4178
4179 if (priv->ieee->mode == IEEE_B)
4180 return 11000000;
4181 else
4182 return 54000000;
4183 }
4184
4185 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4186 {
4187 u32 rate, len = sizeof(rate);
4188 int err;
4189
4190 if (!(priv->status & STATUS_ASSOCIATED))
4191 return 0;
4192
4193 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4194 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4195 &len);
4196 if (err) {
4197 IPW_DEBUG_INFO("failed querying ordinals.\n");
4198 return 0;
4199 }
4200 } else
4201 return ipw_get_max_rate(priv);
4202
4203 switch (rate) {
4204 case IPW_TX_RATE_1MB:
4205 return 1000000;
4206 case IPW_TX_RATE_2MB:
4207 return 2000000;
4208 case IPW_TX_RATE_5MB:
4209 return 5500000;
4210 case IPW_TX_RATE_6MB:
4211 return 6000000;
4212 case IPW_TX_RATE_9MB:
4213 return 9000000;
4214 case IPW_TX_RATE_11MB:
4215 return 11000000;
4216 case IPW_TX_RATE_12MB:
4217 return 12000000;
4218 case IPW_TX_RATE_18MB:
4219 return 18000000;
4220 case IPW_TX_RATE_24MB:
4221 return 24000000;
4222 case IPW_TX_RATE_36MB:
4223 return 36000000;
4224 case IPW_TX_RATE_48MB:
4225 return 48000000;
4226 case IPW_TX_RATE_54MB:
4227 return 54000000;
4228 }
4229
4230 return 0;
4231 }
4232
4233 #define IPW_STATS_INTERVAL (2 * HZ)
4234 static void ipw_gather_stats(struct ipw_priv *priv)
4235 {
4236 u32 rx_err, rx_err_delta, rx_packets_delta;
4237 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4238 u32 missed_beacons_percent, missed_beacons_delta;
4239 u32 quality = 0;
4240 u32 len = sizeof(u32);
4241 s16 rssi;
4242 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4243 rate_quality;
4244 u32 max_rate;
4245
4246 if (!(priv->status & STATUS_ASSOCIATED)) {
4247 priv->quality = 0;
4248 return;
4249 }
4250
4251 /* Update the statistics */
4252 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4253 &priv->missed_beacons, &len);
4254 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4255 priv->last_missed_beacons = priv->missed_beacons;
4256 if (priv->assoc_request.beacon_interval) {
4257 missed_beacons_percent = missed_beacons_delta *
4258 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4259 (IPW_STATS_INTERVAL * 10);
4260 } else {
4261 missed_beacons_percent = 0;
4262 }
4263 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4264
4265 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4266 rx_err_delta = rx_err - priv->last_rx_err;
4267 priv->last_rx_err = rx_err;
4268
4269 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4270 tx_failures_delta = tx_failures - priv->last_tx_failures;
4271 priv->last_tx_failures = tx_failures;
4272
4273 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4274 priv->last_rx_packets = priv->rx_packets;
4275
4276 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4277 priv->last_tx_packets = priv->tx_packets;
4278
4279 /* Calculate quality based on the following:
4280 *
4281 * Missed beacon: 100% = 0, 0% = 70% missed
4282 * Rate: 60% = 1Mbs, 100% = Max
4283 * Rx and Tx errors represent a straight % of total Rx/Tx
4284 * RSSI: 100% = > -50, 0% = < -80
4285 * Rx errors: 100% = 0, 0% = 50% missed
4286 *
4287 * The lowest computed quality is used.
4288 *
4289 */
4290 #define BEACON_THRESHOLD 5
4291 beacon_quality = 100 - missed_beacons_percent;
4292 if (beacon_quality < BEACON_THRESHOLD)
4293 beacon_quality = 0;
4294 else
4295 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4296 (100 - BEACON_THRESHOLD);
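	/* Worked example: 10% missed beacons gives beacon_quality =
	 * 100 - 10 = 90, rescaled to (90 - 5) * 100 / (100 - 5) = 89%;
	 * anything below BEACON_THRESHOLD collapses to 0%. */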
4297 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4298 beacon_quality, missed_beacons_percent);
4299
4300 priv->last_rate = ipw_get_current_rate(priv);
4301 max_rate = ipw_get_max_rate(priv);
4302 rate_quality = priv->last_rate * 40 / max_rate + 60;
4303 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4304 rate_quality, priv->last_rate / 1000000);
4305
4306 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4307 rx_quality = 100 - (rx_err_delta * 100) /
4308 (rx_packets_delta + rx_err_delta);
4309 else
4310 rx_quality = 100;
4311 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4312 rx_quality, rx_err_delta, rx_packets_delta);
4313
4314 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4315 tx_quality = 100 - (tx_failures_delta * 100) /
4316 (tx_packets_delta + tx_failures_delta);
4317 else
4318 tx_quality = 100;
4319 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4320 tx_quality, tx_failures_delta, tx_packets_delta);
4321
4322 rssi = priv->exp_avg_rssi;
4323 signal_quality =
4324 (100 *
4325 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4326 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4327 (priv->ieee->perfect_rssi - rssi) *
4328 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4329 62 * (priv->ieee->perfect_rssi - rssi))) /
4330 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4331 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4332 if (signal_quality > 100)
4333 signal_quality = 100;
4334 else if (signal_quality < 1)
4335 signal_quality = 0;
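	/* Worked example (assuming illustrative bounds of perfect_rssi =
	 * -20 and worst_rssi = -85, i.e. a 65 dB span): an averaged rssi
	 * of -60 gives
	 *   (100 * 65 * 65 - 40 * (15 * 65 + 62 * 40)) / (65 * 65)
	 *   = (422500 - 138200) / 4225 = 67%
	 * versus ~38% for a plain linear mapping over the same span. */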
4336
4337 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4338 signal_quality, rssi);
4339
4340 quality = min(rx_quality, signal_quality);
4341 quality = min(tx_quality, quality);
4342 quality = min(rate_quality, quality);
4343 quality = min(beacon_quality, quality);
4344 if (quality == beacon_quality)
4345 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4346 quality);
4347 if (quality == rate_quality)
4348 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4349 quality);
4350 if (quality == tx_quality)
4351 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4352 quality);
4353 if (quality == rx_quality)
4354 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4355 quality);
4356 if (quality == signal_quality)
4357 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4358 quality);
4359
4360 priv->quality = quality;
4361
4362 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4363 }
4364
4365 static void ipw_bg_gather_stats(struct work_struct *work)
4366 {
4367 struct ipw_priv *priv =
4368 container_of(work, struct ipw_priv, gather_stats.work);
4369 mutex_lock(&priv->mutex);
4370 ipw_gather_stats(priv);
4371 mutex_unlock(&priv->mutex);
4372 }
4373
4374 /* Missed beacon behavior:
4375 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4376 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4377 * Above disassociate threshold, give up and stop scanning.
4378 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4379 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4380 int missed_count)
4381 {
4382 priv->notif_missed_beacons = missed_count;
4383
4384 if (missed_count > priv->disassociate_threshold &&
4385 priv->status & STATUS_ASSOCIATED) {
4386 /* If associated and we've hit the missed
4387 * beacon threshold, disassociate, turn
4388 * off roaming, and abort any active scans */
4389 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4390 IPW_DL_STATE | IPW_DL_ASSOC,
4391 "Missed beacon: %d - disassociate\n", missed_count);
4392 priv->status &= ~STATUS_ROAMING;
4393 if (priv->status & STATUS_SCANNING) {
4394 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4395 IPW_DL_STATE,
4396 "Aborting scan with missed beacon.\n");
4397 schedule_work(&priv->abort_scan);
4398 }
4399
4400 schedule_work(&priv->disassociate);
4401 return;
4402 }
4403
4404 if (priv->status & STATUS_ROAMING) {
4405 /* If we are currently roaming, then just
4406 * print a debug statement... */
4407 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4408 "Missed beacon: %d - roam in progress\n",
4409 missed_count);
4410 return;
4411 }
4412
4413 if (roaming &&
4414 (missed_count > priv->roaming_threshold &&
4415 missed_count <= priv->disassociate_threshold)) {
4416 /* If we are not already roaming, set the ROAM
4417 * bit in the status and kick off a scan.
4418 * This can happen several times before we reach
4419 * disassociate_threshold. */
4420 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4421 "Missed beacon: %d - initiate "
4422 "roaming\n", missed_count);
4423 if (!(priv->status & STATUS_ROAMING)) {
4424 priv->status |= STATUS_ROAMING;
4425 if (!(priv->status & STATUS_SCANNING))
4426 schedule_delayed_work(&priv->request_scan, 0);
4427 }
4428 return;
4429 }
4430
4431 if (priv->status & STATUS_SCANNING &&
4432 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4433 /* Stop scan to keep fw from getting
4434 * stuck (only if we aren't roaming --
4435 * otherwise we'll never scan more than 2 or 3
4436 * channels..) */
4437 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4438 "Aborting scan with missed beacon.\n");
4439 schedule_work(&priv->abort_scan);
4440 }
4441
4442 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4443 }
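/* Illustration of the thresholds above (the numbers are hypothetical;
 * roaming_threshold and disassociate_threshold come from configuration):
 * with roaming_threshold = 8 and disassociate_threshold = 24, missed
 * counts 1-8 are only logged, 9-24 take the roam/scan path, and
 * anything above 24 forces a disassociation. */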
4444
4445 static void ipw_scan_event(struct work_struct *work)
4446 {
4447 union iwreq_data wrqu;
4448
4449 struct ipw_priv *priv =
4450 container_of(work, struct ipw_priv, scan_event.work);
4451
4452 wrqu.data.length = 0;
4453 wrqu.data.flags = 0;
4454 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4455 }
4456
4457 static void handle_scan_event(struct ipw_priv *priv)
4458 {
4459 /* Only userspace-requested scan completion events go out immediately */
4460 if (!priv->user_requested_scan) {
4461 if (!delayed_work_pending(&priv->scan_event))
4462 schedule_delayed_work(&priv->scan_event,
4463 round_jiffies_relative(msecs_to_jiffies(4000)));
4464 } else {
4465 union iwreq_data wrqu;
4466
4467 priv->user_requested_scan = 0;
4468 cancel_delayed_work(&priv->scan_event);
4469
4470 wrqu.data.length = 0;
4471 wrqu.data.flags = 0;
4472 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4473 }
4474 }
4475
4476 /**
4477 * Handle host notification packet.
4478 * Called from interrupt routine
4479 */
4480 static void ipw_rx_notification(struct ipw_priv *priv,
4481 struct ipw_rx_notification *notif)
4482 {
4483 DECLARE_SSID_BUF(ssid);
4484 u16 size = le16_to_cpu(notif->size);
4485
4486 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4487
4488 switch (notif->subtype) {
4489 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4490 struct notif_association *assoc = &notif->u.assoc;
4491
4492 switch (assoc->state) {
4493 case CMAS_ASSOCIATED:{
4494 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4495 IPW_DL_ASSOC,
4496 "associated: '%s' %pM\n",
4497 print_ssid(ssid, priv->essid,
4498 priv->essid_len),
4499 priv->bssid);
4500
4501 switch (priv->ieee->iw_mode) {
4502 case IW_MODE_INFRA:
4503 memcpy(priv->ieee->bssid,
4504 priv->bssid, ETH_ALEN);
4505 break;
4506
4507 case IW_MODE_ADHOC:
4508 memcpy(priv->ieee->bssid,
4509 priv->bssid, ETH_ALEN);
4510
4511 /* clear out the station table */
4512 priv->num_stations = 0;
4513
4514 IPW_DEBUG_ASSOC
4515 ("queueing adhoc check\n");
4516 schedule_delayed_work(
4517 &priv->adhoc_check,
4518 le16_to_cpu(priv->
4519 assoc_request.
4520 beacon_interval));
4521 break;
4522 }
4523
4524 priv->status &= ~STATUS_ASSOCIATING;
4525 priv->status |= STATUS_ASSOCIATED;
4526 schedule_work(&priv->system_config);
4527
4528 #ifdef CONFIG_IPW2200_QOS
4529 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4530 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4531 if ((priv->status & STATUS_AUTH) &&
4532 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4533 == IEEE80211_STYPE_ASSOC_RESP)) {
4534 if ((sizeof
4535 (struct
4536 libipw_assoc_response)
4537 <= size)
4538 && (size <= 2314)) {
4539 struct
4540 libipw_rx_stats
4541 stats = {
4542 .len = size - 1,
4543 };
4544
4545 IPW_DEBUG_QOS
4546 ("QoS Associate "
4547 "size %d\n", size);
4548 libipw_rx_mgt(priv->
4549 ieee,
4550 (struct
4551 libipw_hdr_4addr
4552 *)
4553 &notif->u.raw, &stats);
4554 }
4555 }
4556 #endif
4557
4558 schedule_work(&priv->link_up);
4559
4560 break;
4561 }
4562
4563 case CMAS_AUTHENTICATED:{
4564 if (priv->
4565 status & (STATUS_ASSOCIATED |
4566 STATUS_AUTH)) {
4567 struct notif_authenticate *auth
4568 = &notif->u.auth;
4569 IPW_DEBUG(IPW_DL_NOTIF |
4570 IPW_DL_STATE |
4571 IPW_DL_ASSOC,
4572 "deauthenticated: '%s' "
4573 "%pM"
4574 ": (0x%04X) - %s\n",
4575 print_ssid(ssid,
4576 priv->
4577 essid,
4578 priv->
4579 essid_len),
4580 priv->bssid,
4581 le16_to_cpu(auth->status),
4582 ipw_get_status_code
4583 (le16_to_cpu
4584 (auth->status)));
4585
4586 priv->status &=
4587 ~(STATUS_ASSOCIATING |
4588 STATUS_AUTH |
4589 STATUS_ASSOCIATED);
4590
4591 schedule_work(&priv->link_down);
4592 break;
4593 }
4594
4595 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4596 IPW_DL_ASSOC,
4597 "authenticated: '%s' %pM\n",
4598 print_ssid(ssid, priv->essid,
4599 priv->essid_len),
4600 priv->bssid);
4601 break;
4602 }
4603
4604 case CMAS_INIT:{
4605 if (priv->status & STATUS_AUTH) {
4606 struct
4607 libipw_assoc_response
4608 *resp;
4609 resp =
4610 (struct
4611 libipw_assoc_response
4612 *)&notif->u.raw;
4613 IPW_DEBUG(IPW_DL_NOTIF |
4614 IPW_DL_STATE |
4615 IPW_DL_ASSOC,
4616 "association failed (0x%04X): %s\n",
4617 le16_to_cpu(resp->status),
4618 ipw_get_status_code
4619 (le16_to_cpu
4620 (resp->status)));
4621 }
4622
4623 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4624 IPW_DL_ASSOC,
4625 "disassociated: '%s' %pM\n",
4626 print_ssid(ssid, priv->essid,
4627 priv->essid_len),
4628 priv->bssid);
4629
4630 priv->status &=
4631 ~(STATUS_DISASSOCIATING |
4632 STATUS_ASSOCIATING |
4633 STATUS_ASSOCIATED | STATUS_AUTH);
4634 if (priv->assoc_network
4635 && (priv->assoc_network->
4636 capability &
4637 WLAN_CAPABILITY_IBSS))
4638 ipw_remove_current_network
4639 (priv);
4640
4641 schedule_work(&priv->link_down);
4642
4643 break;
4644 }
4645
4646 case CMAS_RX_ASSOC_RESP:
4647 break;
4648
4649 default:
4650 IPW_ERROR("assoc: unknown (%d)\n",
4651 assoc->state);
4652 break;
4653 }
4654
4655 break;
4656 }
4657
4658 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4659 struct notif_authenticate *auth = &notif->u.auth;
4660 switch (auth->state) {
4661 case CMAS_AUTHENTICATED:
4662 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4663 "authenticated: '%s' %pM\n",
4664 print_ssid(ssid, priv->essid,
4665 priv->essid_len),
4666 priv->bssid);
4667 priv->status |= STATUS_AUTH;
4668 break;
4669
4670 case CMAS_INIT:
4671 if (priv->status & STATUS_AUTH) {
4672 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4673 IPW_DL_ASSOC,
4674 "authentication failed (0x%04X): %s\n",
4675 le16_to_cpu(auth->status),
4676 ipw_get_status_code(le16_to_cpu
4677 (auth->
4678 status)));
4679 }
4680 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4681 IPW_DL_ASSOC,
4682 "deauthenticated: '%s' %pM\n",
4683 print_ssid(ssid, priv->essid,
4684 priv->essid_len),
4685 priv->bssid);
4686
4687 priv->status &= ~(STATUS_ASSOCIATING |
4688 STATUS_AUTH |
4689 STATUS_ASSOCIATED);
4690
4691 schedule_work(&priv->link_down);
4692 break;
4693
4694 case CMAS_TX_AUTH_SEQ_1:
4695 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4696 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4697 break;
4698 case CMAS_RX_AUTH_SEQ_2:
4699 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4700 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4701 break;
4702 case CMAS_AUTH_SEQ_1_PASS:
4703 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4704 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4705 break;
4706 case CMAS_AUTH_SEQ_1_FAIL:
4707 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4708 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4709 break;
4710 case CMAS_TX_AUTH_SEQ_3:
4711 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4712 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4713 break;
4714 case CMAS_RX_AUTH_SEQ_4:
4715 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4716 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4717 break;
4718 case CMAS_AUTH_SEQ_2_PASS:
4719 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4720 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4721 break;
4722 case CMAS_AUTH_SEQ_2_FAIL:
4723 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4724 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4725 break;
4726 case CMAS_TX_ASSOC:
4727 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4728 IPW_DL_ASSOC, "TX_ASSOC\n");
4729 break;
4730 case CMAS_RX_ASSOC_RESP:
4731 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4732 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4733
4734 break;
4735 case CMAS_ASSOCIATED:
4736 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4737 IPW_DL_ASSOC, "ASSOCIATED\n");
4738 break;
4739 default:
4740 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4741 auth->state);
4742 break;
4743 }
4744 break;
4745 }
4746
4747 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4748 struct notif_channel_result *x =
4749 &notif->u.channel_result;
4750
4751 if (size == sizeof(*x)) {
4752 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4753 x->channel_num);
4754 } else {
4755 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4756 "(should be %zd)\n",
4757 size, sizeof(*x));
4758 }
4759 break;
4760 }
4761
4762 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4763 struct notif_scan_complete *x = &notif->u.scan_complete;
4764 if (size == sizeof(*x)) {
4765 IPW_DEBUG_SCAN
4766 ("Scan completed: type %d, %d channels, "
4767 "%d status\n", x->scan_type,
4768 x->num_channels, x->status);
4769 } else {
4770 IPW_ERROR("Scan completed of wrong size %d "
4771 "(should be %zd)\n",
4772 size, sizeof(*x));
4773 }
4774
4775 priv->status &=
4776 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4777
4778 wake_up_interruptible(&priv->wait_state);
4779 cancel_delayed_work(&priv->scan_check);
4780
4781 if (priv->status & STATUS_EXIT_PENDING)
4782 break;
4783
4784 priv->ieee->scans++;
4785
4786 #ifdef CONFIG_IPW2200_MONITOR
4787 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4788 priv->status |= STATUS_SCAN_FORCED;
4789 schedule_delayed_work(&priv->request_scan, 0);
4790 break;
4791 }
4792 priv->status &= ~STATUS_SCAN_FORCED;
4793 #endif /* CONFIG_IPW2200_MONITOR */
4794
4795 /* Do queued direct scans first */
4796 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4797 schedule_delayed_work(&priv->request_direct_scan, 0);
4798
4799 if (!(priv->status & (STATUS_ASSOCIATED |
4800 STATUS_ASSOCIATING |
4801 STATUS_ROAMING |
4802 STATUS_DISASSOCIATING)))
4803 schedule_work(&priv->associate);
4804 else if (priv->status & STATUS_ROAMING) {
4805 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4806 /* If a scan completed and we are in roam mode, then
4807 * the scan that completed was the one requested as a
4808 * result of entering roam... so, schedule the
4809 * roam work */
4810 schedule_work(&priv->roam);
4811 else
4812 /* Don't schedule if we aborted the scan */
4813 priv->status &= ~STATUS_ROAMING;
4814 } else if (priv->status & STATUS_SCAN_PENDING)
4815 schedule_delayed_work(&priv->request_scan, 0);
4816 else if (priv->config & CFG_BACKGROUND_SCAN
4817 && priv->status & STATUS_ASSOCIATED)
4818 schedule_delayed_work(&priv->request_scan,
4819 round_jiffies_relative(HZ));
4820
4821 /* Send an empty event to user space.
4822 * We don't send the received data on the event because
4823 * it would require us to do complex transcoding, and
4824 			 * we want to minimise the work done in the irq handler.
4825 * Use a request to extract the data.
4826 * Also, we generate this even for any scan, regardless
4827 			 * of how the scan was initiated. User space can just
4828 * sync on periodic scan to get fresh data...
4829 * Jean II */
4830 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4831 handle_scan_event(priv);
4832 break;
4833 }
4834
4835 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4836 struct notif_frag_length *x = &notif->u.frag_len;
4837
4838 if (size == sizeof(*x))
4839 IPW_ERROR("Frag length: %d\n",
4840 le16_to_cpu(x->frag_length));
4841 else
4842 IPW_ERROR("Frag length of wrong size %d "
4843 "(should be %zd)\n",
4844 size, sizeof(*x));
4845 break;
4846 }
4847
4848 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4849 struct notif_link_deterioration *x =
4850 &notif->u.link_deterioration;
4851
4852 if (size == sizeof(*x)) {
4853 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4854 "link deterioration: type %d, cnt %d\n",
4855 x->silence_notification_type,
4856 x->silence_count);
4857 memcpy(&priv->last_link_deterioration, x,
4858 sizeof(*x));
4859 } else {
4860 IPW_ERROR("Link Deterioration of wrong size %d "
4861 "(should be %zd)\n",
4862 size, sizeof(*x));
4863 }
4864 break;
4865 }
4866
4867 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4868 IPW_ERROR("Dino config\n");
4869 if (priv->hcmd
4870 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4871 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4872
4873 break;
4874 }
4875
4876 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4877 struct notif_beacon_state *x = &notif->u.beacon_state;
4878 if (size != sizeof(*x)) {
4879 IPW_ERROR
4880 ("Beacon state of wrong size %d (should "
4881 "be %zd)\n", size, sizeof(*x));
4882 break;
4883 }
4884
4885 if (le32_to_cpu(x->state) ==
4886 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4887 ipw_handle_missed_beacon(priv,
4888 le32_to_cpu(x->
4889 number));
4890
4891 break;
4892 }
4893
4894 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4895 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4896 if (size == sizeof(*x)) {
4897 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4898 "0x%02x station %d\n",
4899 x->key_state, x->security_type,
4900 x->station_index);
4901 break;
4902 }
4903
4904 IPW_ERROR
4905 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4906 size, sizeof(*x));
4907 break;
4908 }
4909
4910 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4911 struct notif_calibration *x = &notif->u.calibration;
4912
4913 if (size == sizeof(*x)) {
4914 memcpy(&priv->calib, x, sizeof(*x));
4915 IPW_DEBUG_INFO("TODO: Calibration\n");
4916 break;
4917 }
4918
4919 IPW_ERROR
4920 ("Calibration of wrong size %d (should be %zd)\n",
4921 size, sizeof(*x));
4922 break;
4923 }
4924
4925 case HOST_NOTIFICATION_NOISE_STATS:{
4926 if (size == sizeof(u32)) {
4927 priv->exp_avg_noise =
4928 exponential_average(priv->exp_avg_noise,
4929 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4930 DEPTH_NOISE);
4931 break;
4932 }
4933
4934 IPW_ERROR
4935 ("Noise stat is wrong size %d (should be %zd)\n",
4936 size, sizeof(u32));
4937 break;
4938 }
4939
4940 default:
4941 IPW_DEBUG_NOTIF("Unknown notification: "
4942 "subtype=%d,flags=0x%2x,size=%d\n",
4943 notif->subtype, notif->flags, size);
4944 }
4945 }
4946
4947 /**
4948  * Destroy all DMA structures and initialise them again
4949 *
4950 * @param priv
4951 * @return error code
4952 */
4953 static int ipw_queue_reset(struct ipw_priv *priv)
4954 {
4955 int rc = 0;
4956 /** @todo customize queue sizes */
4957 int nTx = 64, nTxCmd = 8;
4958 ipw_tx_queue_free(priv);
4959 /* Tx CMD queue */
4960 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4961 IPW_TX_CMD_QUEUE_READ_INDEX,
4962 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4963 IPW_TX_CMD_QUEUE_BD_BASE,
4964 IPW_TX_CMD_QUEUE_BD_SIZE);
4965 if (rc) {
4966 IPW_ERROR("Tx Cmd queue init failed\n");
4967 goto error;
4968 }
4969 /* Tx queue(s) */
4970 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4971 IPW_TX_QUEUE_0_READ_INDEX,
4972 IPW_TX_QUEUE_0_WRITE_INDEX,
4973 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4974 if (rc) {
4975 IPW_ERROR("Tx 0 queue init failed\n");
4976 goto error;
4977 }
4978 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4979 IPW_TX_QUEUE_1_READ_INDEX,
4980 IPW_TX_QUEUE_1_WRITE_INDEX,
4981 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4982 if (rc) {
4983 IPW_ERROR("Tx 1 queue init failed\n");
4984 goto error;
4985 }
4986 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4987 IPW_TX_QUEUE_2_READ_INDEX,
4988 IPW_TX_QUEUE_2_WRITE_INDEX,
4989 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4990 if (rc) {
4991 IPW_ERROR("Tx 2 queue init failed\n");
4992 goto error;
4993 }
4994 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4995 IPW_TX_QUEUE_3_READ_INDEX,
4996 IPW_TX_QUEUE_3_WRITE_INDEX,
4997 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4998 if (rc) {
4999 IPW_ERROR("Tx 3 queue init failed\n");
5000 goto error;
5001 }
5002 /* statistics */
5003 priv->rx_bufs_min = 0;
5004 priv->rx_pend_max = 0;
5005 return rc;
5006
5007 error:
5008 ipw_tx_queue_free(priv);
5009 return rc;
5010 }
5011
5012 /**
5013  * Reclaim Tx queue entries no longer used by the NIC.
5014 *
5015 * When FW advances 'R' index, all entries between old and
5016  * new 'R' index need to be reclaimed. As a result, some free space
5017 * forms. If there is enough free space (> low mark), wake Tx queue.
5018 *
5019 * @note Need to protect against garbage in 'R' index
5020 * @param priv
5021 * @param txq
5022 * @param qindex
5023  * @return Number of used entries remaining in the queue
5024 */
5025 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5026 struct clx2_tx_queue *txq, int qindex)
5027 {
5028 u32 hw_tail;
5029 int used;
5030 struct clx2_queue *q = &txq->q;
5031
5032 hw_tail = ipw_read32(priv, q->reg_r);
5033 if (hw_tail >= q->n_bd) {
5034 IPW_ERROR
5035 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5036 hw_tail, q->n_bd);
5037 goto done;
5038 }
5039 for (; q->last_used != hw_tail;
5040 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5041 ipw_queue_tx_free_tfd(priv, txq);
5042 priv->tx_packets++;
5043 }
5044 done:
5045 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5046 (qindex >= 0))
5047 netif_wake_queue(priv->net_dev);
5048 used = q->first_empty - q->last_used;
5049 if (used < 0)
5050 used += q->n_bd;
5051
5052 return used;
5053 }
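/* Worked example for the 'used' count above: with n_bd = 64,
 * first_empty = 3 and last_used = 60 (the ring has wrapped), the raw
 * difference is 3 - 60 = -57, corrected to -57 + 64 = 7 entries still
 * owned by the firmware. */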
5054
5055 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5056 int len, int sync)
5057 {
5058 struct clx2_tx_queue *txq = &priv->txq_cmd;
5059 struct clx2_queue *q = &txq->q;
5060 struct tfd_frame *tfd;
5061
5062 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5063 IPW_ERROR("No space for Tx\n");
5064 return -EBUSY;
5065 }
5066
5067 tfd = &txq->bd[q->first_empty];
5068 txq->txb[q->first_empty] = NULL;
5069
5070 memset(tfd, 0, sizeof(*tfd));
5071 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5072 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5073 priv->hcmd_seq++;
5074 tfd->u.cmd.index = hcmd;
5075 tfd->u.cmd.length = len;
5076 memcpy(tfd->u.cmd.payload, buf, len);
5077 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5078 ipw_write32(priv, q->reg_w, q->first_empty);
5079 _ipw_read32(priv, 0x90);
5080
5081 return 0;
5082 }
5083
5084 /*
5085 * Rx theory of operation
5086 *
5087 * The host allocates 32 DMA target addresses and passes the host address
5088 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5089 * 0 to 31
5090 *
5091 * Rx Queue Indexes
5092 * The host/firmware share two index registers for managing the Rx buffers.
5093 *
5094 * The READ index maps to the first position that the firmware may be writing
5095 * to -- the driver can read up to (but not including) this position and get
5096 * good data.
5097 * The READ index is managed by the firmware once the card is enabled.
5098 *
5099 * The WRITE index maps to the last position the driver has read from -- the
5100 * position preceding WRITE is the last slot the firmware can place a packet.
5101 *
5102 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5103 * WRITE = READ.
5104 *
5105 * During initialization the host sets up the READ queue position to the first
5106 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5107 *
5108 * When the firmware places a packet in a buffer it will advance the READ index
5109 * and fire the RX interrupt. The driver can then query the READ index and
5110 * process as many packets as possible, moving the WRITE index forward as it
5111 * resets the Rx queue buffers with new memory.
5112 *
5113 * The management in the driver is as follows:
5114 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5115 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5116  *   to replenish the ipw->rxq->rx_free.
5117 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5118 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5119 * 'processed' and 'read' driver indexes as well)
5120 * + A received packet is processed and handed to the kernel network stack,
5121 * detached from the ipw->rxq. The driver 'processed' index is updated.
5122 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5123 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5124 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5125 * were enough free buffers and RX_STALLED is set it is cleared.
5126 *
5127 *
5128 * Driver sequence:
5129 *
5130 * ipw_rx_queue_alloc() Allocates rx_free
5131 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5132 * ipw_rx_queue_restock
5133 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5134 * queue, updates firmware pointers, and updates
5135 * the WRITE index. If insufficient rx_free buffers
5136 * are available, schedules ipw_rx_queue_replenish
5137 *
5138 * -- enable interrupts --
5139 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5140 * READ INDEX, detaching the SKB from the pool.
5141 * Moves the packet buffer from queue to rx_used.
5142 * Calls ipw_rx_queue_restock to refill any empty
5143 * slots.
5144 * ...
5145 *
5146 */
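/* Minimal sketch of the free-slot arithmetic implied by the WRITE/READ
 * contract above.  This is only an illustration under the assumptions
 * stated here -- the driver's real Rx-space helper lives elsewhere in
 * this file and may differ in detail:
 *
 *	static int rx_slots_free(int read, int write)	// hypothetical helper
 *	{
 *		int s = read - write;		// slots the driver may still restock
 *		if (s <= 0)
 *			s += RX_QUEUE_SIZE;	// wrap-around
 *		s -= 2;				// reserve, mirroring ipw_tx_queue_space(),
 *						// so WRITE never catches up with READ
 *		if (s < 0)
 *			s = 0;
 *		return s;
 *	}
 */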
5147
5148 /*
5149 * If there are slots in the RX queue that need to be restocked,
5150 * and we have free pre-allocated buffers, fill the ranks as much
5151 * as we can pulling from rx_free.
5152 *
5153 * This moves the 'write' index forward to catch up with 'processed', and
5154 * also updates the memory address in the firmware to reference the new
5155 * target buffer.
5156 */
5157 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5158 {
5159 struct ipw_rx_queue *rxq = priv->rxq;
5160 struct list_head *element;
5161 struct ipw_rx_mem_buffer *rxb;
5162 unsigned long flags;
5163 int write;
5164
5165 spin_lock_irqsave(&rxq->lock, flags);
5166 write = rxq->write;
5167 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5168 element = rxq->rx_free.next;
5169 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5170 list_del(element);
5171
5172 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5173 rxb->dma_addr);
5174 rxq->queue[rxq->write] = rxb;
5175 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5176 rxq->free_count--;
5177 }
5178 spin_unlock_irqrestore(&rxq->lock, flags);
5179
5180 /* If the pre-allocated buffer pool is dropping low, schedule to
5181 * refill it */
5182 if (rxq->free_count <= RX_LOW_WATERMARK)
5183 schedule_work(&priv->rx_replenish);
5184
5185 /* If we've added more space for the firmware to place data, tell it */
5186 if (write != rxq->write)
5187 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5188 }
5189
5190 /*
5191  * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5192 * Also restock the Rx queue via ipw_rx_queue_restock.
5193 *
5194  * This is called as a scheduled work item (except during initialization).
5195 */
5196 static void ipw_rx_queue_replenish(void *data)
5197 {
5198 struct ipw_priv *priv = data;
5199 struct ipw_rx_queue *rxq = priv->rxq;
5200 struct list_head *element;
5201 struct ipw_rx_mem_buffer *rxb;
5202 unsigned long flags;
5203
5204 spin_lock_irqsave(&rxq->lock, flags);
5205 while (!list_empty(&rxq->rx_used)) {
5206 element = rxq->rx_used.next;
5207 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5208 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5209 if (!rxb->skb) {
5210 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5211 priv->net_dev->name);
5212 /* We don't reschedule replenish work here -- we will
5213 * call the restock method and if it still needs
5214 * more buffers it will schedule replenish */
5215 break;
5216 }
5217 list_del(element);
5218
5219 rxb->dma_addr =
5220 pci_map_single(priv->pci_dev, rxb->skb->data,
5221 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5222
5223 list_add_tail(&rxb->list, &rxq->rx_free);
5224 rxq->free_count++;
5225 }
5226 spin_unlock_irqrestore(&rxq->lock, flags);
5227
5228 ipw_rx_queue_restock(priv);
5229 }
5230
5231 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5232 {
5233 struct ipw_priv *priv =
5234 container_of(work, struct ipw_priv, rx_replenish);
5235 mutex_lock(&priv->mutex);
5236 ipw_rx_queue_replenish(priv);
5237 mutex_unlock(&priv->mutex);
5238 }
5239
5240 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5241  * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5242  * This free routine walks the list of POOL entries and, if an SKB is
5243  * non-NULL, unmaps and frees it.
5244 */
5245 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5246 {
5247 int i;
5248
5249 if (!rxq)
5250 return;
5251
5252 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5253 if (rxq->pool[i].skb != NULL) {
5254 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5255 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5256 dev_kfree_skb(rxq->pool[i].skb);
5257 }
5258 }
5259
5260 kfree(rxq);
5261 }
5262
5263 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5264 {
5265 struct ipw_rx_queue *rxq;
5266 int i;
5267
5268 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5269 if (unlikely(!rxq)) {
5270 IPW_ERROR("memory allocation failed\n");
5271 return NULL;
5272 }
5273 spin_lock_init(&rxq->lock);
5274 INIT_LIST_HEAD(&rxq->rx_free);
5275 INIT_LIST_HEAD(&rxq->rx_used);
5276
5277 /* Fill the rx_used queue with _all_ of the Rx buffers */
5278 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5279 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5280
5281 	/* Set things up so that we have processed and used all buffers, but have
5282 * not restocked the Rx queue with fresh buffers */
5283 rxq->read = rxq->write = 0;
5284 rxq->free_count = 0;
5285
5286 return rxq;
5287 }
5288
5289 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5290 {
5291 rate &= ~LIBIPW_BASIC_RATE_MASK;
5292 if (ieee_mode == IEEE_A) {
5293 switch (rate) {
5294 case LIBIPW_OFDM_RATE_6MB:
5295 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5296 1 : 0;
5297 case LIBIPW_OFDM_RATE_9MB:
5298 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5299 1 : 0;
5300 case LIBIPW_OFDM_RATE_12MB:
5301 return priv->
5302 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5303 case LIBIPW_OFDM_RATE_18MB:
5304 return priv->
5305 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5306 case LIBIPW_OFDM_RATE_24MB:
5307 return priv->
5308 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5309 case LIBIPW_OFDM_RATE_36MB:
5310 return priv->
5311 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5312 case LIBIPW_OFDM_RATE_48MB:
5313 return priv->
5314 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5315 case LIBIPW_OFDM_RATE_54MB:
5316 return priv->
5317 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5318 default:
5319 return 0;
5320 }
5321 }
5322
5323 /* B and G mixed */
5324 switch (rate) {
5325 case LIBIPW_CCK_RATE_1MB:
5326 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5327 case LIBIPW_CCK_RATE_2MB:
5328 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5329 case LIBIPW_CCK_RATE_5MB:
5330 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5331 case LIBIPW_CCK_RATE_11MB:
5332 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5333 }
5334
5335 /* If we are limited to B modulations, bail at this point */
5336 if (ieee_mode == IEEE_B)
5337 return 0;
5338
5339 /* G */
5340 switch (rate) {
5341 case LIBIPW_OFDM_RATE_6MB:
5342 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5343 case LIBIPW_OFDM_RATE_9MB:
5344 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5345 case LIBIPW_OFDM_RATE_12MB:
5346 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5347 case LIBIPW_OFDM_RATE_18MB:
5348 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5349 case LIBIPW_OFDM_RATE_24MB:
5350 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5351 case LIBIPW_OFDM_RATE_36MB:
5352 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5353 case LIBIPW_OFDM_RATE_48MB:
5354 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5355 case LIBIPW_OFDM_RATE_54MB:
5356 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5357 }
5358
5359 return 0;
5360 }
5361
5362 static int ipw_compatible_rates(struct ipw_priv *priv,
5363 const struct libipw_network *network,
5364 struct ipw_supported_rates *rates)
5365 {
5366 int num_rates, i;
5367
5368 memset(rates, 0, sizeof(*rates));
5369 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5370 rates->num_rates = 0;
5371 for (i = 0; i < num_rates; i++) {
5372 if (!ipw_is_rate_in_mask(priv, network->mode,
5373 network->rates[i])) {
5374
5375 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5376 IPW_DEBUG_SCAN("Adding masked mandatory "
5377 "rate %02X\n",
5378 network->rates[i]);
5379 rates->supported_rates[rates->num_rates++] =
5380 network->rates[i];
5381 continue;
5382 }
5383
5384 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5385 network->rates[i], priv->rates_mask);
5386 continue;
5387 }
5388
5389 rates->supported_rates[rates->num_rates++] = network->rates[i];
5390 }
5391
5392 num_rates = min(network->rates_ex_len,
5393 (u8) (IPW_MAX_RATES - num_rates));
5394 for (i = 0; i < num_rates; i++) {
5395 if (!ipw_is_rate_in_mask(priv, network->mode,
5396 network->rates_ex[i])) {
5397 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5398 IPW_DEBUG_SCAN("Adding masked mandatory "
5399 "rate %02X\n",
5400 network->rates_ex[i]);
5401 rates->supported_rates[rates->num_rates++] =
5402 				    network->rates_ex[i];
5403 continue;
5404 }
5405
5406 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5407 network->rates_ex[i], priv->rates_mask);
5408 continue;
5409 }
5410
5411 rates->supported_rates[rates->num_rates++] =
5412 network->rates_ex[i];
5413 }
5414
5415 return 1;
5416 }
5417
5418 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5419 const struct ipw_supported_rates *src)
5420 {
5421 u8 i;
5422 for (i = 0; i < src->num_rates; i++)
5423 dest->supported_rates[i] = src->supported_rates[i];
5424 dest->num_rates = src->num_rates;
5425 }
5426
5427 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5428 * mask should ever be used -- right now all callers to add the scan rates are
5429 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5430 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5431 u8 modulation, u32 rate_mask)
5432 {
5433 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5434 LIBIPW_BASIC_RATE_MASK : 0;
5435
5436 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5437 rates->supported_rates[rates->num_rates++] =
5438 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5439
5440 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5441 rates->supported_rates[rates->num_rates++] =
5442 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5443
5444 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5445 rates->supported_rates[rates->num_rates++] = basic_mask |
5446 LIBIPW_CCK_RATE_5MB;
5447
5448 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5449 rates->supported_rates[rates->num_rates++] = basic_mask |
5450 LIBIPW_CCK_RATE_11MB;
5451 }
5452
5453 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5454 u8 modulation, u32 rate_mask)
5455 {
5456 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5457 LIBIPW_BASIC_RATE_MASK : 0;
5458
5459 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5460 rates->supported_rates[rates->num_rates++] = basic_mask |
5461 LIBIPW_OFDM_RATE_6MB;
5462
5463 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5464 rates->supported_rates[rates->num_rates++] =
5465 LIBIPW_OFDM_RATE_9MB;
5466
5467 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5468 rates->supported_rates[rates->num_rates++] = basic_mask |
5469 LIBIPW_OFDM_RATE_12MB;
5470
5471 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5472 rates->supported_rates[rates->num_rates++] =
5473 LIBIPW_OFDM_RATE_18MB;
5474
5475 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5476 rates->supported_rates[rates->num_rates++] = basic_mask |
5477 LIBIPW_OFDM_RATE_24MB;
5478
5479 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5480 rates->supported_rates[rates->num_rates++] =
5481 LIBIPW_OFDM_RATE_36MB;
5482
5483 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5484 rates->supported_rates[rates->num_rates++] =
5485 LIBIPW_OFDM_RATE_48MB;
5486
5487 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5488 rates->supported_rates[rates->num_rates++] =
5489 LIBIPW_OFDM_RATE_54MB;
5490 }
5491
5492 struct ipw_network_match {
5493 struct libipw_network *network;
5494 struct ipw_supported_rates rates;
5495 };
5496
5497 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5498 struct ipw_network_match *match,
5499 struct libipw_network *network,
5500 int roaming)
5501 {
5502 struct ipw_supported_rates rates;
5503 DECLARE_SSID_BUF(ssid);
5504
5505 /* Verify that this network's capability is compatible with the
5506 * current mode (AdHoc or Infrastructure) */
5507 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5508 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5509 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5510 "capability mismatch.\n",
5511 print_ssid(ssid, network->ssid,
5512 network->ssid_len),
5513 network->bssid);
5514 return 0;
5515 }
5516
5517 if (unlikely(roaming)) {
5518 		/* If we are roaming, then check whether this is a valid
5519 		 * network to try to roam to */
5520 if ((network->ssid_len != match->network->ssid_len) ||
5521 memcmp(network->ssid, match->network->ssid,
5522 network->ssid_len)) {
5523 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5524 "because of non-network ESSID.\n",
5525 print_ssid(ssid, network->ssid,
5526 network->ssid_len),
5527 network->bssid);
5528 return 0;
5529 }
5530 } else {
5531 /* If an ESSID has been configured then compare the broadcast
5532 * ESSID to ours */
5533 if ((priv->config & CFG_STATIC_ESSID) &&
5534 ((network->ssid_len != priv->essid_len) ||
5535 memcmp(network->ssid, priv->essid,
5536 min(network->ssid_len, priv->essid_len)))) {
5537 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5538
5539 strncpy(escaped,
5540 print_ssid(ssid, network->ssid,
5541 network->ssid_len),
5542 sizeof(escaped));
5543 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5544 "because of ESSID mismatch: '%s'.\n",
5545 escaped, network->bssid,
5546 print_ssid(ssid, priv->essid,
5547 priv->essid_len));
5548 return 0;
5549 }
5550 }
5551
5552 	/* If this network is newer than the currently matched one, don't
5553 	 * bother testing everything else. */
5554
5555 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5556 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5557 "current network.\n",
5558 print_ssid(ssid, match->network->ssid,
5559 match->network->ssid_len));
5560 return 0;
5561 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5562 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5563 "current network.\n",
5564 print_ssid(ssid, match->network->ssid,
5565 match->network->ssid_len));
5566 return 0;
5567 }
5568
5569 /* Now go through and see if the requested network is valid... */
5570 if (priv->ieee->scan_age != 0 &&
5571 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5572 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5573 "because of age: %ums.\n",
5574 print_ssid(ssid, network->ssid,
5575 network->ssid_len),
5576 network->bssid,
5577 jiffies_to_msecs(jiffies -
5578 network->last_scanned));
5579 return 0;
5580 }
5581
5582 if ((priv->config & CFG_STATIC_CHANNEL) &&
5583 (network->channel != priv->channel)) {
5584 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5585 "because of channel mismatch: %d != %d.\n",
5586 print_ssid(ssid, network->ssid,
5587 network->ssid_len),
5588 network->bssid,
5589 network->channel, priv->channel);
5590 return 0;
5591 }
5592
5593 /* Verify privacy compatibility */
5594 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5595 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5596 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5597 "because of privacy mismatch: %s != %s.\n",
5598 print_ssid(ssid, network->ssid,
5599 network->ssid_len),
5600 network->bssid,
5601 priv->
5602 capability & CAP_PRIVACY_ON ? "on" : "off",
5603 network->
5604 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5605 "off");
5606 return 0;
5607 }
5608
5609 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5610 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5611 "because of the same BSSID match: %pM"
5612 ".\n", print_ssid(ssid, network->ssid,
5613 network->ssid_len),
5614 network->bssid,
5615 priv->bssid);
5616 return 0;
5617 }
5618
5619 /* Filter out any incompatible freq / mode combinations */
5620 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5621 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5622 "because of invalid frequency/mode "
5623 "combination.\n",
5624 print_ssid(ssid, network->ssid,
5625 network->ssid_len),
5626 network->bssid);
5627 return 0;
5628 }
5629
5630 /* Ensure that the rates supported by the driver are compatible with
5631 * this AP, including verification of basic rates (mandatory) */
5632 if (!ipw_compatible_rates(priv, network, &rates)) {
5633 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5634 "because configured rate mask excludes "
5635 "AP mandatory rate.\n",
5636 print_ssid(ssid, network->ssid,
5637 network->ssid_len),
5638 network->bssid);
5639 return 0;
5640 }
5641
5642 if (rates.num_rates == 0) {
5643 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5644 "because of no compatible rates.\n",
5645 print_ssid(ssid, network->ssid,
5646 network->ssid_len),
5647 network->bssid);
5648 return 0;
5649 }
5650
5651 	/* TODO: Perform any further minimal comparative tests.  We do not
5652 * want to put too much policy logic here; intelligent scan selection
5653 * should occur within a generic IEEE 802.11 user space tool. */
5654
5655 /* Set up 'new' AP to this network */
5656 ipw_copy_rates(&match->rates, &rates);
5657 match->network = network;
5658 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5659 print_ssid(ssid, network->ssid, network->ssid_len),
5660 network->bssid);
5661
5662 return 1;
5663 }
5664
5665 static void ipw_merge_adhoc_network(struct work_struct *work)
5666 {
5667 DECLARE_SSID_BUF(ssid);
5668 struct ipw_priv *priv =
5669 container_of(work, struct ipw_priv, merge_networks);
5670 struct libipw_network *network = NULL;
5671 struct ipw_network_match match = {
5672 .network = priv->assoc_network
5673 };
5674
5675 if ((priv->status & STATUS_ASSOCIATED) &&
5676 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5677 /* First pass through ROAM process -- look for a better
5678 * network */
5679 unsigned long flags;
5680
5681 spin_lock_irqsave(&priv->ieee->lock, flags);
5682 list_for_each_entry(network, &priv->ieee->network_list, list) {
5683 if (network != priv->assoc_network)
5684 ipw_find_adhoc_network(priv, &match, network,
5685 1);
5686 }
5687 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5688
5689 if (match.network == priv->assoc_network) {
5690 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5691 "merge to.\n");
5692 return;
5693 }
5694
5695 mutex_lock(&priv->mutex);
5696 		if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5697 IPW_DEBUG_MERGE("remove network %s\n",
5698 print_ssid(ssid, priv->essid,
5699 priv->essid_len));
5700 ipw_remove_current_network(priv);
5701 }
5702
5703 ipw_disassociate(priv);
5704 priv->assoc_network = match.network;
5705 mutex_unlock(&priv->mutex);
5706 return;
5707 }
5708 }
5709
5710 static int ipw_best_network(struct ipw_priv *priv,
5711 struct ipw_network_match *match,
5712 struct libipw_network *network, int roaming)
5713 {
5714 struct ipw_supported_rates rates;
5715 DECLARE_SSID_BUF(ssid);
5716
5717 /* Verify that this network's capability is compatible with the
5718 * current mode (AdHoc or Infrastructure) */
5719 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5720 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5721 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5722 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5723 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5724 "capability mismatch.\n",
5725 print_ssid(ssid, network->ssid,
5726 network->ssid_len),
5727 network->bssid);
5728 return 0;
5729 }
5730
5731 if (unlikely(roaming)) {
5732 		/* If we are roaming, check that this is a valid
5733 		 * network to try to roam to */
5734 if ((network->ssid_len != match->network->ssid_len) ||
5735 memcmp(network->ssid, match->network->ssid,
5736 network->ssid_len)) {
5737 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5738 "because of non-network ESSID.\n",
5739 print_ssid(ssid, network->ssid,
5740 network->ssid_len),
5741 network->bssid);
5742 return 0;
5743 }
5744 } else {
5745 /* If an ESSID has been configured then compare the broadcast
5746 * ESSID to ours */
5747 if ((priv->config & CFG_STATIC_ESSID) &&
5748 ((network->ssid_len != priv->essid_len) ||
5749 memcmp(network->ssid, priv->essid,
5750 min(network->ssid_len, priv->essid_len)))) {
5751 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5752 strncpy(escaped,
5753 print_ssid(ssid, network->ssid,
5754 network->ssid_len),
5755 sizeof(escaped));
5756 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5757 "because of ESSID mismatch: '%s'.\n",
5758 escaped, network->bssid,
5759 print_ssid(ssid, priv->essid,
5760 priv->essid_len));
5761 return 0;
5762 }
5763 }
5764
5765 /* If the old network rate is better than this one, don't bother
5766 * testing everything else. */
5767 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5768 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5769 strncpy(escaped,
5770 print_ssid(ssid, network->ssid, network->ssid_len),
5771 sizeof(escaped));
5772 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5773 "'%s (%pM)' has a stronger signal.\n",
5774 escaped, network->bssid,
5775 print_ssid(ssid, match->network->ssid,
5776 match->network->ssid_len),
5777 match->network->bssid);
5778 return 0;
5779 }
5780
5781 /* If this network has already had an association attempt within the
5782 	 * last 3 seconds, do not try to associate again... */
5783 if (network->last_associate &&
5784 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5785 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5786 "because of storming (%ums since last "
5787 "assoc attempt).\n",
5788 print_ssid(ssid, network->ssid,
5789 network->ssid_len),
5790 network->bssid,
5791 jiffies_to_msecs(jiffies -
5792 network->last_associate));
5793 return 0;
5794 }
5795
5796 /* Now go through and see if the requested network is valid... */
5797 if (priv->ieee->scan_age != 0 &&
5798 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5799 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5800 "because of age: %ums.\n",
5801 print_ssid(ssid, network->ssid,
5802 network->ssid_len),
5803 network->bssid,
5804 jiffies_to_msecs(jiffies -
5805 network->last_scanned));
5806 return 0;
5807 }
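	/*
	 * Both time checks above rely on the kernel's wrap-safe jiffies
	 * helpers rather than raw comparisons.  A minimal sketch of the
	 * idiom (not driver code, just the pattern in use):
	 *
	 *	unsigned long deadline = network->last_associate + 3 * HZ;
	 *
	 *	if (time_after(deadline, jiffies)) {
	 *		// still inside the 3 second back-off window
	 *	}
	 *
	 * time_after() does the comparison with signed arithmetic, so it
	 * stays correct even when the jiffies counter wraps around.
	 */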
5808
5809 if ((priv->config & CFG_STATIC_CHANNEL) &&
5810 (network->channel != priv->channel)) {
5811 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5812 "because of channel mismatch: %d != %d.\n",
5813 print_ssid(ssid, network->ssid,
5814 network->ssid_len),
5815 network->bssid,
5816 network->channel, priv->channel);
5817 return 0;
5818 }
5819
5820 /* Verify privacy compatibility */
5821 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5822 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5823 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5824 "because of privacy mismatch: %s != %s.\n",
5825 print_ssid(ssid, network->ssid,
5826 network->ssid_len),
5827 network->bssid,
5828 priv->capability & CAP_PRIVACY_ON ? "on" :
5829 "off",
5830 network->capability &
5831 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5832 return 0;
5833 }
5834
5835 if ((priv->config & CFG_STATIC_BSSID) &&
5836 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5837 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5838 "because of BSSID mismatch: %pM.\n",
5839 print_ssid(ssid, network->ssid,
5840 network->ssid_len),
5841 network->bssid, priv->bssid);
5842 return 0;
5843 }
5844
5845 /* Filter out any incompatible freq / mode combinations */
5846 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5847 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5848 "because of invalid frequency/mode "
5849 "combination.\n",
5850 print_ssid(ssid, network->ssid,
5851 network->ssid_len),
5852 network->bssid);
5853 return 0;
5854 }
5855
5856 /* Filter out invalid channel in current GEO */
5857 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5858 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5859 "because of invalid channel in current GEO\n",
5860 print_ssid(ssid, network->ssid,
5861 network->ssid_len),
5862 network->bssid);
5863 return 0;
5864 }
5865
5866 /* Ensure that the rates supported by the driver are compatible with
5867 * this AP, including verification of basic rates (mandatory) */
5868 if (!ipw_compatible_rates(priv, network, &rates)) {
5869 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5870 "because configured rate mask excludes "
5871 "AP mandatory rate.\n",
5872 print_ssid(ssid, network->ssid,
5873 network->ssid_len),
5874 network->bssid);
5875 return 0;
5876 }
5877
5878 if (rates.num_rates == 0) {
5879 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5880 "because of no compatible rates.\n",
5881 print_ssid(ssid, network->ssid,
5882 network->ssid_len),
5883 network->bssid);
5884 return 0;
5885 }
5886
5887 	/* TODO: Perform any further minimal comparative tests.  We do not
5888 * want to put too much policy logic here; intelligent scan selection
5889 * should occur within a generic IEEE 802.11 user space tool. */
5890
5891 /* Set up 'new' AP to this network */
5892 ipw_copy_rates(&match->rates, &rates);
5893 match->network = network;
5894
5895 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5896 print_ssid(ssid, network->ssid, network->ssid_len),
5897 network->bssid);
5898
5899 return 1;
5900 }
5901
5902 static void ipw_adhoc_create(struct ipw_priv *priv,
5903 struct libipw_network *network)
5904 {
5905 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5906 int i;
5907
5908 /*
5909 * For the purposes of scanning, we can set our wireless mode
5910 * to trigger scans across combinations of bands, but when it
5911 	 * comes to creating a new ad-hoc network, we have to tell the FW
5912 * exactly which band to use.
5913 *
5914 * We also have the possibility of an invalid channel for the
5915 	 * chosen band.  Attempting to create a new ad-hoc network
5916 * with an invalid channel for wireless mode will trigger a
5917 * FW fatal error.
5918 *
5919 */
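	/*
	 * The switch below keys off libipw_is_valid_channel(), which is
	 * expected to return the band the configured channel belongs to
	 * (LIBIPW_52GHZ_BAND or LIBIPW_24GHZ_BAND) or 0 when the channel is
	 * not valid in the current GEO; an invalid channel therefore lands
	 * in the default case and gets overridden.
	 */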
5920 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5921 case LIBIPW_52GHZ_BAND:
5922 network->mode = IEEE_A;
5923 i = libipw_channel_to_index(priv->ieee, priv->channel);
5924 BUG_ON(i == -1);
5925 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5926 IPW_WARNING("Overriding invalid channel\n");
5927 priv->channel = geo->a[0].channel;
5928 }
5929 break;
5930
5931 case LIBIPW_24GHZ_BAND:
5932 if (priv->ieee->mode & IEEE_G)
5933 network->mode = IEEE_G;
5934 else
5935 network->mode = IEEE_B;
5936 i = libipw_channel_to_index(priv->ieee, priv->channel);
5937 BUG_ON(i == -1);
5938 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5939 IPW_WARNING("Overriding invalid channel\n");
5940 priv->channel = geo->bg[0].channel;
5941 }
5942 break;
5943
5944 default:
5945 IPW_WARNING("Overriding invalid channel\n");
5946 if (priv->ieee->mode & IEEE_A) {
5947 network->mode = IEEE_A;
5948 priv->channel = geo->a[0].channel;
5949 } else if (priv->ieee->mode & IEEE_G) {
5950 network->mode = IEEE_G;
5951 priv->channel = geo->bg[0].channel;
5952 } else {
5953 network->mode = IEEE_B;
5954 priv->channel = geo->bg[0].channel;
5955 }
5956 break;
5957 }
5958
5959 network->channel = priv->channel;
5960 priv->config |= CFG_ADHOC_PERSIST;
5961 ipw_create_bssid(priv, network->bssid);
5962 network->ssid_len = priv->essid_len;
5963 memcpy(network->ssid, priv->essid, priv->essid_len);
5964 memset(&network->stats, 0, sizeof(network->stats));
5965 network->capability = WLAN_CAPABILITY_IBSS;
5966 if (!(priv->config & CFG_PREAMBLE_LONG))
5967 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5968 if (priv->capability & CAP_PRIVACY_ON)
5969 network->capability |= WLAN_CAPABILITY_PRIVACY;
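	/* Advertise our rate set: the first MAX_RATES_LENGTH rates go into
	 * network->rates, anything beyond that into network->rates_ex, the
	 * same split the 802.11 Supported Rates and Extended Supported
	 * Rates elements use. */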
5970 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5971 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5972 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5973 memcpy(network->rates_ex,
5974 &priv->rates.supported_rates[network->rates_len],
5975 network->rates_ex_len);
5976 network->last_scanned = 0;
5977 network->flags = 0;
5978 network->last_associate = 0;
5979 network->time_stamp[0] = 0;
5980 network->time_stamp[1] = 0;
5981 network->beacon_interval = 100; /* Default */
5982 network->listen_interval = 10; /* Default */
5983 network->atim_window = 0; /* Default */
5984 network->wpa_ie_len = 0;
5985 network->rsn_ie_len = 0;
5986 }
5987
5988 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5989 {
5990 struct ipw_tgi_tx_key key;
5991
5992 if (!(priv->ieee->sec.flags & (1 << index)))
5993 return;
5994
5995 key.key_id = index;
5996 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5997 key.security_type = type;
5998 key.station_index = 0; /* always 0 for BSS */
5999 key.flags = 0;
6000 /* 0 for new key; previous value of counter (after fatal error) */
6001 key.tx_counter[0] = cpu_to_le32(0);
6002 key.tx_counter[1] = cpu_to_le32(0);
6003
6004 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
6005 }
6006
6007 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
6008 {
6009 struct ipw_wep_key key;
6010 int i;
6011
6012 key.cmd_id = DINO_CMD_WEP_KEY;
6013 key.seq_num = 0;
6014
6015 	/* Note: AES keys cannot be set multiple times.
6016 	 * Only set them the first time. */
6017 for (i = 0; i < 4; i++) {
6018 key.key_index = i | type;
6019 if (!(priv->ieee->sec.flags & (1 << i))) {
6020 key.key_size = 0;
6021 continue;
6022 }
6023
6024 key.key_size = priv->ieee->sec.key_sizes[i];
6025 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
6026
6027 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6028 }
6029 }
6030
6031 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6032 {
6033 if (priv->ieee->host_encrypt)
6034 return;
6035
6036 switch (level) {
6037 case SEC_LEVEL_3:
6038 priv->sys_config.disable_unicast_decryption = 0;
6039 priv->ieee->host_decrypt = 0;
6040 break;
6041 case SEC_LEVEL_2:
6042 priv->sys_config.disable_unicast_decryption = 1;
6043 priv->ieee->host_decrypt = 1;
6044 break;
6045 case SEC_LEVEL_1:
6046 priv->sys_config.disable_unicast_decryption = 0;
6047 priv->ieee->host_decrypt = 0;
6048 break;
6049 case SEC_LEVEL_0:
6050 priv->sys_config.disable_unicast_decryption = 1;
6051 break;
6052 default:
6053 break;
6054 }
6055 }
6056
6057 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6058 {
6059 if (priv->ieee->host_encrypt)
6060 return;
6061
6062 switch (level) {
6063 case SEC_LEVEL_3:
6064 priv->sys_config.disable_multicast_decryption = 0;
6065 break;
6066 case SEC_LEVEL_2:
6067 priv->sys_config.disable_multicast_decryption = 1;
6068 break;
6069 case SEC_LEVEL_1:
6070 priv->sys_config.disable_multicast_decryption = 0;
6071 break;
6072 case SEC_LEVEL_0:
6073 priv->sys_config.disable_multicast_decryption = 1;
6074 break;
6075 default:
6076 break;
6077 }
6078 }
6079
6080 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6081 {
6082 switch (priv->ieee->sec.level) {
6083 case SEC_LEVEL_3:
6084 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6085 ipw_send_tgi_tx_key(priv,
6086 DCT_FLAG_EXT_SECURITY_CCM,
6087 priv->ieee->sec.active_key);
6088
6089 if (!priv->ieee->host_mc_decrypt)
6090 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6091 break;
6092 case SEC_LEVEL_2:
6093 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6094 ipw_send_tgi_tx_key(priv,
6095 DCT_FLAG_EXT_SECURITY_TKIP,
6096 priv->ieee->sec.active_key);
6097 break;
6098 case SEC_LEVEL_1:
6099 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6100 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6101 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6102 break;
6103 case SEC_LEVEL_0:
6104 default:
6105 break;
6106 }
6107 }
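/*
 * The security levels used above map onto ciphers as follows (see also
 * wext_cipher2level() further down): SEC_LEVEL_1 covers WEP40/WEP104,
 * SEC_LEVEL_2 covers TKIP, SEC_LEVEL_3 covers CCMP and SEC_LEVEL_0 means no
 * encryption.  That is why level 3 programs a CCM TX key, level 2 a TKIP TX
 * key and level 1 plain WEP keys.
 */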
6108
6109 static void ipw_adhoc_check(void *data)
6110 {
6111 struct ipw_priv *priv = data;
6112
6113 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6114 !(priv->config & CFG_ADHOC_PERSIST)) {
6115 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6116 IPW_DL_STATE | IPW_DL_ASSOC,
6117 "Missed beacon: %d - disassociate\n",
6118 priv->missed_adhoc_beacons);
6119 ipw_remove_current_network(priv);
6120 ipw_disassociate(priv);
6121 return;
6122 }
6123
6124 schedule_delayed_work(&priv->adhoc_check,
6125 le16_to_cpu(priv->assoc_request.beacon_interval));
6126 }
6127
6128 static void ipw_bg_adhoc_check(struct work_struct *work)
6129 {
6130 struct ipw_priv *priv =
6131 container_of(work, struct ipw_priv, adhoc_check.work);
6132 mutex_lock(&priv->mutex);
6133 ipw_adhoc_check(priv);
6134 mutex_unlock(&priv->mutex);
6135 }
6136
6137 static void ipw_debug_config(struct ipw_priv *priv)
6138 {
6139 DECLARE_SSID_BUF(ssid);
6140 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6141 "[CFG 0x%08X]\n", priv->config);
6142 if (priv->config & CFG_STATIC_CHANNEL)
6143 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6144 else
6145 IPW_DEBUG_INFO("Channel unlocked.\n");
6146 if (priv->config & CFG_STATIC_ESSID)
6147 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6148 print_ssid(ssid, priv->essid, priv->essid_len));
6149 else
6150 IPW_DEBUG_INFO("ESSID unlocked.\n");
6151 if (priv->config & CFG_STATIC_BSSID)
6152 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6153 else
6154 IPW_DEBUG_INFO("BSSID unlocked.\n");
6155 if (priv->capability & CAP_PRIVACY_ON)
6156 IPW_DEBUG_INFO("PRIVACY on\n");
6157 else
6158 IPW_DEBUG_INFO("PRIVACY off\n");
6159 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6160 }
6161
6162 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6163 {
6164 /* TODO: Verify that this works... */
6165 struct ipw_fixed_rate fr;
6166 u32 reg;
6167 u16 mask = 0;
6168 u16 new_tx_rates = priv->rates_mask;
6169
6170 /* Identify 'current FW band' and match it with the fixed
6171 * Tx rates */
6172
6173 switch (priv->ieee->freq_band) {
6174 case LIBIPW_52GHZ_BAND: /* A only */
6175 /* IEEE_A */
6176 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6177 /* Invalid fixed rate mask */
6178 IPW_DEBUG_WX
6179 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6180 new_tx_rates = 0;
6181 break;
6182 }
6183
6184 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6185 break;
6186
6187 default: /* 2.4Ghz or Mixed */
6188 /* IEEE_B */
6189 if (mode == IEEE_B) {
6190 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6191 /* Invalid fixed rate mask */
6192 IPW_DEBUG_WX
6193 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6194 new_tx_rates = 0;
6195 }
6196 break;
6197 }
6198
6199 /* IEEE_G */
6200 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6201 LIBIPW_OFDM_RATES_MASK)) {
6202 /* Invalid fixed rate mask */
6203 IPW_DEBUG_WX
6204 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6205 new_tx_rates = 0;
6206 break;
6207 }
6208
6209 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6210 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6211 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6212 }
6213
6214 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6215 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6216 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6217 }
6218
6219 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6220 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6221 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6222 }
6223
6224 new_tx_rates |= mask;
6225 break;
6226 }
6227
6228 fr.tx_rates = cpu_to_le16(new_tx_rates);
6229
6230 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6231 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6232 }
6233
6234 static void ipw_abort_scan(struct ipw_priv *priv)
6235 {
6236 int err;
6237
6238 if (priv->status & STATUS_SCAN_ABORTING) {
6239 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6240 return;
6241 }
6242 priv->status |= STATUS_SCAN_ABORTING;
6243
6244 err = ipw_send_scan_abort(priv);
6245 if (err)
6246 IPW_DEBUG_HC("Request to abort scan failed.\n");
6247 }
6248
6249 static void ipw_add_scan_channels(struct ipw_priv *priv,
6250 struct ipw_scan_request_ext *scan,
6251 int scan_type)
6252 {
6253 int channel_index = 0;
6254 const struct libipw_geo *geo;
6255 int i;
6256
6257 geo = libipw_get_geo(priv->ieee);
6258
6259 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6260 int start = channel_index;
6261 for (i = 0; i < geo->a_channels; i++) {
6262 if ((priv->status & STATUS_ASSOCIATED) &&
6263 geo->a[i].channel == priv->channel)
6264 continue;
6265 channel_index++;
6266 scan->channels_list[channel_index] = geo->a[i].channel;
6267 			ipw_set_scan_type(scan, channel_index,
6268 					  geo->a[i].flags &
6269 					  LIBIPW_CH_PASSIVE_ONLY ?
6270 					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6271 					  scan_type);
6272 }
6273
6274 if (start != channel_index) {
6275 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6276 (channel_index - start);
6277 channel_index++;
6278 }
6279 }
6280
6281 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6282 int start = channel_index;
6283 if (priv->config & CFG_SPEED_SCAN) {
6284 int index;
6285 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6286 /* nop out the list */
6287 [0] = 0
6288 };
6289
6290 u8 channel;
6291 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6292 channel =
6293 priv->speed_scan[priv->speed_scan_pos];
6294 if (channel == 0) {
6295 priv->speed_scan_pos = 0;
6296 channel = priv->speed_scan[0];
6297 }
6298 if ((priv->status & STATUS_ASSOCIATED) &&
6299 channel == priv->channel) {
6300 priv->speed_scan_pos++;
6301 continue;
6302 }
6303
6304 				/* If this channel has already been
6305 				 * added to this scan, break out of
6306 				 * the loop; it will be the first
6307 				 * channel in the next scan.
6308 				 */
6309 if (channels[channel - 1] != 0)
6310 break;
6311
6312 channels[channel - 1] = 1;
6313 priv->speed_scan_pos++;
6314 channel_index++;
6315 scan->channels_list[channel_index] = channel;
6316 index =
6317 libipw_channel_to_index(priv->ieee, channel);
6318 ipw_set_scan_type(scan, channel_index,
6319 geo->bg[index].
6320 flags &
6321 LIBIPW_CH_PASSIVE_ONLY ?
6322 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6323 : scan_type);
6324 }
6325 } else {
6326 for (i = 0; i < geo->bg_channels; i++) {
6327 if ((priv->status & STATUS_ASSOCIATED) &&
6328 geo->bg[i].channel == priv->channel)
6329 continue;
6330 channel_index++;
6331 scan->channels_list[channel_index] =
6332 geo->bg[i].channel;
6333 ipw_set_scan_type(scan, channel_index,
6334 geo->bg[i].
6335 flags &
6336 LIBIPW_CH_PASSIVE_ONLY ?
6337 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6338 : scan_type);
6339 }
6340 }
6341
6342 if (start != channel_index) {
6343 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6344 (channel_index - start);
6345 }
6346 }
6347 }
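/*
 * The channel list built above is a packed format, as the code constructs
 * it: each band segment starts with a header byte whose top two bits hold
 * the band/mode (IPW_A_MODE or IPW_B_MODE shifted left by 6) and whose low
 * six bits hold the number of channels that follow.  As an illustration
 * derived from the code above, a 2.4GHz segment covering channels 1, 6 and
 * 11 would occupy four consecutive entries:
 *
 *	channels_list[start + 0] = (IPW_B_MODE << 6) | 3;
 *	channels_list[start + 1] = 1;
 *	channels_list[start + 2] = 6;
 *	channels_list[start + 3] = 11;
 *
 * The monitor-mode scan setup later in this file uses the same encoding
 * with a count of one.
 */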
6348
6349 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6350 {
6351 /* staying on passive channels longer than the DTIM interval during a
6352 * scan, while associated, causes the firmware to cancel the scan
6353 * without notification. Hence, don't stay on passive channels longer
6354 * than the beacon interval.
6355 */
6356 if (priv->status & STATUS_ASSOCIATED
6357 && priv->assoc_network->beacon_interval > 10)
6358 return priv->assoc_network->beacon_interval - 10;
6359 else
6360 return 120;
6361 }
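/*
 * A worked example for the helper above: when associated to a network
 * advertising a beacon interval of 100, the passive dwell becomes
 * 100 - 10 = 90; when not associated, or when the advertised interval is 10
 * or less, the default of 120 is used.  The result is fed into the scan
 * request in the same units as the other dwell times.
 */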
6362
6363 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6364 {
6365 struct ipw_scan_request_ext scan;
6366 int err = 0, scan_type;
6367
6368 if (!(priv->status & STATUS_INIT) ||
6369 (priv->status & STATUS_EXIT_PENDING))
6370 return 0;
6371
6372 mutex_lock(&priv->mutex);
6373
6374 if (direct && (priv->direct_scan_ssid_len == 0)) {
6375 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6376 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6377 goto done;
6378 }
6379
6380 if (priv->status & STATUS_SCANNING) {
6381 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6382 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6383 STATUS_SCAN_PENDING;
6384 goto done;
6385 }
6386
6387 if (!(priv->status & STATUS_SCAN_FORCED) &&
6388 priv->status & STATUS_SCAN_ABORTING) {
6389 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6390 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6391 STATUS_SCAN_PENDING;
6392 goto done;
6393 }
6394
6395 if (priv->status & STATUS_RF_KILL_MASK) {
6396 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6397 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6398 STATUS_SCAN_PENDING;
6399 goto done;
6400 }
6401
6402 memset(&scan, 0, sizeof(scan));
6403 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6404
6405 if (type == IW_SCAN_TYPE_PASSIVE) {
6406 IPW_DEBUG_WX("use passive scanning\n");
6407 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6408 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6409 cpu_to_le16(ipw_passive_dwell_time(priv));
6410 ipw_add_scan_channels(priv, &scan, scan_type);
6411 goto send_request;
6412 }
6413
6414 /* Use active scan by default. */
6415 if (priv->config & CFG_SPEED_SCAN)
6416 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6417 cpu_to_le16(30);
6418 else
6419 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6420 cpu_to_le16(20);
6421
6422 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6423 cpu_to_le16(20);
6424
6425 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6426 cpu_to_le16(ipw_passive_dwell_time(priv));
6427 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6428
6429 #ifdef CONFIG_IPW2200_MONITOR
6430 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6431 u8 channel;
6432 u8 band = 0;
6433
6434 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6435 case LIBIPW_52GHZ_BAND:
6436 band = (u8) (IPW_A_MODE << 6) | 1;
6437 channel = priv->channel;
6438 break;
6439
6440 case LIBIPW_24GHZ_BAND:
6441 band = (u8) (IPW_B_MODE << 6) | 1;
6442 channel = priv->channel;
6443 break;
6444
6445 default:
6446 band = (u8) (IPW_B_MODE << 6) | 1;
6447 channel = 9;
6448 break;
6449 }
6450
6451 scan.channels_list[0] = band;
6452 scan.channels_list[1] = channel;
6453 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6454
6455 /* NOTE: The card will sit on this channel for this time
6456 * period. Scan aborts are timing sensitive and frequently
6457 * result in firmware restarts. As such, it is best to
6458 * set a small dwell_time here and just keep re-issuing
6459 * scans. Otherwise fast channel hopping will not actually
6460 * hop channels.
6461 *
6462 * TODO: Move SPEED SCAN support to all modes and bands */
6463 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6464 cpu_to_le16(2000);
6465 } else {
6466 #endif /* CONFIG_IPW2200_MONITOR */
6467 /* Honor direct scans first, otherwise if we are roaming make
6468 * this a direct scan for the current network. Finally,
6469 * ensure that every other scan is a fast channel hop scan */
6470 if (direct) {
6471 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6472 priv->direct_scan_ssid_len);
6473 if (err) {
6474 IPW_DEBUG_HC("Attempt to send SSID command "
6475 "failed\n");
6476 goto done;
6477 }
6478
6479 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6480 } else if ((priv->status & STATUS_ROAMING)
6481 || (!(priv->status & STATUS_ASSOCIATED)
6482 && (priv->config & CFG_STATIC_ESSID)
6483 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6484 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6485 if (err) {
6486 IPW_DEBUG_HC("Attempt to send SSID command "
6487 "failed.\n");
6488 goto done;
6489 }
6490
6491 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6492 } else
6493 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6494
6495 ipw_add_scan_channels(priv, &scan, scan_type);
6496 #ifdef CONFIG_IPW2200_MONITOR
6497 }
6498 #endif
6499
6500 send_request:
6501 err = ipw_send_scan_request_ext(priv, &scan);
6502 if (err) {
6503 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6504 goto done;
6505 }
6506
6507 priv->status |= STATUS_SCANNING;
6508 if (direct) {
6509 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6510 priv->direct_scan_ssid_len = 0;
6511 } else
6512 priv->status &= ~STATUS_SCAN_PENDING;
6513
6514 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6515 done:
6516 mutex_unlock(&priv->mutex);
6517 return err;
6518 }
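/*
 * Scan requests that cannot be serviced right away (a scan already in
 * flight, an abort still pending, or RF kill asserted) are not dropped;
 * the helper above records them in STATUS_SCAN_PENDING or
 * STATUS_DIRECT_SCAN_PENDING so the driver can re-issue them once the
 * blocking condition clears.
 */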
6519
6520 static void ipw_request_passive_scan(struct work_struct *work)
6521 {
6522 struct ipw_priv *priv =
6523 container_of(work, struct ipw_priv, request_passive_scan.work);
6524 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6525 }
6526
6527 static void ipw_request_scan(struct work_struct *work)
6528 {
6529 struct ipw_priv *priv =
6530 container_of(work, struct ipw_priv, request_scan.work);
6531 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6532 }
6533
6534 static void ipw_request_direct_scan(struct work_struct *work)
6535 {
6536 struct ipw_priv *priv =
6537 container_of(work, struct ipw_priv, request_direct_scan.work);
6538 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6539 }
6540
6541 static void ipw_bg_abort_scan(struct work_struct *work)
6542 {
6543 struct ipw_priv *priv =
6544 container_of(work, struct ipw_priv, abort_scan);
6545 mutex_lock(&priv->mutex);
6546 ipw_abort_scan(priv);
6547 mutex_unlock(&priv->mutex);
6548 }
6549
6550 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6551 {
6552 /* This is called when wpa_supplicant loads and closes the driver
6553 * interface. */
6554 priv->ieee->wpa_enabled = value;
6555 return 0;
6556 }
6557
6558 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6559 {
6560 struct libipw_device *ieee = priv->ieee;
6561 struct libipw_security sec = {
6562 .flags = SEC_AUTH_MODE,
6563 };
6564 int ret = 0;
6565
6566 if (value & IW_AUTH_ALG_SHARED_KEY) {
6567 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6568 ieee->open_wep = 0;
6569 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6570 sec.auth_mode = WLAN_AUTH_OPEN;
6571 ieee->open_wep = 1;
6572 } else if (value & IW_AUTH_ALG_LEAP) {
6573 sec.auth_mode = WLAN_AUTH_LEAP;
6574 ieee->open_wep = 1;
6575 } else
6576 return -EINVAL;
6577
6578 if (ieee->set_security)
6579 ieee->set_security(ieee->dev, &sec);
6580 else
6581 ret = -EOPNOTSUPP;
6582
6583 return ret;
6584 }
6585
6586 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6587 int wpa_ie_len)
6588 {
6589 /* make sure WPA is enabled */
6590 ipw_wpa_enable(priv, 1);
6591 }
6592
6593 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6594 char *capabilities, int length)
6595 {
6596 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6597
6598 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6599 capabilities);
6600 }
6601
6602 /*
6603 * WE-18 support
6604 */
6605
6606 /* SIOCSIWGENIE */
6607 static int ipw_wx_set_genie(struct net_device *dev,
6608 struct iw_request_info *info,
6609 union iwreq_data *wrqu, char *extra)
6610 {
6611 struct ipw_priv *priv = libipw_priv(dev);
6612 struct libipw_device *ieee = priv->ieee;
6613 u8 *buf;
6614 int err = 0;
6615
6616 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6617 (wrqu->data.length && extra == NULL))
6618 return -EINVAL;
6619
6620 if (wrqu->data.length) {
6621 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6622 if (buf == NULL) {
6623 err = -ENOMEM;
6624 goto out;
6625 }
6626
6627 kfree(ieee->wpa_ie);
6628 ieee->wpa_ie = buf;
6629 ieee->wpa_ie_len = wrqu->data.length;
6630 } else {
6631 kfree(ieee->wpa_ie);
6632 ieee->wpa_ie = NULL;
6633 ieee->wpa_ie_len = 0;
6634 }
6635
6636 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6637 out:
6638 return err;
6639 }
6640
6641 /* SIOCGIWGENIE */
6642 static int ipw_wx_get_genie(struct net_device *dev,
6643 struct iw_request_info *info,
6644 union iwreq_data *wrqu, char *extra)
6645 {
6646 struct ipw_priv *priv = libipw_priv(dev);
6647 struct libipw_device *ieee = priv->ieee;
6648 int err = 0;
6649
6650 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6651 wrqu->data.length = 0;
6652 goto out;
6653 }
6654
6655 if (wrqu->data.length < ieee->wpa_ie_len) {
6656 err = -E2BIG;
6657 goto out;
6658 }
6659
6660 wrqu->data.length = ieee->wpa_ie_len;
6661 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6662
6663 out:
6664 return err;
6665 }
6666
6667 static int wext_cipher2level(int cipher)
6668 {
6669 switch (cipher) {
6670 case IW_AUTH_CIPHER_NONE:
6671 return SEC_LEVEL_0;
6672 case IW_AUTH_CIPHER_WEP40:
6673 case IW_AUTH_CIPHER_WEP104:
6674 return SEC_LEVEL_1;
6675 case IW_AUTH_CIPHER_TKIP:
6676 return SEC_LEVEL_2;
6677 case IW_AUTH_CIPHER_CCMP:
6678 return SEC_LEVEL_3;
6679 default:
6680 return -1;
6681 }
6682 }
6683
6684 /* SIOCSIWAUTH */
6685 static int ipw_wx_set_auth(struct net_device *dev,
6686 struct iw_request_info *info,
6687 union iwreq_data *wrqu, char *extra)
6688 {
6689 struct ipw_priv *priv = libipw_priv(dev);
6690 struct libipw_device *ieee = priv->ieee;
6691 struct iw_param *param = &wrqu->param;
6692 struct lib80211_crypt_data *crypt;
6693 unsigned long flags;
6694 int ret = 0;
6695
6696 switch (param->flags & IW_AUTH_INDEX) {
6697 case IW_AUTH_WPA_VERSION:
6698 break;
6699 case IW_AUTH_CIPHER_PAIRWISE:
6700 ipw_set_hw_decrypt_unicast(priv,
6701 wext_cipher2level(param->value));
6702 break;
6703 case IW_AUTH_CIPHER_GROUP:
6704 ipw_set_hw_decrypt_multicast(priv,
6705 wext_cipher2level(param->value));
6706 break;
6707 case IW_AUTH_KEY_MGMT:
6708 /*
6709 * ipw2200 does not use these parameters
6710 */
6711 break;
6712
6713 case IW_AUTH_TKIP_COUNTERMEASURES:
6714 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6715 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6716 break;
6717
6718 flags = crypt->ops->get_flags(crypt->priv);
6719
6720 if (param->value)
6721 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6722 else
6723 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6724
6725 crypt->ops->set_flags(flags, crypt->priv);
6726
6727 break;
6728
6729 case IW_AUTH_DROP_UNENCRYPTED:{
6730 /* HACK:
6731 *
6732 * wpa_supplicant calls set_wpa_enabled when the driver
6733 			 * is loaded and unloaded, regardless of whether WPA is
6734 			 * being used.  No other call is made that indicates,
6735 			 * before an association is expected, whether encryption
6736 			 * will be used.  If encryption is not being used,
6737 			 * drop_unencrypted is set to false, else true -- we
6738 * can use this to determine if the CAP_PRIVACY_ON bit should
6739 * be set.
6740 */
6741 struct libipw_security sec = {
6742 .flags = SEC_ENABLED,
6743 .enabled = param->value,
6744 };
6745 priv->ieee->drop_unencrypted = param->value;
6746 /* We only change SEC_LEVEL for open mode. Others
6747 * are set by ipw_wpa_set_encryption.
6748 */
6749 if (!param->value) {
6750 sec.flags |= SEC_LEVEL;
6751 sec.level = SEC_LEVEL_0;
6752 } else {
6753 sec.flags |= SEC_LEVEL;
6754 sec.level = SEC_LEVEL_1;
6755 }
6756 if (priv->ieee->set_security)
6757 priv->ieee->set_security(priv->ieee->dev, &sec);
6758 break;
6759 }
6760
6761 case IW_AUTH_80211_AUTH_ALG:
6762 ret = ipw_wpa_set_auth_algs(priv, param->value);
6763 break;
6764
6765 case IW_AUTH_WPA_ENABLED:
6766 ret = ipw_wpa_enable(priv, param->value);
6767 ipw_disassociate(priv);
6768 break;
6769
6770 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6771 ieee->ieee802_1x = param->value;
6772 break;
6773
6774 case IW_AUTH_PRIVACY_INVOKED:
6775 ieee->privacy_invoked = param->value;
6776 break;
6777
6778 default:
6779 return -EOPNOTSUPP;
6780 }
6781 return ret;
6782 }
6783
6784 /* SIOCGIWAUTH */
6785 static int ipw_wx_get_auth(struct net_device *dev,
6786 struct iw_request_info *info,
6787 union iwreq_data *wrqu, char *extra)
6788 {
6789 struct ipw_priv *priv = libipw_priv(dev);
6790 struct libipw_device *ieee = priv->ieee;
6791 struct lib80211_crypt_data *crypt;
6792 struct iw_param *param = &wrqu->param;
6793 int ret = 0;
6794
6795 switch (param->flags & IW_AUTH_INDEX) {
6796 case IW_AUTH_WPA_VERSION:
6797 case IW_AUTH_CIPHER_PAIRWISE:
6798 case IW_AUTH_CIPHER_GROUP:
6799 case IW_AUTH_KEY_MGMT:
6800 /*
6801 * wpa_supplicant will control these internally
6802 */
6803 ret = -EOPNOTSUPP;
6804 break;
6805
6806 case IW_AUTH_TKIP_COUNTERMEASURES:
6807 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6808 if (!crypt || !crypt->ops->get_flags)
6809 break;
6810
6811 param->value = (crypt->ops->get_flags(crypt->priv) &
6812 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6813
6814 break;
6815
6816 case IW_AUTH_DROP_UNENCRYPTED:
6817 param->value = ieee->drop_unencrypted;
6818 break;
6819
6820 case IW_AUTH_80211_AUTH_ALG:
6821 param->value = ieee->sec.auth_mode;
6822 break;
6823
6824 case IW_AUTH_WPA_ENABLED:
6825 param->value = ieee->wpa_enabled;
6826 break;
6827
6828 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6829 param->value = ieee->ieee802_1x;
6830 break;
6831
6832 case IW_AUTH_ROAMING_CONTROL:
6833 case IW_AUTH_PRIVACY_INVOKED:
6834 param->value = ieee->privacy_invoked;
6835 break;
6836
6837 default:
6838 return -EOPNOTSUPP;
6839 }
6840 return 0;
6841 }
6842
6843 /* SIOCSIWENCODEEXT */
6844 static int ipw_wx_set_encodeext(struct net_device *dev,
6845 struct iw_request_info *info,
6846 union iwreq_data *wrqu, char *extra)
6847 {
6848 struct ipw_priv *priv = libipw_priv(dev);
6849 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6850
6851 if (hwcrypto) {
6852 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6853 			/* IPW HW can't build the TKIP MIC;
6854 			   host decryption is still needed */
6855 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6856 priv->ieee->host_mc_decrypt = 1;
6857 else {
6858 priv->ieee->host_encrypt = 0;
6859 priv->ieee->host_encrypt_msdu = 1;
6860 priv->ieee->host_decrypt = 1;
6861 }
6862 } else {
6863 priv->ieee->host_encrypt = 0;
6864 priv->ieee->host_encrypt_msdu = 0;
6865 priv->ieee->host_decrypt = 0;
6866 priv->ieee->host_mc_decrypt = 0;
6867 }
6868 }
6869
6870 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6871 }
6872
6873 /* SIOCGIWENCODEEXT */
6874 static int ipw_wx_get_encodeext(struct net_device *dev,
6875 struct iw_request_info *info,
6876 union iwreq_data *wrqu, char *extra)
6877 {
6878 struct ipw_priv *priv = libipw_priv(dev);
6879 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6880 }
6881
6882 /* SIOCSIWMLME */
6883 static int ipw_wx_set_mlme(struct net_device *dev,
6884 struct iw_request_info *info,
6885 union iwreq_data *wrqu, char *extra)
6886 {
6887 struct ipw_priv *priv = libipw_priv(dev);
6888 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6889 __le16 reason;
6890
6891 reason = cpu_to_le16(mlme->reason_code);
6892
6893 switch (mlme->cmd) {
6894 case IW_MLME_DEAUTH:
6895 /* silently ignore */
6896 break;
6897
6898 case IW_MLME_DISASSOC:
6899 ipw_disassociate(priv);
6900 break;
6901
6902 default:
6903 return -EOPNOTSUPP;
6904 }
6905 return 0;
6906 }
6907
6908 #ifdef CONFIG_IPW2200_QOS
6909
6910 /* QoS */
6911 /*
6912 * get the modulation type of the current network or
6913  * the card's current mode
6914 */
6915 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6916 {
6917 u8 mode = 0;
6918
6919 if (priv->status & STATUS_ASSOCIATED) {
6920 unsigned long flags;
6921
6922 spin_lock_irqsave(&priv->ieee->lock, flags);
6923 mode = priv->assoc_network->mode;
6924 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6925 } else {
6926 mode = priv->ieee->mode;
6927 }
6928 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6929 return mode;
6930 }
6931
6932 /*
6933  * Handle beacon and probe response management frames
6934 */
6935 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6936 int active_network,
6937 struct libipw_network *network)
6938 {
6939 u32 size = sizeof(struct libipw_qos_parameters);
6940
6941 if (network->capability & WLAN_CAPABILITY_IBSS)
6942 network->qos_data.active = network->qos_data.supported;
6943
6944 if (network->flags & NETWORK_HAS_QOS_MASK) {
6945 if (active_network &&
6946 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6947 network->qos_data.active = network->qos_data.supported;
6948
6949 if ((network->qos_data.active == 1) && (active_network == 1) &&
6950 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6951 (network->qos_data.old_param_count !=
6952 network->qos_data.param_count)) {
6953 network->qos_data.old_param_count =
6954 network->qos_data.param_count;
6955 schedule_work(&priv->qos_activate);
6956 IPW_DEBUG_QOS("QoS parameters change call "
6957 "qos_activate\n");
6958 }
6959 } else {
6960 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6961 memcpy(&network->qos_data.parameters,
6962 &def_parameters_CCK, size);
6963 else
6964 memcpy(&network->qos_data.parameters,
6965 &def_parameters_OFDM, size);
6966
6967 if ((network->qos_data.active == 1) && (active_network == 1)) {
6968 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6969 schedule_work(&priv->qos_activate);
6970 }
6971
6972 network->qos_data.active = 0;
6973 network->qos_data.supported = 0;
6974 }
6975 if ((priv->status & STATUS_ASSOCIATED) &&
6976 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6977 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6978 if (network->capability & WLAN_CAPABILITY_IBSS)
6979 if ((network->ssid_len ==
6980 priv->assoc_network->ssid_len) &&
6981 !memcmp(network->ssid,
6982 priv->assoc_network->ssid,
6983 network->ssid_len)) {
6984 schedule_work(&priv->merge_networks);
6985 }
6986 }
6987
6988 return 0;
6989 }
6990
6991 /*
6992  * This function sets up the firmware to support QoS. It sends
6993 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6994 */
6995 static int ipw_qos_activate(struct ipw_priv *priv,
6996 struct libipw_qos_data *qos_network_data)
6997 {
6998 int err;
6999 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
7000 struct libipw_qos_parameters *active_one = NULL;
7001 u32 size = sizeof(struct libipw_qos_parameters);
7002 u32 burst_duration;
7003 int i;
7004 u8 type;
7005
7006 type = ipw_qos_current_mode(priv);
7007
7008 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
7009 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
7010 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
7011 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
7012
7013 if (qos_network_data == NULL) {
7014 if (type == IEEE_B) {
7015 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
7016 active_one = &def_parameters_CCK;
7017 } else
7018 active_one = &def_parameters_OFDM;
7019
7020 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7021 burst_duration = ipw_qos_get_burst_duration(priv);
7022 for (i = 0; i < QOS_QUEUE_NUM; i++)
7023 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
7024 cpu_to_le16(burst_duration);
7025 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7026 if (type == IEEE_B) {
7027 			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7028 type);
7029 if (priv->qos_data.qos_enable == 0)
7030 active_one = &def_parameters_CCK;
7031 else
7032 active_one = priv->qos_data.def_qos_parm_CCK;
7033 } else {
7034 if (priv->qos_data.qos_enable == 0)
7035 active_one = &def_parameters_OFDM;
7036 else
7037 active_one = priv->qos_data.def_qos_parm_OFDM;
7038 }
7039 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7040 } else {
7041 unsigned long flags;
7042 int active;
7043
7044 spin_lock_irqsave(&priv->ieee->lock, flags);
7045 active_one = &(qos_network_data->parameters);
7046 qos_network_data->old_param_count =
7047 qos_network_data->param_count;
7048 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7049 active = qos_network_data->supported;
7050 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7051
7052 if (active == 0) {
7053 burst_duration = ipw_qos_get_burst_duration(priv);
7054 for (i = 0; i < QOS_QUEUE_NUM; i++)
7055 qos_parameters[QOS_PARAM_SET_ACTIVE].
7056 tx_op_limit[i] = cpu_to_le16(burst_duration);
7057 }
7058 }
7059
7060 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7061 err = ipw_send_qos_params_command(priv,
7062 (struct libipw_qos_parameters *)
7063 &(qos_parameters[0]));
7064 if (err)
7065 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7066
7067 return err;
7068 }
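/*
 * The command built above carries the whole qos_parameters[] array -- the
 * CCK defaults, the OFDM defaults and the currently active set -- and is
 * pushed to the firmware as a single IPW_CMD_QOS_PARAMETERS PDU of
 * 3 * sizeof(struct libipw_qos_parameters) bytes (see
 * ipw_send_qos_params_command() below).
 */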
7069
7070 /*
7071 * send IPW_CMD_WME_INFO to the firmware
7072 */
7073 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7074 {
7075 int ret = 0;
7076 struct libipw_qos_information_element qos_info;
7077
7078 if (priv == NULL)
7079 return -1;
7080
7081 qos_info.elementID = QOS_ELEMENT_ID;
7082 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7083
7084 qos_info.version = QOS_VERSION_1;
7085 qos_info.ac_info = 0;
7086
7087 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7088 qos_info.qui_type = QOS_OUI_TYPE;
7089 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7090
7091 ret = ipw_send_qos_info_command(priv, &qos_info);
7092 if (ret != 0) {
7093 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7094 }
7095 return ret;
7096 }
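/*
 * The structure filled in above corresponds to the standard WMM/WME
 * Information Element.  As a sketch, assuming the usual WMM values for the
 * QOS_* constants defined in the header, the element that ends up on the
 * air is the familiar 9-byte vendor-specific IE:
 *
 *	dd 07 00 50 f2 02 00 01 00
 *
 * i.e. element ID 0xdd, length 7, OUI 00:50:f2, OUI type 2 (WMM), subtype 0
 * (information), version 1 and a QoS info field of 0 (ac_info above).
 */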
7097
7098 /*
7099  * Set up the QoS parameters for the association request
7100 */
7101 static int ipw_qos_association(struct ipw_priv *priv,
7102 struct libipw_network *network)
7103 {
7104 int err = 0;
7105 struct libipw_qos_data *qos_data = NULL;
7106 struct libipw_qos_data ibss_data = {
7107 .supported = 1,
7108 .active = 1,
7109 };
7110
7111 switch (priv->ieee->iw_mode) {
7112 case IW_MODE_ADHOC:
7113 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7114
7115 qos_data = &ibss_data;
7116 break;
7117
7118 case IW_MODE_INFRA:
7119 qos_data = &network->qos_data;
7120 break;
7121
7122 default:
7123 BUG();
7124 break;
7125 }
7126
7127 err = ipw_qos_activate(priv, qos_data);
7128 if (err) {
7129 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7130 return err;
7131 }
7132
7133 if (priv->qos_data.qos_enable && qos_data->supported) {
7134 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7135 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7136 return ipw_qos_set_info_element(priv);
7137 }
7138
7139 return 0;
7140 }
7141
7142 /*
7143  * Handle beacon responses.  If the QoS settings advertised by the
7144  * network differ from the settings of the current association, adjust
7145  * the QoS settings.
7146 */
7147 static int ipw_qos_association_resp(struct ipw_priv *priv,
7148 struct libipw_network *network)
7149 {
7150 int ret = 0;
7151 unsigned long flags;
7152 u32 size = sizeof(struct libipw_qos_parameters);
7153 int set_qos_param = 0;
7154
7155 if ((priv == NULL) || (network == NULL) ||
7156 (priv->assoc_network == NULL))
7157 return ret;
7158
7159 if (!(priv->status & STATUS_ASSOCIATED))
7160 return ret;
7161
7162 	if (priv->ieee->iw_mode != IW_MODE_INFRA)
7163 return ret;
7164
7165 spin_lock_irqsave(&priv->ieee->lock, flags);
7166 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7167 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7168 sizeof(struct libipw_qos_data));
7169 priv->assoc_network->qos_data.active = 1;
7170 if ((network->qos_data.old_param_count !=
7171 network->qos_data.param_count)) {
7172 set_qos_param = 1;
7173 network->qos_data.old_param_count =
7174 network->qos_data.param_count;
7175 }
7176
7177 } else {
7178 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7179 memcpy(&priv->assoc_network->qos_data.parameters,
7180 &def_parameters_CCK, size);
7181 else
7182 memcpy(&priv->assoc_network->qos_data.parameters,
7183 &def_parameters_OFDM, size);
7184 priv->assoc_network->qos_data.active = 0;
7185 priv->assoc_network->qos_data.supported = 0;
7186 set_qos_param = 1;
7187 }
7188
7189 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7190
7191 if (set_qos_param == 1)
7192 schedule_work(&priv->qos_activate);
7193
7194 return ret;
7195 }
7196
7197 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7198 {
7199 u32 ret = 0;
7200
7201 	if (priv == NULL)
7202 return 0;
7203
7204 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7205 ret = priv->qos_data.burst_duration_CCK;
7206 else
7207 ret = priv->qos_data.burst_duration_OFDM;
7208
7209 return ret;
7210 }
7211
7212 /*
7213  * Initialize the global QoS settings
7214 */
7215 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7216 int burst_enable, u32 burst_duration_CCK,
7217 u32 burst_duration_OFDM)
7218 {
7219 priv->qos_data.qos_enable = enable;
7220
7221 if (priv->qos_data.qos_enable) {
7222 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7223 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7224 IPW_DEBUG_QOS("QoS is enabled\n");
7225 } else {
7226 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7227 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7228 IPW_DEBUG_QOS("QoS is not enabled\n");
7229 }
7230
7231 priv->qos_data.burst_enable = burst_enable;
7232
7233 if (burst_enable) {
7234 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7235 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7236 } else {
7237 priv->qos_data.burst_duration_CCK = 0;
7238 priv->qos_data.burst_duration_OFDM = 0;
7239 }
7240 }
7241
7242 /*
7243 * map the packet priority to the right TX Queue
7244 */
7245 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7246 {
7247 if (priority > 7 || !priv->qos_data.qos_enable)
7248 priority = 0;
7249
7250 return from_priority_to_tx_queue[priority] - 1;
7251 }
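/*
 * The from_priority_to_tx_queue[] table used above (defined earlier in this
 * file) is expected to encode the usual 802.11e mapping from 802.1d user
 * priorities to the four access categories, so this helper effectively
 * picks the background, best-effort, video or voice queue for a frame.
 * Out-of-range priorities, or QoS being disabled, fall back to the queue
 * for priority 0.
 */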
7252
7253 static int ipw_is_qos_active(struct net_device *dev,
7254 struct sk_buff *skb)
7255 {
7256 struct ipw_priv *priv = libipw_priv(dev);
7257 struct libipw_qos_data *qos_data = NULL;
7258 int active, supported;
7259 u8 *daddr = skb->data + ETH_ALEN;
7260 int unicast = !is_multicast_ether_addr(daddr);
7261
7262 if (!(priv->status & STATUS_ASSOCIATED))
7263 return 0;
7264
7265 qos_data = &priv->assoc_network->qos_data;
7266
7267 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7268 if (unicast == 0)
7269 qos_data->active = 0;
7270 else
7271 qos_data->active = qos_data->supported;
7272 }
7273 active = qos_data->active;
7274 supported = qos_data->supported;
7275 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7276 "unicast %d\n",
7277 priv->qos_data.qos_enable, active, supported, unicast);
7278 if (active && priv->qos_data.qos_enable)
7279 return 1;
7280
7281 return 0;
7282
7283 }
7284 /*
7285 * add QoS parameter to the TX command
7286 */
7287 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7288 u16 priority,
7289 struct tfd_data *tfd)
7290 {
7291 int tx_queue_id = 0;
7292
7293
7294 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7295 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7296
7297 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7298 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7299 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7300 }
7301 return 0;
7302 }
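/*
 * qos_no_ack_mask is a per-TX-queue bitmask: when the bit for the selected
 * queue is set, the ACK-required flag is stripped from the TFD and
 * CTRL_QOS_NO_ACK is set in the frame's QoS control field, i.e. the frame
 * is sent with the "No Ack" acknowledgement policy (as the flag name
 * suggests).
 */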
7303
7304 /*
7305  * Background work handler to run the QoS activate functionality
7306 */
7307 static void ipw_bg_qos_activate(struct work_struct *work)
7308 {
7309 struct ipw_priv *priv =
7310 container_of(work, struct ipw_priv, qos_activate);
7311
7312 mutex_lock(&priv->mutex);
7313
7314 if (priv->status & STATUS_ASSOCIATED)
7315 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7316
7317 mutex_unlock(&priv->mutex);
7318 }
7319
7320 static int ipw_handle_probe_response(struct net_device *dev,
7321 struct libipw_probe_response *resp,
7322 struct libipw_network *network)
7323 {
7324 struct ipw_priv *priv = libipw_priv(dev);
7325 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7326 (network == priv->assoc_network));
7327
7328 ipw_qos_handle_probe_response(priv, active_network, network);
7329
7330 return 0;
7331 }
7332
7333 static int ipw_handle_beacon(struct net_device *dev,
7334 struct libipw_beacon *resp,
7335 struct libipw_network *network)
7336 {
7337 struct ipw_priv *priv = libipw_priv(dev);
7338 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7339 (network == priv->assoc_network));
7340
7341 ipw_qos_handle_probe_response(priv, active_network, network);
7342
7343 return 0;
7344 }
7345
7346 static int ipw_handle_assoc_response(struct net_device *dev,
7347 struct libipw_assoc_response *resp,
7348 struct libipw_network *network)
7349 {
7350 struct ipw_priv *priv = libipw_priv(dev);
7351 ipw_qos_association_resp(priv, network);
7352 return 0;
7353 }
7354
7355 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7356 *qos_param)
7357 {
7358 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7359 sizeof(*qos_param) * 3, qos_param);
7360 }
7361
7362 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7363 *qos_param)
7364 {
7365 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7366 qos_param);
7367 }
7368
7369 #endif /* CONFIG_IPW2200_QOS */
7370
7371 static int ipw_associate_network(struct ipw_priv *priv,
7372 struct libipw_network *network,
7373 struct ipw_supported_rates *rates, int roaming)
7374 {
7375 int err;
7376 DECLARE_SSID_BUF(ssid);
7377
7378 if (priv->config & CFG_FIXED_RATE)
7379 ipw_set_fixed_rate(priv, network->mode);
7380
7381 if (!(priv->config & CFG_STATIC_ESSID)) {
7382 priv->essid_len = min(network->ssid_len,
7383 (u8) IW_ESSID_MAX_SIZE);
7384 memcpy(priv->essid, network->ssid, priv->essid_len);
7385 }
7386
7387 network->last_associate = jiffies;
7388
7389 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7390 priv->assoc_request.channel = network->channel;
7391 priv->assoc_request.auth_key = 0;
7392
7393 if ((priv->capability & CAP_PRIVACY_ON) &&
7394 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7395 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7396 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7397
7398 if (priv->ieee->sec.level == SEC_LEVEL_1)
7399 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7400
7401 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7402 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7403 priv->assoc_request.auth_type = AUTH_LEAP;
7404 else
7405 priv->assoc_request.auth_type = AUTH_OPEN;
7406
7407 if (priv->ieee->wpa_ie_len) {
7408 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7409 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7410 priv->ieee->wpa_ie_len);
7411 }
7412
7413 /*
7414 * It is valid for our ieee device to support multiple modes, but
7415 * when it comes to associating to a given network we have to choose
7416 * just one mode.
7417 */
7418 if (network->mode & priv->ieee->mode & IEEE_A)
7419 priv->assoc_request.ieee_mode = IPW_A_MODE;
7420 else if (network->mode & priv->ieee->mode & IEEE_G)
7421 priv->assoc_request.ieee_mode = IPW_G_MODE;
7422 else if (network->mode & priv->ieee->mode & IEEE_B)
7423 priv->assoc_request.ieee_mode = IPW_B_MODE;
7424
7425 priv->assoc_request.capability = cpu_to_le16(network->capability);
7426 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7427 && !(priv->config & CFG_PREAMBLE_LONG)) {
7428 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7429 } else {
7430 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7431
7432 /* Clear the short preamble if we won't be supporting it */
7433 priv->assoc_request.capability &=
7434 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7435 }
7436
7437 /* Clear capability bits that aren't used in Ad Hoc */
7438 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7439 priv->assoc_request.capability &=
7440 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7441
7442 IPW_DEBUG_ASSOC("%ssociation attempt: '%s', channel %d, "
7443 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7444 roaming ? "Rea" : "A",
7445 print_ssid(ssid, priv->essid, priv->essid_len),
7446 network->channel,
7447 ipw_modes[priv->assoc_request.ieee_mode],
7448 rates->num_rates,
7449 (priv->assoc_request.preamble_length ==
7450 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7451 network->capability &
7452 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7453 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7454 priv->capability & CAP_PRIVACY_ON ?
7455 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7456 "(open)") : "",
7457 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7458 priv->capability & CAP_PRIVACY_ON ?
7459 '1' + priv->ieee->sec.active_key : '.',
7460 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7461
7462 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7463 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7464 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7465 priv->assoc_request.assoc_type = HC_IBSS_START;
7466 priv->assoc_request.assoc_tsf_msw = 0;
7467 priv->assoc_request.assoc_tsf_lsw = 0;
7468 } else {
7469 if (unlikely(roaming))
7470 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7471 else
7472 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7473 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7474 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7475 }
7476
7477 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7478
7479 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7480 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7481 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7482 } else {
7483 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7484 priv->assoc_request.atim_window = 0;
7485 }
7486
7487 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7488
7489 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7490 if (err) {
7491 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7492 return err;
7493 }
7494
7495 rates->ieee_mode = priv->assoc_request.ieee_mode;
7496 rates->purpose = IPW_RATE_CONNECT;
7497 ipw_send_supported_rates(priv, rates);
7498
7499 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7500 priv->sys_config.dot11g_auto_detection = 1;
7501 else
7502 priv->sys_config.dot11g_auto_detection = 0;
7503
7504 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7505 priv->sys_config.answer_broadcast_ssid_probe = 1;
7506 else
7507 priv->sys_config.answer_broadcast_ssid_probe = 0;
7508
7509 err = ipw_send_system_config(priv);
7510 if (err) {
7511 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7512 return err;
7513 }
7514
7515 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7516 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7517 if (err) {
7518 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7519 return err;
7520 }
7521
7522 /*
7523 * If preemption is enabled, it is possible for the association
7524 * to complete before we return from ipw_send_associate. Therefore
7525 	 * we have to be sure to update our private data first.
7526 */
7527 priv->channel = network->channel;
7528 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7529 priv->status |= STATUS_ASSOCIATING;
7530 priv->status &= ~STATUS_SECURITY_UPDATED;
7531
7532 priv->assoc_network = network;
7533
7534 #ifdef CONFIG_IPW2200_QOS
7535 ipw_qos_association(priv, network);
7536 #endif
7537
7538 err = ipw_send_associate(priv, &priv->assoc_request);
7539 if (err) {
7540 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7541 return err;
7542 }
7543
7544 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7545 print_ssid(ssid, priv->essid, priv->essid_len),
7546 priv->bssid);
7547
7548 return 0;
7549 }
7550
7551 static void ipw_roam(void *data)
7552 {
7553 struct ipw_priv *priv = data;
7554 struct libipw_network *network = NULL;
7555 struct ipw_network_match match = {
7556 .network = priv->assoc_network
7557 };
7558
7559 /* The roaming process is as follows:
7560 *
7561 * 1. Missed beacon threshold triggers the roaming process by
7562 * setting the status ROAM bit and requesting a scan.
7563 	 * 2. When the scan completes, it schedules the ROAM work.
7564 	 * 3. The ROAM work looks at all of the known networks for one that
7565 	 *    is better than the currently associated network.  If none is
7566 	 *    found, the ROAM process is over (ROAM bit cleared).
7567 * 4. If a better network is found, a disassociation request is
7568 * sent.
7569 * 5. When the disassociation completes, the roam work is again
7570 * scheduled. The second time through, the driver is no longer
7571 * associated, and the newly selected network is sent an
7572 * association request.
7573 	 * 6. At this point, the roaming process is complete and the ROAM
7574 * status bit is cleared.
7575 */
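	/* A condensed sketch of the two passes described above (illustrative
	 * only, not driver code):
	 *   pass 1 (still associated): pick the best candidate from
	 *     priv->ieee->network_list; if it differs from the current
	 *     network, send a disassociation and remember the candidate in
	 *     priv->assoc_network.
	 *   pass 2 (after the disassociation completes): associate to
	 *     priv->assoc_network and clear STATUS_ROAMING. */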
7576
7577 /* If we are no longer associated, and the roaming bit is no longer
7578 * set, then we are not actively roaming, so just return */
7579 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7580 return;
7581
7582 if (priv->status & STATUS_ASSOCIATED) {
7583 /* First pass through ROAM process -- look for a better
7584 * network */
7585 unsigned long flags;
7586 u8 rssi = priv->assoc_network->stats.rssi;
7587 priv->assoc_network->stats.rssi = -128;
7588 spin_lock_irqsave(&priv->ieee->lock, flags);
7589 list_for_each_entry(network, &priv->ieee->network_list, list) {
7590 if (network != priv->assoc_network)
7591 ipw_best_network(priv, &match, network, 1);
7592 }
7593 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7594 priv->assoc_network->stats.rssi = rssi;
7595
7596 if (match.network == priv->assoc_network) {
7597 IPW_DEBUG_ASSOC("No better APs in this network to "
7598 "roam to.\n");
7599 priv->status &= ~STATUS_ROAMING;
7600 ipw_debug_config(priv);
7601 return;
7602 }
7603
7604 ipw_send_disassociate(priv, 1);
7605 priv->assoc_network = match.network;
7606
7607 return;
7608 }
7609
7610 /* Second pass through ROAM process -- request association */
7611 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7612 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7613 priv->status &= ~STATUS_ROAMING;
7614 }
7615
7616 static void ipw_bg_roam(struct work_struct *work)
7617 {
7618 struct ipw_priv *priv =
7619 container_of(work, struct ipw_priv, roam);
7620 mutex_lock(&priv->mutex);
7621 ipw_roam(priv);
7622 mutex_unlock(&priv->mutex);
7623 }
7624
7625 static int ipw_associate(void *data)
7626 {
7627 struct ipw_priv *priv = data;
7628
7629 struct libipw_network *network = NULL;
7630 struct ipw_network_match match = {
7631 .network = NULL
7632 };
7633 struct ipw_supported_rates *rates;
7634 struct list_head *element;
7635 unsigned long flags;
7636 DECLARE_SSID_BUF(ssid);
7637
7638 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7639 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7640 return 0;
7641 }
7642
7643 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7644 IPW_DEBUG_ASSOC("Not attempting association (already in "
7645 "progress)\n");
7646 return 0;
7647 }
7648
7649 if (priv->status & STATUS_DISASSOCIATING) {
7650 IPW_DEBUG_ASSOC("Not attempting association (in "
7651 "disassociating)\n ");
7652 schedule_work(&priv->associate);
7653 return 0;
7654 }
7655
7656 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7657 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7658 "initialized)\n");
7659 return 0;
7660 }
7661
7662 if (!(priv->config & CFG_ASSOCIATE) &&
7663 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7664 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7665 return 0;
7666 }
7667
7668 /* Protect our use of the network_list */
7669 spin_lock_irqsave(&priv->ieee->lock, flags);
7670 list_for_each_entry(network, &priv->ieee->network_list, list)
7671 ipw_best_network(priv, &match, network, 0);
7672
7673 network = match.network;
7674 rates = &match.rates;
7675
7676 if (network == NULL &&
7677 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7678 priv->config & CFG_ADHOC_CREATE &&
7679 priv->config & CFG_STATIC_ESSID &&
7680 priv->config & CFG_STATIC_CHANNEL) {
7681 /* Use oldest network if the free list is empty */
7682 if (list_empty(&priv->ieee->network_free_list)) {
7683 struct libipw_network *oldest = NULL;
7684 struct libipw_network *target;
7685
7686 list_for_each_entry(target, &priv->ieee->network_list, list) {
7687 if ((oldest == NULL) ||
7688 (target->last_scanned < oldest->last_scanned))
7689 oldest = target;
7690 }
7691
7692 /* If there are no more slots, expire the oldest */
7693 list_del(&oldest->list);
7694 target = oldest;
7695 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7696 "network list.\n",
7697 print_ssid(ssid, target->ssid,
7698 target->ssid_len),
7699 target->bssid);
7700 list_add_tail(&target->list,
7701 &priv->ieee->network_free_list);
7702 }
7703
7704 element = priv->ieee->network_free_list.next;
7705 network = list_entry(element, struct libipw_network, list);
7706 ipw_adhoc_create(priv, network);
7707 rates = &priv->rates;
7708 list_del(element);
7709 list_add_tail(&network->list, &priv->ieee->network_list);
7710 }
7711 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7712
7713 /* If we reached the end of the list, then we don't have any valid
7714 * matching APs */
7715 if (!network) {
7716 ipw_debug_config(priv);
7717
7718 if (!(priv->status & STATUS_SCANNING)) {
7719 if (!(priv->config & CFG_SPEED_SCAN))
7720 schedule_delayed_work(&priv->request_scan,
7721 SCAN_INTERVAL);
7722 else
7723 schedule_delayed_work(&priv->request_scan, 0);
7724 }
7725
7726 return 0;
7727 }
7728
7729 ipw_associate_network(priv, network, rates, 0);
7730
7731 return 1;
7732 }
7733
7734 static void ipw_bg_associate(struct work_struct *work)
7735 {
7736 struct ipw_priv *priv =
7737 container_of(work, struct ipw_priv, associate);
7738 mutex_lock(&priv->mutex);
7739 ipw_associate(priv);
7740 mutex_unlock(&priv->mutex);
7741 }
7742
7743 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7744 struct sk_buff *skb)
7745 {
7746 struct ieee80211_hdr *hdr;
7747 u16 fc;
7748
7749 hdr = (struct ieee80211_hdr *)skb->data;
7750 fc = le16_to_cpu(hdr->frame_control);
7751 if (!(fc & IEEE80211_FCTL_PROTECTED))
7752 return;
7753
7754 fc &= ~IEEE80211_FCTL_PROTECTED;
7755 hdr->frame_control = cpu_to_le16(fc);
7756 switch (priv->ieee->sec.level) {
7757 case SEC_LEVEL_3:
7758 /* Remove CCMP HDR */
7759 memmove(skb->data + LIBIPW_3ADDR_LEN,
7760 skb->data + LIBIPW_3ADDR_LEN + 8,
7761 skb->len - LIBIPW_3ADDR_LEN - 8);
7762 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7763 break;
7764 case SEC_LEVEL_2:
7765 break;
7766 case SEC_LEVEL_1:
7767 /* Remove IV */
7768 memmove(skb->data + LIBIPW_3ADDR_LEN,
7769 skb->data + LIBIPW_3ADDR_LEN + 4,
7770 skb->len - LIBIPW_3ADDR_LEN - 4);
7771 skb_trim(skb, skb->len - 8); /* IV + ICV */
7772 break;
7773 case SEC_LEVEL_0:
7774 break;
7775 default:
7776 printk(KERN_ERR "Unknown security level %d\n",
7777 priv->ieee->sec.level);
7778 break;
7779 }
7780 }
7781
7782 static void ipw_handle_data_packet(struct ipw_priv *priv,
7783 struct ipw_rx_mem_buffer *rxb,
7784 struct libipw_rx_stats *stats)
7785 {
7786 struct net_device *dev = priv->net_dev;
7787 struct libipw_hdr_4addr *hdr;
7788 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7789
7790 /* We received data from the HW, so stop the watchdog */
7791 dev->trans_start = jiffies;
7792
7793 /* We only process data packets if the
7794 * interface is open */
7795 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7796 skb_tailroom(rxb->skb))) {
7797 dev->stats.rx_errors++;
7798 priv->wstats.discard.misc++;
7799 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7800 return;
7801 } else if (unlikely(!netif_running(priv->net_dev))) {
7802 dev->stats.rx_dropped++;
7803 priv->wstats.discard.misc++;
7804 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7805 return;
7806 }
7807
7808 /* Advance skb->data to the start of the actual payload */
7809 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7810
7811 /* Set the size of the skb to the size of the frame */
7812 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
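	/* The Rx buffer begins with the struct ipw_rx_packet status header,
	 * with the 802.11 frame following at u.frame.data; the
	 * skb_reserve()/skb_put() pair above trims the skb down to just the
	 * frame, using the length reported in pkt->u.frame.length. */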
7813
7814 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7815
7816 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7817 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7818 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7819 (is_multicast_ether_addr(hdr->addr1) ?
7820 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7821 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7822
7823 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7824 dev->stats.rx_errors++;
7825 else { /* libipw_rx succeeded, so it now owns the SKB */
7826 rxb->skb = NULL;
7827 __ipw_led_activity_on(priv);
7828 }
7829 }
7830
7831 #ifdef CONFIG_IPW2200_RADIOTAP
7832 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7833 struct ipw_rx_mem_buffer *rxb,
7834 struct libipw_rx_stats *stats)
7835 {
7836 struct net_device *dev = priv->net_dev;
7837 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7838 struct ipw_rx_frame *frame = &pkt->u.frame;
7839
7840 /* initial pull of some data */
7841 u16 received_channel = frame->received_channel;
7842 u8 antennaAndPhy = frame->antennaAndPhy;
7843 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7844 u16 pktrate = frame->rate;
7845
7846 /* Magic struct that slots into the radiotap header -- no reason
7847 * to build this manually element by element, we can write it much
7848 * more efficiently than we can parse it. ORDER MATTERS HERE */
7849 struct ipw_rt_hdr *ipw_rt;
7850
7851 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7852
7853 /* We received data from the HW, so stop the watchdog */
7854 dev->trans_start = jiffies;
7855
7856 /* We only process data packets if the
7857 * interface is open */
7858 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7859 skb_tailroom(rxb->skb))) {
7860 dev->stats.rx_errors++;
7861 priv->wstats.discard.misc++;
7862 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7863 return;
7864 } else if (unlikely(!netif_running(priv->net_dev))) {
7865 dev->stats.rx_dropped++;
7866 priv->wstats.discard.misc++;
7867 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7868 return;
7869 }
7870
7871 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7872 * that now */
7873 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7874 /* FIXME: Should alloc bigger skb instead */
7875 dev->stats.rx_dropped++;
7876 priv->wstats.discard.misc++;
7877 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7878 return;
7879 }
7880
7881 /* copy the frame itself */
7882 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7883 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7884
7885 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7886
7887 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7888 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7889 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7890
7891 /* Big bitfield of all the fields we provide in radiotap */
7892 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7893 (1 << IEEE80211_RADIOTAP_TSFT) |
7894 (1 << IEEE80211_RADIOTAP_FLAGS) |
7895 (1 << IEEE80211_RADIOTAP_RATE) |
7896 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7897 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7898 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7899 (1 << IEEE80211_RADIOTAP_ANTENNA));
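	/* With the standard radiotap bit numbers (TSFT=0, FLAGS=1, RATE=2,
	 * CHANNEL=3, DBM_ANTSIGNAL=5, DBM_ANTNOISE=6, ANTENNA=11) the mask
	 * built above works out to it_present == cpu_to_le32(0x0000086f). */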
7900
7901 /* Zero the flags, we'll add to them as we go */
7902 ipw_rt->rt_flags = 0;
7903 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7904 frame->parent_tsf[2] << 16 |
7905 frame->parent_tsf[1] << 8 |
7906 frame->parent_tsf[0]);
7907
7908 /* Convert signal to DBM */
7909 ipw_rt->rt_dbmsignal = antsignal;
7910 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7911
7912 /* Convert the channel data and set the flags */
7913 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7914 if (received_channel > 14) { /* 802.11a */
7915 ipw_rt->rt_chbitmask =
7916 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7917 } else if (antennaAndPhy & 32) { /* 802.11b */
7918 ipw_rt->rt_chbitmask =
7919 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7920 } else { /* 802.11g */
7921 ipw_rt->rt_chbitmask =
7922 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7923 }
7924
7925 /* set the rate in multiples of 500k/s */
7926 switch (pktrate) {
7927 case IPW_TX_RATE_1MB:
7928 ipw_rt->rt_rate = 2;
7929 break;
7930 case IPW_TX_RATE_2MB:
7931 ipw_rt->rt_rate = 4;
7932 break;
7933 case IPW_TX_RATE_5MB:
7934 ipw_rt->rt_rate = 10;
7935 break;
7936 case IPW_TX_RATE_6MB:
7937 ipw_rt->rt_rate = 12;
7938 break;
7939 case IPW_TX_RATE_9MB:
7940 ipw_rt->rt_rate = 18;
7941 break;
7942 case IPW_TX_RATE_11MB:
7943 ipw_rt->rt_rate = 22;
7944 break;
7945 case IPW_TX_RATE_12MB:
7946 ipw_rt->rt_rate = 24;
7947 break;
7948 case IPW_TX_RATE_18MB:
7949 ipw_rt->rt_rate = 36;
7950 break;
7951 case IPW_TX_RATE_24MB:
7952 ipw_rt->rt_rate = 48;
7953 break;
7954 case IPW_TX_RATE_36MB:
7955 ipw_rt->rt_rate = 72;
7956 break;
7957 case IPW_TX_RATE_48MB:
7958 ipw_rt->rt_rate = 96;
7959 break;
7960 case IPW_TX_RATE_54MB:
7961 ipw_rt->rt_rate = 108;
7962 break;
7963 default:
7964 ipw_rt->rt_rate = 0;
7965 break;
7966 }
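	/* The radiotap rate field is expressed in 500 kb/s units, i.e. twice
	 * the rate in Mb/s: 1 Mb/s -> 2, 11 Mb/s -> 22, 54 Mb/s -> 108, as in
	 * the table above. */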
7967
7968 /* antenna number */
7969 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7970
7971 /* set the preamble flag if we have it */
7972 if ((antennaAndPhy & 64))
7973 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7974
7975 /* Set the size of the skb to the size of the frame */
7976 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7977
7978 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7979
7980 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7981 dev->stats.rx_errors++;
7982 else { /* libipw_rx succeeded, so it now owns the SKB */
7983 rxb->skb = NULL;
7984 /* no LED during capture */
7985 }
7986 }
7987 #endif
7988
7989 #ifdef CONFIG_IPW2200_PROMISCUOUS
7990 #define libipw_is_probe_response(fc) \
7991 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7992 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7993
7994 #define libipw_is_management(fc) \
7995 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7996
7997 #define libipw_is_control(fc) \
7998 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7999
8000 #define libipw_is_data(fc) \
8001 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
8002
8003 #define libipw_is_assoc_request(fc) \
8004 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
8005
8006 #define libipw_is_reassoc_request(fc) \
8007 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
8008
8009 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8010 struct ipw_rx_mem_buffer *rxb,
8011 struct libipw_rx_stats *stats)
8012 {
8013 struct net_device *dev = priv->prom_net_dev;
8014 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
8015 struct ipw_rx_frame *frame = &pkt->u.frame;
8016 struct ipw_rt_hdr *ipw_rt;
8017
8018 /* First cache any information we need before we overwrite
8019 * the information provided in the skb from the hardware */
8020 struct ieee80211_hdr *hdr;
8021 u16 channel = frame->received_channel;
8022 u8 phy_flags = frame->antennaAndPhy;
8023 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8024 s8 noise = (s8) le16_to_cpu(frame->noise);
8025 u8 rate = frame->rate;
8026 unsigned short len = le16_to_cpu(pkt->u.frame.length);
8027 struct sk_buff *skb;
8028 int hdr_only = 0;
8029 u16 filter = priv->prom_priv->filter;
8030
8031 /* If the filter is set to not include Rx frames then return */
8032 if (filter & IPW_PROM_NO_RX)
8033 return;
8034
8035 /* We received data from the HW, so stop the watchdog */
8036 dev->trans_start = jiffies;
8037
8038 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8039 dev->stats.rx_errors++;
8040 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8041 return;
8042 }
8043
8044 /* We only process data packets if the interface is open */
8045 if (unlikely(!netif_running(dev))) {
8046 dev->stats.rx_dropped++;
8047 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8048 return;
8049 }
8050
8051 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8052 * that now */
8053 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8054 /* FIXME: Should alloc bigger skb instead */
8055 dev->stats.rx_dropped++;
8056 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8057 return;
8058 }
8059
8060 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8061 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8062 if (filter & IPW_PROM_NO_MGMT)
8063 return;
8064 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8065 hdr_only = 1;
8066 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8067 if (filter & IPW_PROM_NO_CTL)
8068 return;
8069 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8070 hdr_only = 1;
8071 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8072 if (filter & IPW_PROM_NO_DATA)
8073 return;
8074 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8075 hdr_only = 1;
8076 }
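	/* Example: a filter of (IPW_PROM_NO_CTL | IPW_PROM_MGMT_HEADER_ONLY)
	 * drops control frames entirely, passes management frames truncated
	 * to their 802.11 header, and passes data frames in full. */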
8077
8078 /* Copy the SKB since this is for the promiscuous side */
8079 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8080 if (skb == NULL) {
8081 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
8082 return;
8083 }
8084
8085 	/* the frame data will be copied in just after the radiotap header */
8086 ipw_rt = (void *)skb->data;
8087
8088 if (hdr_only)
8089 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8090
8091 memcpy(ipw_rt->payload, hdr, len);
8092
8093 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8094 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8095 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8096
8097 /* Set the size of the skb to the size of the frame */
8098 skb_put(skb, sizeof(*ipw_rt) + len);
8099
8100 /* Big bitfield of all the fields we provide in radiotap */
8101 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8102 (1 << IEEE80211_RADIOTAP_TSFT) |
8103 (1 << IEEE80211_RADIOTAP_FLAGS) |
8104 (1 << IEEE80211_RADIOTAP_RATE) |
8105 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8106 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8107 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8108 (1 << IEEE80211_RADIOTAP_ANTENNA));
8109
8110 /* Zero the flags, we'll add to them as we go */
8111 ipw_rt->rt_flags = 0;
8112 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8113 frame->parent_tsf[2] << 16 |
8114 frame->parent_tsf[1] << 8 |
8115 frame->parent_tsf[0]);
8116
8117 /* Convert to DBM */
8118 ipw_rt->rt_dbmsignal = signal;
8119 ipw_rt->rt_dbmnoise = noise;
8120
8121 /* Convert the channel data and set the flags */
8122 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8123 if (channel > 14) { /* 802.11a */
8124 ipw_rt->rt_chbitmask =
8125 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8126 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8127 ipw_rt->rt_chbitmask =
8128 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8129 } else { /* 802.11g */
8130 ipw_rt->rt_chbitmask =
8131 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8132 }
8133
8134 /* set the rate in multiples of 500k/s */
8135 switch (rate) {
8136 case IPW_TX_RATE_1MB:
8137 ipw_rt->rt_rate = 2;
8138 break;
8139 case IPW_TX_RATE_2MB:
8140 ipw_rt->rt_rate = 4;
8141 break;
8142 case IPW_TX_RATE_5MB:
8143 ipw_rt->rt_rate = 10;
8144 break;
8145 case IPW_TX_RATE_6MB:
8146 ipw_rt->rt_rate = 12;
8147 break;
8148 case IPW_TX_RATE_9MB:
8149 ipw_rt->rt_rate = 18;
8150 break;
8151 case IPW_TX_RATE_11MB:
8152 ipw_rt->rt_rate = 22;
8153 break;
8154 case IPW_TX_RATE_12MB:
8155 ipw_rt->rt_rate = 24;
8156 break;
8157 case IPW_TX_RATE_18MB:
8158 ipw_rt->rt_rate = 36;
8159 break;
8160 case IPW_TX_RATE_24MB:
8161 ipw_rt->rt_rate = 48;
8162 break;
8163 case IPW_TX_RATE_36MB:
8164 ipw_rt->rt_rate = 72;
8165 break;
8166 case IPW_TX_RATE_48MB:
8167 ipw_rt->rt_rate = 96;
8168 break;
8169 case IPW_TX_RATE_54MB:
8170 ipw_rt->rt_rate = 108;
8171 break;
8172 default:
8173 ipw_rt->rt_rate = 0;
8174 break;
8175 }
8176
8177 /* antenna number */
8178 ipw_rt->rt_antenna = (phy_flags & 3);
8179
8180 /* set the preamble flag if we have it */
8181 if (phy_flags & (1 << 6))
8182 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8183
8184 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8185
8186 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8187 dev->stats.rx_errors++;
8188 dev_kfree_skb_any(skb);
8189 }
8190 }
8191 #endif
8192
8193 static int is_network_packet(struct ipw_priv *priv,
8194 struct libipw_hdr_4addr *header)
8195 {
8196 /* Filter incoming packets to determine if they are targeted toward
8197 * this network, discarding packets coming from ourselves */
8198 switch (priv->ieee->iw_mode) {
8199 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8200 /* packets from our adapter are dropped (echo) */
8201 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8202 return 0;
8203
8204 /* {broad,multi}cast packets to our BSSID go through */
8205 if (is_multicast_ether_addr(header->addr1))
8206 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8207
8208 /* packets to our adapter go through */
8209 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8210 ETH_ALEN);
8211
8212 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8213 /* packets from our adapter are dropped (echo) */
8214 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8215 return 0;
8216
8217 /* {broad,multi}cast packets to our BSS go through */
8218 if (is_multicast_ether_addr(header->addr1))
8219 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8220
8221 /* packets to our adapter go through */
8222 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8223 ETH_ALEN);
8224 }
8225
8226 return 1;
8227 }
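/* For example, in infrastructure mode a broadcast frame relayed by our
 * AP arrives with addr1 = ff:ff:ff:ff:ff:ff, addr2 = the BSSID and
 * addr3 = the original sender, and is accepted by the multicast check
 * above. */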
8228
8229 #define IPW_PACKET_RETRY_TIME HZ
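/* Duplicate detection below keys on the 802.11 sequence control field:
 * bits 0-3 carry the fragment number and bits 4-15 the sequence number,
 * so, for example, a seq_ctl of 0x01c2 splits into frag 2 and seq 0x1c.
 * A frame repeating the previous (seq, frag) pair from the same source
 * within IPW_PACKET_RETRY_TIME (one second) is treated as a retry. */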
8230
8231 static int is_duplicate_packet(struct ipw_priv *priv,
8232 struct libipw_hdr_4addr *header)
8233 {
8234 u16 sc = le16_to_cpu(header->seq_ctl);
8235 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8236 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8237 u16 *last_seq, *last_frag;
8238 unsigned long *last_time;
8239
8240 switch (priv->ieee->iw_mode) {
8241 case IW_MODE_ADHOC:
8242 {
8243 struct list_head *p;
8244 struct ipw_ibss_seq *entry = NULL;
8245 u8 *mac = header->addr2;
8246 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8247
8248 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8249 entry =
8250 list_entry(p, struct ipw_ibss_seq, list);
8251 if (!memcmp(entry->mac, mac, ETH_ALEN))
8252 break;
8253 }
8254 if (p == &priv->ibss_mac_hash[index]) {
8255 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8256 if (!entry) {
8257 IPW_ERROR
8258 ("Cannot malloc new mac entry\n");
8259 return 0;
8260 }
8261 memcpy(entry->mac, mac, ETH_ALEN);
8262 entry->seq_num = seq;
8263 entry->frag_num = frag;
8264 entry->packet_time = jiffies;
8265 list_add(&entry->list,
8266 &priv->ibss_mac_hash[index]);
8267 return 0;
8268 }
8269 last_seq = &entry->seq_num;
8270 last_frag = &entry->frag_num;
8271 last_time = &entry->packet_time;
8272 break;
8273 }
8274 case IW_MODE_INFRA:
8275 last_seq = &priv->last_seq_num;
8276 last_frag = &priv->last_frag_num;
8277 last_time = &priv->last_packet_time;
8278 break;
8279 default:
8280 return 0;
8281 }
8282 if ((*last_seq == seq) &&
8283 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8284 if (*last_frag == frag)
8285 goto drop;
8286 if (*last_frag + 1 != frag)
8287 /* out-of-order fragment */
8288 goto drop;
8289 } else
8290 *last_seq = seq;
8291
8292 *last_frag = frag;
8293 *last_time = jiffies;
8294 return 0;
8295
8296 drop:
8297 	/* This BUG_ON is commented out because we have observed the card
8298 	 * receiving duplicate packets without the FCTL_RETRY bit set in
8299 	 * IBSS mode with fragmentation enabled.
8300 	BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8301 return 1;
8302 }
8303
8304 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8305 struct ipw_rx_mem_buffer *rxb,
8306 struct libipw_rx_stats *stats)
8307 {
8308 struct sk_buff *skb = rxb->skb;
8309 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8310 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8311 (skb->data + IPW_RX_FRAME_SIZE);
8312
8313 libipw_rx_mgt(priv->ieee, header, stats);
8314
8315 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8316 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8317 IEEE80211_STYPE_PROBE_RESP) ||
8318 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8319 IEEE80211_STYPE_BEACON))) {
8320 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8321 ipw_add_station(priv, header->addr2);
8322 }
8323
8324 if (priv->config & CFG_NET_STATS) {
8325 IPW_DEBUG_HC("sending stat packet\n");
8326
8327 /* Set the size of the skb to the size of the full
8328 * ipw header and 802.11 frame */
8329 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8330 IPW_RX_FRAME_SIZE);
8331
8332 /* Advance past the ipw packet header to the 802.11 frame */
8333 skb_pull(skb, IPW_RX_FRAME_SIZE);
8334
8335 /* Push the libipw_rx_stats before the 802.11 frame */
8336 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8337
8338 skb->dev = priv->ieee->dev;
8339
8340 /* Point raw at the libipw_stats */
8341 skb_reset_mac_header(skb);
8342
8343 skb->pkt_type = PACKET_OTHERHOST;
8344 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8345 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8346 netif_rx(skb);
8347 rxb->skb = NULL;
8348 }
8349 }
8350
8351 /*
8352 * Main entry function for receiving a packet with 80211 headers. This
8353  * should be called whenever the FW has notified us that there is a new
8354 * skb in the receive queue.
8355 */
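/* A rough sketch of the ring bookkeeping used below: buffers are
 * consumed from priv->rxq->read (i) up to, but not including, the
 * hardware read index r, advancing with i = (i + 1) % RX_QUEUE_SIZE;
 * the queue is replenished early when more than half of it is free. */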
8356 static void ipw_rx(struct ipw_priv *priv)
8357 {
8358 struct ipw_rx_mem_buffer *rxb;
8359 struct ipw_rx_packet *pkt;
8360 struct libipw_hdr_4addr *header;
8361 u32 r, w, i;
8362 u8 network_packet;
8363 u8 fill_rx = 0;
8364
8365 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8366 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8367 i = priv->rxq->read;
8368
8369 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8370 fill_rx = 1;
8371
8372 while (i != r) {
8373 rxb = priv->rxq->queue[i];
8374 if (unlikely(rxb == NULL)) {
8375 printk(KERN_CRIT "Queue not allocated!\n");
8376 break;
8377 }
8378 priv->rxq->queue[i] = NULL;
8379
8380 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8381 IPW_RX_BUF_SIZE,
8382 PCI_DMA_FROMDEVICE);
8383
8384 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8385 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8386 pkt->header.message_type,
8387 pkt->header.rx_seq_num, pkt->header.control_bits);
8388
8389 switch (pkt->header.message_type) {
8390 case RX_FRAME_TYPE: /* 802.11 frame */ {
8391 struct libipw_rx_stats stats = {
8392 .rssi = pkt->u.frame.rssi_dbm -
8393 IPW_RSSI_TO_DBM,
8394 .signal =
8395 pkt->u.frame.rssi_dbm -
8396 IPW_RSSI_TO_DBM + 0x100,
8397 .noise =
8398 le16_to_cpu(pkt->u.frame.noise),
8399 .rate = pkt->u.frame.rate,
8400 .mac_time = jiffies,
8401 .received_channel =
8402 pkt->u.frame.received_channel,
8403 .freq =
8404 (pkt->u.frame.
8405 control & (1 << 0)) ?
8406 LIBIPW_24GHZ_BAND :
8407 LIBIPW_52GHZ_BAND,
8408 .len = le16_to_cpu(pkt->u.frame.length),
8409 };
8410
8411 if (stats.rssi != 0)
8412 stats.mask |= LIBIPW_STATMASK_RSSI;
8413 if (stats.signal != 0)
8414 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8415 if (stats.noise != 0)
8416 stats.mask |= LIBIPW_STATMASK_NOISE;
8417 if (stats.rate != 0)
8418 stats.mask |= LIBIPW_STATMASK_RATE;
8419
8420 priv->rx_packets++;
8421
8422 #ifdef CONFIG_IPW2200_PROMISCUOUS
8423 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8424 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8425 #endif
8426
8427 #ifdef CONFIG_IPW2200_MONITOR
8428 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8429 #ifdef CONFIG_IPW2200_RADIOTAP
8430
8431 ipw_handle_data_packet_monitor(priv,
8432 rxb,
8433 &stats);
8434 #else
8435 ipw_handle_data_packet(priv, rxb,
8436 &stats);
8437 #endif
8438 break;
8439 }
8440 #endif
8441
8442 header =
8443 (struct libipw_hdr_4addr *)(rxb->skb->
8444 data +
8445 IPW_RX_FRAME_SIZE);
8446 /* TODO: Check Ad-Hoc dest/source and make sure
8447 * that we are actually parsing these packets
8448 * correctly -- we should probably use the
8449 * frame control of the packet and disregard
8450 * the current iw_mode */
8451
8452 network_packet =
8453 is_network_packet(priv, header);
8454 if (network_packet && priv->assoc_network) {
8455 priv->assoc_network->stats.rssi =
8456 stats.rssi;
8457 priv->exp_avg_rssi =
8458 exponential_average(priv->exp_avg_rssi,
8459 stats.rssi, DEPTH_RSSI);
8460 }
8461
8462 IPW_DEBUG_RX("Frame: len=%u\n",
8463 le16_to_cpu(pkt->u.frame.length));
8464
8465 if (le16_to_cpu(pkt->u.frame.length) <
8466 libipw_get_hdrlen(le16_to_cpu(
8467 header->frame_ctl))) {
8468 IPW_DEBUG_DROP
8469 ("Received packet is too small. "
8470 "Dropping.\n");
8471 priv->net_dev->stats.rx_errors++;
8472 priv->wstats.discard.misc++;
8473 break;
8474 }
8475
8476 switch (WLAN_FC_GET_TYPE
8477 (le16_to_cpu(header->frame_ctl))) {
8478
8479 case IEEE80211_FTYPE_MGMT:
8480 ipw_handle_mgmt_packet(priv, rxb,
8481 &stats);
8482 break;
8483
8484 case IEEE80211_FTYPE_CTL:
8485 break;
8486
8487 case IEEE80211_FTYPE_DATA:
8488 if (unlikely(!network_packet ||
8489 is_duplicate_packet(priv,
8490 header)))
8491 {
8492 IPW_DEBUG_DROP("Dropping: "
8493 "%pM, "
8494 "%pM, "
8495 "%pM\n",
8496 header->addr1,
8497 header->addr2,
8498 header->addr3);
8499 break;
8500 }
8501
8502 ipw_handle_data_packet(priv, rxb,
8503 &stats);
8504
8505 break;
8506 }
8507 break;
8508 }
8509
8510 case RX_HOST_NOTIFICATION_TYPE:{
8511 IPW_DEBUG_RX
8512 ("Notification: subtype=%02X flags=%02X size=%d\n",
8513 pkt->u.notification.subtype,
8514 pkt->u.notification.flags,
8515 le16_to_cpu(pkt->u.notification.size));
8516 ipw_rx_notification(priv, &pkt->u.notification);
8517 break;
8518 }
8519
8520 default:
8521 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8522 pkt->header.message_type);
8523 break;
8524 }
8525
8526 /* For now we just don't re-use anything. We can tweak this
8527 		 * later to try to re-use notification packets and SKBs that
8528 * fail to Rx correctly */
8529 if (rxb->skb != NULL) {
8530 dev_kfree_skb_any(rxb->skb);
8531 rxb->skb = NULL;
8532 }
8533
8534 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8535 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8536 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8537
8538 i = (i + 1) % RX_QUEUE_SIZE;
8539
8540 		/* If there are a lot of unused frames, restock the Rx queue
8541 * so the ucode won't assert */
8542 if (fill_rx) {
8543 priv->rxq->read = i;
8544 ipw_rx_queue_replenish(priv);
8545 }
8546 }
8547
8548 /* Backtrack one entry */
8549 priv->rxq->read = i;
8550 ipw_rx_queue_restock(priv);
8551 }
8552
8553 #define DEFAULT_RTS_THRESHOLD 2304U
8554 #define MIN_RTS_THRESHOLD 1U
8555 #define MAX_RTS_THRESHOLD 2304U
8556 #define DEFAULT_BEACON_INTERVAL 100U
8557 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8558 #define DEFAULT_LONG_RETRY_LIMIT 4U
8559
8560 /**
8561 * ipw_sw_reset
8562 * @option: options to control different reset behaviour
8563 * 0 = reset everything except the 'disable' module_param
8564 * 1 = reset everything and print out driver info (for probe only)
8565 * 2 = reset everything
8566 */
8567 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8568 {
8569 int band, modulation;
8570 int old_mode = priv->ieee->iw_mode;
8571
8572 /* Initialize module parameter values here */
8573 priv->config = 0;
8574
8575 /* We default to disabling the LED code as right now it causes
8576 * too many systems to lock up... */
8577 if (!led_support)
8578 priv->config |= CFG_NO_LED;
8579
8580 if (associate)
8581 priv->config |= CFG_ASSOCIATE;
8582 else
8583 IPW_DEBUG_INFO("Auto associate disabled.\n");
8584
8585 if (auto_create)
8586 priv->config |= CFG_ADHOC_CREATE;
8587 else
8588 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8589
8590 priv->config &= ~CFG_STATIC_ESSID;
8591 priv->essid_len = 0;
8592 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8593
8594 if (disable && option) {
8595 priv->status |= STATUS_RF_KILL_SW;
8596 IPW_DEBUG_INFO("Radio disabled.\n");
8597 }
8598
8599 if (default_channel != 0) {
8600 priv->config |= CFG_STATIC_CHANNEL;
8601 priv->channel = default_channel;
8602 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8603 /* TODO: Validate that provided channel is in range */
8604 }
8605 #ifdef CONFIG_IPW2200_QOS
8606 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8607 burst_duration_CCK, burst_duration_OFDM);
8608 #endif /* CONFIG_IPW2200_QOS */
8609
8610 switch (network_mode) {
8611 case 1:
8612 priv->ieee->iw_mode = IW_MODE_ADHOC;
8613 priv->net_dev->type = ARPHRD_ETHER;
8614
8615 break;
8616 #ifdef CONFIG_IPW2200_MONITOR
8617 case 2:
8618 priv->ieee->iw_mode = IW_MODE_MONITOR;
8619 #ifdef CONFIG_IPW2200_RADIOTAP
8620 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8621 #else
8622 priv->net_dev->type = ARPHRD_IEEE80211;
8623 #endif
8624 break;
8625 #endif
8626 default:
8627 case 0:
8628 priv->net_dev->type = ARPHRD_ETHER;
8629 priv->ieee->iw_mode = IW_MODE_INFRA;
8630 break;
8631 }
8632
8633 if (hwcrypto) {
8634 priv->ieee->host_encrypt = 0;
8635 priv->ieee->host_encrypt_msdu = 0;
8636 priv->ieee->host_decrypt = 0;
8637 priv->ieee->host_mc_decrypt = 0;
8638 }
8639 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8640
8641 	/* IPW2200/2915 is able to do hardware fragmentation. */
8642 priv->ieee->host_open_frag = 0;
8643
8644 if ((priv->pci_dev->device == 0x4223) ||
8645 (priv->pci_dev->device == 0x4224)) {
8646 if (option == 1)
8647 printk(KERN_INFO DRV_NAME
8648 ": Detected Intel PRO/Wireless 2915ABG Network "
8649 "Connection\n");
8650 priv->ieee->abg_true = 1;
8651 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8652 modulation = LIBIPW_OFDM_MODULATION |
8653 LIBIPW_CCK_MODULATION;
8654 priv->adapter = IPW_2915ABG;
8655 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8656 } else {
8657 if (option == 1)
8658 printk(KERN_INFO DRV_NAME
8659 ": Detected Intel PRO/Wireless 2200BG Network "
8660 "Connection\n");
8661
8662 priv->ieee->abg_true = 0;
8663 band = LIBIPW_24GHZ_BAND;
8664 modulation = LIBIPW_OFDM_MODULATION |
8665 LIBIPW_CCK_MODULATION;
8666 priv->adapter = IPW_2200BG;
8667 priv->ieee->mode = IEEE_G | IEEE_B;
8668 }
8669
8670 priv->ieee->freq_band = band;
8671 priv->ieee->modulation = modulation;
8672
8673 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8674
8675 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8676 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8677
8678 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8679 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8680 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8681
8682 /* If power management is turned on, default to AC mode */
8683 priv->power_mode = IPW_POWER_AC;
8684 priv->tx_power = IPW_TX_POWER_DEFAULT;
8685
8686 return old_mode == priv->ieee->iw_mode;
8687 }
8688
8689 /*
8690 * This file defines the Wireless Extension handlers. It does not
8691 * define any methods of hardware manipulation and relies on the
8692 * functions defined in ipw_main to provide the HW interaction.
8693 *
8694  * The exception to this is the use of the ipw_get_ordinal() function,
8695  * which is used to poll the hardware rather than making unnecessary calls.
8696 *
8697 */
8698
8699 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8700 {
8701 if (channel == 0) {
8702 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8703 priv->config &= ~CFG_STATIC_CHANNEL;
8704 IPW_DEBUG_ASSOC("Attempting to associate with new "
8705 "parameters.\n");
8706 ipw_associate(priv);
8707 return 0;
8708 }
8709
8710 priv->config |= CFG_STATIC_CHANNEL;
8711
8712 if (priv->channel == channel) {
8713 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8714 channel);
8715 return 0;
8716 }
8717
8718 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8719 priv->channel = channel;
8720
8721 #ifdef CONFIG_IPW2200_MONITOR
8722 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8723 int i;
8724 if (priv->status & STATUS_SCANNING) {
8725 IPW_DEBUG_SCAN("Scan abort triggered due to "
8726 "channel change.\n");
8727 ipw_abort_scan(priv);
8728 }
8729
8730 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8731 udelay(10);
8732
8733 if (priv->status & STATUS_SCANNING)
8734 IPW_DEBUG_SCAN("Still scanning...\n");
8735 else
8736 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8737 1000 - i);
8738
8739 return 0;
8740 }
8741 #endif /* CONFIG_IPW2200_MONITOR */
8742
8743 /* Network configuration changed -- force [re]association */
8744 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8745 if (!ipw_disassociate(priv))
8746 ipw_associate(priv);
8747
8748 return 0;
8749 }
8750
8751 static int ipw_wx_set_freq(struct net_device *dev,
8752 struct iw_request_info *info,
8753 union iwreq_data *wrqu, char *extra)
8754 {
8755 struct ipw_priv *priv = libipw_priv(dev);
8756 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8757 struct iw_freq *fwrq = &wrqu->freq;
8758 int ret = 0, i;
8759 u8 channel, flags;
8760 int band;
8761
8762 if (fwrq->m == 0) {
8763 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8764 mutex_lock(&priv->mutex);
8765 ret = ipw_set_channel(priv, 0);
8766 mutex_unlock(&priv->mutex);
8767 return ret;
8768 }
8769 /* if setting by freq convert to channel */
8770 if (fwrq->e == 1) {
8771 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8772 if (channel == 0)
8773 return -EINVAL;
8774 } else
8775 channel = fwrq->m;
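	/* Wireless Extensions encode a frequency as m * 10^e Hz, so channel 6
	 * (2437 MHz) may arrive as m = 243700000 with e = 1, while a plain
	 * channel number arrives with a small m and e == 0; both forms are
	 * handled above. */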
8776
8777 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8778 return -EINVAL;
8779
8780 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8781 i = libipw_channel_to_index(priv->ieee, channel);
8782 if (i == -1)
8783 return -EINVAL;
8784
8785 flags = (band == LIBIPW_24GHZ_BAND) ?
8786 geo->bg[i].flags : geo->a[i].flags;
8787 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8788 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8789 return -EINVAL;
8790 }
8791 }
8792
8793 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8794 mutex_lock(&priv->mutex);
8795 ret = ipw_set_channel(priv, channel);
8796 mutex_unlock(&priv->mutex);
8797 return ret;
8798 }
8799
8800 static int ipw_wx_get_freq(struct net_device *dev,
8801 struct iw_request_info *info,
8802 union iwreq_data *wrqu, char *extra)
8803 {
8804 struct ipw_priv *priv = libipw_priv(dev);
8805
8806 wrqu->freq.e = 0;
8807
8808 /* If we are associated, trying to associate, or have a statically
8809 * configured CHANNEL then return that; otherwise return ANY */
8810 mutex_lock(&priv->mutex);
8811 if (priv->config & CFG_STATIC_CHANNEL ||
8812 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8813 int i;
8814
8815 i = libipw_channel_to_index(priv->ieee, priv->channel);
8816 BUG_ON(i == -1);
8817 wrqu->freq.e = 1;
8818
8819 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8820 case LIBIPW_52GHZ_BAND:
8821 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8822 break;
8823
8824 case LIBIPW_24GHZ_BAND:
8825 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8826 break;
8827
8828 default:
8829 BUG();
8830 }
8831 } else
8832 wrqu->freq.m = 0;
8833
8834 mutex_unlock(&priv->mutex);
8835 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8836 return 0;
8837 }
8838
8839 static int ipw_wx_set_mode(struct net_device *dev,
8840 struct iw_request_info *info,
8841 union iwreq_data *wrqu, char *extra)
8842 {
8843 struct ipw_priv *priv = libipw_priv(dev);
8844 int err = 0;
8845
8846 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8847
8848 switch (wrqu->mode) {
8849 #ifdef CONFIG_IPW2200_MONITOR
8850 case IW_MODE_MONITOR:
8851 #endif
8852 case IW_MODE_ADHOC:
8853 case IW_MODE_INFRA:
8854 break;
8855 case IW_MODE_AUTO:
8856 wrqu->mode = IW_MODE_INFRA;
8857 break;
8858 default:
8859 return -EINVAL;
8860 }
8861 if (wrqu->mode == priv->ieee->iw_mode)
8862 return 0;
8863
8864 mutex_lock(&priv->mutex);
8865
8866 ipw_sw_reset(priv, 0);
8867
8868 #ifdef CONFIG_IPW2200_MONITOR
8869 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8870 priv->net_dev->type = ARPHRD_ETHER;
8871
8872 if (wrqu->mode == IW_MODE_MONITOR)
8873 #ifdef CONFIG_IPW2200_RADIOTAP
8874 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8875 #else
8876 priv->net_dev->type = ARPHRD_IEEE80211;
8877 #endif
8878 #endif /* CONFIG_IPW2200_MONITOR */
8879
8880 /* Free the existing firmware and reset the fw_loaded
8881 * flag so ipw_load() will bring in the new firmware */
8882 free_firmware();
8883
8884 priv->ieee->iw_mode = wrqu->mode;
8885
8886 schedule_work(&priv->adapter_restart);
8887 mutex_unlock(&priv->mutex);
8888 return err;
8889 }
8890
8891 static int ipw_wx_get_mode(struct net_device *dev,
8892 struct iw_request_info *info,
8893 union iwreq_data *wrqu, char *extra)
8894 {
8895 struct ipw_priv *priv = libipw_priv(dev);
8896 mutex_lock(&priv->mutex);
8897 wrqu->mode = priv->ieee->iw_mode;
8898 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8899 mutex_unlock(&priv->mutex);
8900 return 0;
8901 }
8902
8903 /* Values are in microseconds */
8904 static const s32 timeout_duration[] = {
8905 350000,
8906 250000,
8907 75000,
8908 37000,
8909 25000,
8910 };
8911
8912 static const s32 period_duration[] = {
8913 400000,
8914 700000,
8915 1000000,
8916 1000000,
8917 1000000
8918 };
8919
8920 static int ipw_wx_get_range(struct net_device *dev,
8921 struct iw_request_info *info,
8922 union iwreq_data *wrqu, char *extra)
8923 {
8924 struct ipw_priv *priv = libipw_priv(dev);
8925 struct iw_range *range = (struct iw_range *)extra;
8926 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8927 int i = 0, j;
8928
8929 wrqu->data.length = sizeof(*range);
8930 memset(range, 0, sizeof(*range));
8931
8932 	/* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8933 range->throughput = 27 * 1000 * 1000;
8934
8935 range->max_qual.qual = 100;
8936 /* TODO: Find real max RSSI and stick here */
8937 range->max_qual.level = 0;
8938 range->max_qual.noise = 0;
8939 range->max_qual.updated = 7; /* Updated all three */
8940
8941 range->avg_qual.qual = 70;
8942 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8943 range->avg_qual.level = 0; /* FIXME to real average level */
8944 range->avg_qual.noise = 0;
8945 range->avg_qual.updated = 7; /* Updated all three */
8946 mutex_lock(&priv->mutex);
8947 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8948
8949 for (i = 0; i < range->num_bitrates; i++)
8950 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8951 500000;
8952
8953 range->max_rts = DEFAULT_RTS_THRESHOLD;
8954 range->min_frag = MIN_FRAG_THRESHOLD;
8955 range->max_frag = MAX_FRAG_THRESHOLD;
8956
8957 range->encoding_size[0] = 5;
8958 range->encoding_size[1] = 13;
8959 range->num_encoding_sizes = 2;
8960 range->max_encoding_tokens = WEP_KEYS;
8961
8962 /* Set the Wireless Extension versions */
8963 range->we_version_compiled = WIRELESS_EXT;
8964 range->we_version_source = 18;
8965
8966 i = 0;
8967 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8968 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8969 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8970 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8971 continue;
8972
8973 range->freq[i].i = geo->bg[j].channel;
8974 range->freq[i].m = geo->bg[j].freq * 100000;
8975 range->freq[i].e = 1;
8976 i++;
8977 }
8978 }
8979
8980 if (priv->ieee->mode & IEEE_A) {
8981 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8982 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8983 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8984 continue;
8985
8986 range->freq[i].i = geo->a[j].channel;
8987 range->freq[i].m = geo->a[j].freq * 100000;
8988 range->freq[i].e = 1;
8989 i++;
8990 }
8991 }
8992
8993 range->num_channels = i;
8994 range->num_frequency = i;
8995
8996 mutex_unlock(&priv->mutex);
8997
8998 /* Event capability (kernel + driver) */
8999 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
9000 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
9001 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
9002 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
9003 range->event_capa[1] = IW_EVENT_CAPA_K_1;
9004
9005 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
9006 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
9007
9008 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
9009
9010 IPW_DEBUG_WX("GET Range\n");
9011 return 0;
9012 }
9013
9014 static int ipw_wx_set_wap(struct net_device *dev,
9015 struct iw_request_info *info,
9016 union iwreq_data *wrqu, char *extra)
9017 {
9018 struct ipw_priv *priv = libipw_priv(dev);
9019
9020 static const unsigned char any[] = {
9021 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9022 };
9023 static const unsigned char off[] = {
9024 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9025 };
9026
9027 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9028 return -EINVAL;
9029 mutex_lock(&priv->mutex);
9030 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9031 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9032 /* we disable mandatory BSSID association */
9033 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9034 priv->config &= ~CFG_STATIC_BSSID;
9035 IPW_DEBUG_ASSOC("Attempting to associate with new "
9036 "parameters.\n");
9037 ipw_associate(priv);
9038 mutex_unlock(&priv->mutex);
9039 return 0;
9040 }
9041
9042 priv->config |= CFG_STATIC_BSSID;
9043 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9044 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9045 mutex_unlock(&priv->mutex);
9046 return 0;
9047 }
9048
9049 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9050 wrqu->ap_addr.sa_data);
9051
9052 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9053
9054 /* Network configuration changed -- force [re]association */
9055 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9056 if (!ipw_disassociate(priv))
9057 ipw_associate(priv);
9058
9059 mutex_unlock(&priv->mutex);
9060 return 0;
9061 }
9062
9063 static int ipw_wx_get_wap(struct net_device *dev,
9064 struct iw_request_info *info,
9065 union iwreq_data *wrqu, char *extra)
9066 {
9067 struct ipw_priv *priv = libipw_priv(dev);
9068
9069 /* If we are associated, trying to associate, or have a statically
9070 * configured BSSID then return that; otherwise return ANY */
9071 mutex_lock(&priv->mutex);
9072 if (priv->config & CFG_STATIC_BSSID ||
9073 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9074 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9075 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9076 } else
9077 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9078
9079 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9080 wrqu->ap_addr.sa_data);
9081 mutex_unlock(&priv->mutex);
9082 return 0;
9083 }
9084
9085 static int ipw_wx_set_essid(struct net_device *dev,
9086 struct iw_request_info *info,
9087 union iwreq_data *wrqu, char *extra)
9088 {
9089 struct ipw_priv *priv = libipw_priv(dev);
9090 int length;
9091 DECLARE_SSID_BUF(ssid);
9092
9093 mutex_lock(&priv->mutex);
9094
9095 if (!wrqu->essid.flags)
9096 {
9097 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9098 ipw_disassociate(priv);
9099 priv->config &= ~CFG_STATIC_ESSID;
9100 ipw_associate(priv);
9101 mutex_unlock(&priv->mutex);
9102 return 0;
9103 }
9104
9105 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9106
9107 priv->config |= CFG_STATIC_ESSID;
9108
9109 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9110 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9111 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9112 mutex_unlock(&priv->mutex);
9113 return 0;
9114 }
9115
9116 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9117 print_ssid(ssid, extra, length), length);
9118
9119 priv->essid_len = length;
9120 memcpy(priv->essid, extra, priv->essid_len);
9121
9122 /* Network configuration changed -- force [re]association */
9123 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9124 if (!ipw_disassociate(priv))
9125 ipw_associate(priv);
9126
9127 mutex_unlock(&priv->mutex);
9128 return 0;
9129 }
9130
9131 static int ipw_wx_get_essid(struct net_device *dev,
9132 struct iw_request_info *info,
9133 union iwreq_data *wrqu, char *extra)
9134 {
9135 struct ipw_priv *priv = libipw_priv(dev);
9136 DECLARE_SSID_BUF(ssid);
9137
9138 /* If we are associated, trying to associate, or have a statically
9139 * configured ESSID then return that; otherwise return ANY */
9140 mutex_lock(&priv->mutex);
9141 if (priv->config & CFG_STATIC_ESSID ||
9142 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9143 IPW_DEBUG_WX("Getting essid: '%s'\n",
9144 print_ssid(ssid, priv->essid, priv->essid_len));
9145 memcpy(extra, priv->essid, priv->essid_len);
9146 wrqu->essid.length = priv->essid_len;
9147 wrqu->essid.flags = 1; /* active */
9148 } else {
9149 IPW_DEBUG_WX("Getting essid: ANY\n");
9150 wrqu->essid.length = 0;
9151 wrqu->essid.flags = 0; /* active */
9152 }
9153 mutex_unlock(&priv->mutex);
9154 return 0;
9155 }
9156
9157 static int ipw_wx_set_nick(struct net_device *dev,
9158 struct iw_request_info *info,
9159 union iwreq_data *wrqu, char *extra)
9160 {
9161 struct ipw_priv *priv = libipw_priv(dev);
9162
9163 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9164 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9165 return -E2BIG;
9166 mutex_lock(&priv->mutex);
9167 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9168 memset(priv->nick, 0, sizeof(priv->nick));
9169 memcpy(priv->nick, extra, wrqu->data.length);
9170 IPW_DEBUG_TRACE("<<\n");
9171 mutex_unlock(&priv->mutex);
9172 return 0;
9173
9174 }
9175
9176 static int ipw_wx_get_nick(struct net_device *dev,
9177 struct iw_request_info *info,
9178 union iwreq_data *wrqu, char *extra)
9179 {
9180 struct ipw_priv *priv = libipw_priv(dev);
9181 IPW_DEBUG_WX("Getting nick\n");
9182 mutex_lock(&priv->mutex);
9183 wrqu->data.length = strlen(priv->nick);
9184 memcpy(extra, priv->nick, wrqu->data.length);
9185 wrqu->data.flags = 1; /* active */
9186 mutex_unlock(&priv->mutex);
9187 return 0;
9188 }
9189
9190 static int ipw_wx_set_sens(struct net_device *dev,
9191 struct iw_request_info *info,
9192 union iwreq_data *wrqu, char *extra)
9193 {
9194 struct ipw_priv *priv = libipw_priv(dev);
9195 int err = 0;
9196
9197 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9198 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9199 mutex_lock(&priv->mutex);
9200
9201 if (wrqu->sens.fixed == 0)
9202 {
9203 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9204 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9205 goto out;
9206 }
9207 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9208 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9209 err = -EINVAL;
9210 goto out;
9211 }
9212
9213 priv->roaming_threshold = wrqu->sens.value;
9214 priv->disassociate_threshold = 3*wrqu->sens.value;
9215 out:
9216 mutex_unlock(&priv->mutex);
9217 return err;
9218 }
9219
9220 static int ipw_wx_get_sens(struct net_device *dev,
9221 struct iw_request_info *info,
9222 union iwreq_data *wrqu, char *extra)
9223 {
9224 struct ipw_priv *priv = libipw_priv(dev);
9225 mutex_lock(&priv->mutex);
9226 wrqu->sens.fixed = 1;
9227 wrqu->sens.value = priv->roaming_threshold;
9228 mutex_unlock(&priv->mutex);
9229
9230 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9231 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9232
9233 return 0;
9234 }
9235
9236 static int ipw_wx_set_rate(struct net_device *dev,
9237 struct iw_request_info *info,
9238 union iwreq_data *wrqu, char *extra)
9239 {
9240 /* TODO: We should use semaphores or locks for access to priv */
9241 struct ipw_priv *priv = libipw_priv(dev);
9242 u32 target_rate = wrqu->bitrate.value;
9243 u32 fixed, mask;
9244
9245 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9246 /* value = X, fixed = 1 means only rate X */
9247 	/* value = X, fixed = 0 means all rates lower than or equal to X */
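	/* For example, a request of value = 11000000 with fixed = 1 selects
	 * only the 11 Mb/s CCK rate, while the same value with fixed = 0
	 * enables every supported rate up to and including 11 Mb/s, as built
	 * up by the chain of checks below. */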
9248
9249 if (target_rate == -1) {
9250 fixed = 0;
9251 mask = LIBIPW_DEFAULT_RATES_MASK;
9252 /* Now we should reassociate */
9253 goto apply;
9254 }
9255
9256 mask = 0;
9257 fixed = wrqu->bitrate.fixed;
9258
9259 if (target_rate == 1000000 || !fixed)
9260 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9261 if (target_rate == 1000000)
9262 goto apply;
9263
9264 if (target_rate == 2000000 || !fixed)
9265 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9266 if (target_rate == 2000000)
9267 goto apply;
9268
9269 if (target_rate == 5500000 || !fixed)
9270 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9271 if (target_rate == 5500000)
9272 goto apply;
9273
9274 if (target_rate == 6000000 || !fixed)
9275 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9276 if (target_rate == 6000000)
9277 goto apply;
9278
9279 if (target_rate == 9000000 || !fixed)
9280 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9281 if (target_rate == 9000000)
9282 goto apply;
9283
9284 if (target_rate == 11000000 || !fixed)
9285 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9286 if (target_rate == 11000000)
9287 goto apply;
9288
9289 if (target_rate == 12000000 || !fixed)
9290 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9291 if (target_rate == 12000000)
9292 goto apply;
9293
9294 if (target_rate == 18000000 || !fixed)
9295 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9296 if (target_rate == 18000000)
9297 goto apply;
9298
9299 if (target_rate == 24000000 || !fixed)
9300 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9301 if (target_rate == 24000000)
9302 goto apply;
9303
9304 if (target_rate == 36000000 || !fixed)
9305 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9306 if (target_rate == 36000000)
9307 goto apply;
9308
9309 if (target_rate == 48000000 || !fixed)
9310 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9311 if (target_rate == 48000000)
9312 goto apply;
9313
9314 if (target_rate == 54000000 || !fixed)
9315 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9316 if (target_rate == 54000000)
9317 goto apply;
9318
9319 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9320 return -EINVAL;
9321
9322 apply:
9323 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9324 mask, fixed ? "fixed" : "sub-rates");
9325 mutex_lock(&priv->mutex);
9326 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9327 priv->config &= ~CFG_FIXED_RATE;
9328 ipw_set_fixed_rate(priv, priv->ieee->mode);
9329 } else
9330 priv->config |= CFG_FIXED_RATE;
9331
9332 if (priv->rates_mask == mask) {
9333 IPW_DEBUG_WX("Mask set to current mask.\n");
9334 mutex_unlock(&priv->mutex);
9335 return 0;
9336 }
9337
9338 priv->rates_mask = mask;
9339
9340 /* Network configuration changed -- force [re]association */
9341 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9342 if (!ipw_disassociate(priv))
9343 ipw_associate(priv);
9344
9345 mutex_unlock(&priv->mutex);
9346 return 0;
9347 }
9348
9349 static int ipw_wx_get_rate(struct net_device *dev,
9350 struct iw_request_info *info,
9351 union iwreq_data *wrqu, char *extra)
9352 {
9353 struct ipw_priv *priv = libipw_priv(dev);
9354 mutex_lock(&priv->mutex);
9355 wrqu->bitrate.value = priv->last_rate;
9356 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9357 mutex_unlock(&priv->mutex);
9358 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9359 return 0;
9360 }
9361
9362 static int ipw_wx_set_rts(struct net_device *dev,
9363 struct iw_request_info *info,
9364 union iwreq_data *wrqu, char *extra)
9365 {
9366 struct ipw_priv *priv = libipw_priv(dev);
9367 mutex_lock(&priv->mutex);
9368 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9369 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9370 else {
9371 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9372 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9373 mutex_unlock(&priv->mutex);
9374 return -EINVAL;
9375 }
9376 priv->rts_threshold = wrqu->rts.value;
9377 }
9378
9379 ipw_send_rts_threshold(priv, priv->rts_threshold);
9380 mutex_unlock(&priv->mutex);
9381 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9382 return 0;
9383 }
9384
9385 static int ipw_wx_get_rts(struct net_device *dev,
9386 struct iw_request_info *info,
9387 union iwreq_data *wrqu, char *extra)
9388 {
9389 struct ipw_priv *priv = libipw_priv(dev);
9390 mutex_lock(&priv->mutex);
9391 wrqu->rts.value = priv->rts_threshold;
9392 wrqu->rts.fixed = 0; /* no auto select */
9393 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9394 mutex_unlock(&priv->mutex);
9395 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9396 return 0;
9397 }
9398
9399 static int ipw_wx_set_txpow(struct net_device *dev,
9400 struct iw_request_info *info,
9401 union iwreq_data *wrqu, char *extra)
9402 {
9403 struct ipw_priv *priv = libipw_priv(dev);
9404 int err = 0;
9405
9406 mutex_lock(&priv->mutex);
9407 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9408 err = -EINPROGRESS;
9409 goto out;
9410 }
9411
9412 if (!wrqu->power.fixed)
9413 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9414
9415 if (wrqu->power.flags != IW_TXPOW_DBM) {
9416 err = -EINVAL;
9417 goto out;
9418 }
9419
9420 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9421 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9422 err = -EINVAL;
9423 goto out;
9424 }
9425
9426 priv->tx_power = wrqu->power.value;
9427 err = ipw_set_tx_power(priv);
9428 out:
9429 mutex_unlock(&priv->mutex);
9430 return err;
9431 }
9432
9433 static int ipw_wx_get_txpow(struct net_device *dev,
9434 struct iw_request_info *info,
9435 union iwreq_data *wrqu, char *extra)
9436 {
9437 struct ipw_priv *priv = libipw_priv(dev);
9438 mutex_lock(&priv->mutex);
9439 wrqu->power.value = priv->tx_power;
9440 wrqu->power.fixed = 1;
9441 wrqu->power.flags = IW_TXPOW_DBM;
9442 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9443 mutex_unlock(&priv->mutex);
9444
9445 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9446 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9447
9448 return 0;
9449 }
9450
9451 static int ipw_wx_set_frag(struct net_device *dev,
9452 struct iw_request_info *info,
9453 union iwreq_data *wrqu, char *extra)
9454 {
9455 struct ipw_priv *priv = libipw_priv(dev);
9456 mutex_lock(&priv->mutex);
9457 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9458 priv->ieee->fts = DEFAULT_FTS;
9459 else {
9460 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9461 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9462 mutex_unlock(&priv->mutex);
9463 return -EINVAL;
9464 }
9465
9466 priv->ieee->fts = wrqu->frag.value & ~0x1;
9467 }
9468
9469 ipw_send_frag_threshold(priv, wrqu->frag.value);
9470 mutex_unlock(&priv->mutex);
9471 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9472 return 0;
9473 }
9474
9475 static int ipw_wx_get_frag(struct net_device *dev,
9476 struct iw_request_info *info,
9477 union iwreq_data *wrqu, char *extra)
9478 {
9479 struct ipw_priv *priv = libipw_priv(dev);
9480 mutex_lock(&priv->mutex);
9481 wrqu->frag.value = priv->ieee->fts;
9482 wrqu->frag.fixed = 0; /* no auto select */
9483 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9484 mutex_unlock(&priv->mutex);
9485 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9486
9487 return 0;
9488 }
9489
9490 static int ipw_wx_set_retry(struct net_device *dev,
9491 struct iw_request_info *info,
9492 union iwreq_data *wrqu, char *extra)
9493 {
9494 struct ipw_priv *priv = libipw_priv(dev);
9495
9496 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9497 return -EINVAL;
9498
9499 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9500 return 0;
9501
9502 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9503 return -EINVAL;
9504
9505 mutex_lock(&priv->mutex);
9506 if (wrqu->retry.flags & IW_RETRY_SHORT)
9507 priv->short_retry_limit = (u8) wrqu->retry.value;
9508 else if (wrqu->retry.flags & IW_RETRY_LONG)
9509 priv->long_retry_limit = (u8) wrqu->retry.value;
9510 else {
9511 priv->short_retry_limit = (u8) wrqu->retry.value;
9512 priv->long_retry_limit = (u8) wrqu->retry.value;
9513 }
9514
9515 ipw_send_retry_limit(priv, priv->short_retry_limit,
9516 priv->long_retry_limit);
9517 mutex_unlock(&priv->mutex);
9518 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9519 priv->short_retry_limit, priv->long_retry_limit);
9520 return 0;
9521 }
9522
9523 static int ipw_wx_get_retry(struct net_device *dev,
9524 struct iw_request_info *info,
9525 union iwreq_data *wrqu, char *extra)
9526 {
9527 struct ipw_priv *priv = libipw_priv(dev);
9528
9529 mutex_lock(&priv->mutex);
9530 wrqu->retry.disabled = 0;
9531
9532 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9533 mutex_unlock(&priv->mutex);
9534 return -EINVAL;
9535 }
9536
9537 if (wrqu->retry.flags & IW_RETRY_LONG) {
9538 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9539 wrqu->retry.value = priv->long_retry_limit;
9540 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9541 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9542 wrqu->retry.value = priv->short_retry_limit;
9543 } else {
9544 wrqu->retry.flags = IW_RETRY_LIMIT;
9545 wrqu->retry.value = priv->short_retry_limit;
9546 }
9547 mutex_unlock(&priv->mutex);
9548
9549 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9550
9551 return 0;
9552 }
9553
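/* SIOCSIWSCAN handler: marks the scan as user requested and queues one of
 * three delayed works -- a direct (ESSID-specific) scan, a passive scan,
 * or the normal active broadcast scan -- based on the request contents. */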
9554 static int ipw_wx_set_scan(struct net_device *dev,
9555 struct iw_request_info *info,
9556 union iwreq_data *wrqu, char *extra)
9557 {
9558 struct ipw_priv *priv = libipw_priv(dev);
9559 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9560 struct delayed_work *work = NULL;
9561
9562 mutex_lock(&priv->mutex);
9563
9564 priv->user_requested_scan = 1;
9565
9566 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9567 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9568 int len = min((int)req->essid_len,
9569 (int)sizeof(priv->direct_scan_ssid));
9570 memcpy(priv->direct_scan_ssid, req->essid, len);
9571 priv->direct_scan_ssid_len = len;
9572 work = &priv->request_direct_scan;
9573 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9574 work = &priv->request_passive_scan;
9575 }
9576 } else {
9577 /* Normal active broadcast scan */
9578 work = &priv->request_scan;
9579 }
9580
9581 mutex_unlock(&priv->mutex);
9582
9583 IPW_DEBUG_WX("Start scan\n");
9584
9585 schedule_delayed_work(work, 0);
9586
9587 return 0;
9588 }
9589
9590 static int ipw_wx_get_scan(struct net_device *dev,
9591 struct iw_request_info *info,
9592 union iwreq_data *wrqu, char *extra)
9593 {
9594 struct ipw_priv *priv = libipw_priv(dev);
9595 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9596 }
9597
9598 static int ipw_wx_set_encode(struct net_device *dev,
9599 struct iw_request_info *info,
9600 union iwreq_data *wrqu, char *key)
9601 {
9602 struct ipw_priv *priv = libipw_priv(dev);
9603 int ret;
9604 u32 cap = priv->capability;
9605
9606 mutex_lock(&priv->mutex);
9607 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9608
9609 /* In IBSS mode, we need to notify the firmware to update
9610 * the beacon info after we changed the capability. */
9611 if (cap != priv->capability &&
9612 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9613 priv->status & STATUS_ASSOCIATED)
9614 ipw_disassociate(priv);
9615
9616 mutex_unlock(&priv->mutex);
9617 return ret;
9618 }
9619
9620 static int ipw_wx_get_encode(struct net_device *dev,
9621 struct iw_request_info *info,
9622 union iwreq_data *wrqu, char *key)
9623 {
9624 struct ipw_priv *priv = libipw_priv(dev);
9625 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9626 }
9627
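/* SIOCSIWPOWER handler: power.disabled switches the firmware to CAM (no
 * power saving); otherwise only the ON/MODE/ALL_R flags are accepted and
 * the level defaults to BATTERY while it is still set to AC. */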
9628 static int ipw_wx_set_power(struct net_device *dev,
9629 struct iw_request_info *info,
9630 union iwreq_data *wrqu, char *extra)
9631 {
9632 struct ipw_priv *priv = libipw_priv(dev);
9633 int err;
9634 mutex_lock(&priv->mutex);
9635 if (wrqu->power.disabled) {
9636 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9637 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9638 if (err) {
9639 IPW_DEBUG_WX("failed setting power mode.\n");
9640 mutex_unlock(&priv->mutex);
9641 return err;
9642 }
9643 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9644 mutex_unlock(&priv->mutex);
9645 return 0;
9646 }
9647
9648 switch (wrqu->power.flags & IW_POWER_MODE) {
9649 case IW_POWER_ON: /* If not specified */
9650 case IW_POWER_MODE: /* If set all mask */
9651 	case IW_POWER_ALL_R:	/* If all modes are explicitly requested */

9652 break;
9653 default: /* Otherwise we don't support it */
9654 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9655 wrqu->power.flags);
9656 mutex_unlock(&priv->mutex);
9657 return -EOPNOTSUPP;
9658 }
9659
9660 /* If the user hasn't specified a power management mode yet, default
9661 * to BATTERY */
9662 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9663 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9664 else
9665 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9666
9667 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9668 if (err) {
9669 IPW_DEBUG_WX("failed setting power mode.\n");
9670 mutex_unlock(&priv->mutex);
9671 return err;
9672 }
9673
9674 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9675 mutex_unlock(&priv->mutex);
9676 return 0;
9677 }
9678
9679 static int ipw_wx_get_power(struct net_device *dev,
9680 struct iw_request_info *info,
9681 union iwreq_data *wrqu, char *extra)
9682 {
9683 struct ipw_priv *priv = libipw_priv(dev);
9684 mutex_lock(&priv->mutex);
9685 if (!(priv->power_mode & IPW_POWER_ENABLED))
9686 wrqu->power.disabled = 1;
9687 else
9688 wrqu->power.disabled = 0;
9689
9690 mutex_unlock(&priv->mutex);
9691 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9692
9693 return 0;
9694 }
9695
9696 static int ipw_wx_set_powermode(struct net_device *dev,
9697 struct iw_request_info *info,
9698 union iwreq_data *wrqu, char *extra)
9699 {
9700 struct ipw_priv *priv = libipw_priv(dev);
9701 int mode = *(int *)extra;
9702 int err;
9703
9704 mutex_lock(&priv->mutex);
9705 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9706 mode = IPW_POWER_AC;
9707
9708 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9709 err = ipw_send_power_mode(priv, mode);
9710 if (err) {
9711 IPW_DEBUG_WX("failed setting power mode.\n");
9712 mutex_unlock(&priv->mutex);
9713 return err;
9714 }
9715 priv->power_mode = IPW_POWER_ENABLED | mode;
9716 }
9717 mutex_unlock(&priv->mutex);
9718 return 0;
9719 }
9720
9721 #define MAX_WX_STRING 80
9722 static int ipw_wx_get_powermode(struct net_device *dev,
9723 struct iw_request_info *info,
9724 union iwreq_data *wrqu, char *extra)
9725 {
9726 struct ipw_priv *priv = libipw_priv(dev);
9727 int level = IPW_POWER_LEVEL(priv->power_mode);
9728 char *p = extra;
9729
9730 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9731
9732 switch (level) {
9733 case IPW_POWER_AC:
9734 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9735 break;
9736 case IPW_POWER_BATTERY:
9737 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9738 break;
9739 default:
9740 p += snprintf(p, MAX_WX_STRING - (p - extra),
9741 "(Timeout %dms, Period %dms)",
9742 timeout_duration[level - 1] / 1000,
9743 period_duration[level - 1] / 1000);
9744 }
9745
9746 if (!(priv->power_mode & IPW_POWER_ENABLED))
9747 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9748
9749 wrqu->data.length = p - extra + 1;
9750
9751 return 0;
9752 }
9753
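/* Private "set_mode" handler: validates the requested a/b/g combination
 * against the adapter type (802.11a is rejected on a 2200BG), rebuilds the
 * band/modulation masks and supported rates, and forces a reassociation. */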
9754 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9755 struct iw_request_info *info,
9756 union iwreq_data *wrqu, char *extra)
9757 {
9758 struct ipw_priv *priv = libipw_priv(dev);
9759 int mode = *(int *)extra;
9760 u8 band = 0, modulation = 0;
9761
9762 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9763 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9764 return -EINVAL;
9765 }
9766 mutex_lock(&priv->mutex);
9767 if (priv->adapter == IPW_2915ABG) {
9768 priv->ieee->abg_true = 1;
9769 if (mode & IEEE_A) {
9770 band |= LIBIPW_52GHZ_BAND;
9771 modulation |= LIBIPW_OFDM_MODULATION;
9772 } else
9773 priv->ieee->abg_true = 0;
9774 } else {
9775 if (mode & IEEE_A) {
9776 IPW_WARNING("Attempt to set 2200BG into "
9777 "802.11a mode\n");
9778 mutex_unlock(&priv->mutex);
9779 return -EINVAL;
9780 }
9781
9782 priv->ieee->abg_true = 0;
9783 }
9784
9785 if (mode & IEEE_B) {
9786 band |= LIBIPW_24GHZ_BAND;
9787 modulation |= LIBIPW_CCK_MODULATION;
9788 } else
9789 priv->ieee->abg_true = 0;
9790
9791 if (mode & IEEE_G) {
9792 band |= LIBIPW_24GHZ_BAND;
9793 modulation |= LIBIPW_OFDM_MODULATION;
9794 } else
9795 priv->ieee->abg_true = 0;
9796
9797 priv->ieee->mode = mode;
9798 priv->ieee->freq_band = band;
9799 priv->ieee->modulation = modulation;
9800 init_supported_rates(priv, &priv->rates);
9801
9802 /* Network configuration changed -- force [re]association */
9803 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9804 if (!ipw_disassociate(priv)) {
9805 ipw_send_supported_rates(priv, &priv->rates);
9806 ipw_associate(priv);
9807 }
9808
9809 /* Update the band LEDs */
9810 ipw_led_band_on(priv);
9811
9812 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9813 mode & IEEE_A ? 'a' : '.',
9814 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9815 mutex_unlock(&priv->mutex);
9816 return 0;
9817 }
9818
9819 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9820 struct iw_request_info *info,
9821 union iwreq_data *wrqu, char *extra)
9822 {
9823 struct ipw_priv *priv = libipw_priv(dev);
9824 mutex_lock(&priv->mutex);
9825 switch (priv->ieee->mode) {
9826 case IEEE_A:
9827 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9828 break;
9829 case IEEE_B:
9830 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9831 break;
9832 case IEEE_A | IEEE_B:
9833 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9834 break;
9835 case IEEE_G:
9836 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9837 break;
9838 case IEEE_A | IEEE_G:
9839 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9840 break;
9841 case IEEE_B | IEEE_G:
9842 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9843 break;
9844 case IEEE_A | IEEE_B | IEEE_G:
9845 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9846 break;
9847 default:
9848 strncpy(extra, "unknown", MAX_WX_STRING);
9849 break;
9850 }
9851
9852 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9853
9854 wrqu->data.length = strlen(extra) + 1;
9855 mutex_unlock(&priv->mutex);
9856
9857 return 0;
9858 }
9859
9860 static int ipw_wx_set_preamble(struct net_device *dev,
9861 struct iw_request_info *info,
9862 union iwreq_data *wrqu, char *extra)
9863 {
9864 struct ipw_priv *priv = libipw_priv(dev);
9865 int mode = *(int *)extra;
9866 mutex_lock(&priv->mutex);
9867 /* Switching from SHORT -> LONG requires a disassociation */
9868 if (mode == 1) {
9869 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9870 priv->config |= CFG_PREAMBLE_LONG;
9871
9872 /* Network configuration changed -- force [re]association */
9873 IPW_DEBUG_ASSOC
9874 ("[re]association triggered due to preamble change.\n");
9875 if (!ipw_disassociate(priv))
9876 ipw_associate(priv);
9877 }
9878 goto done;
9879 }
9880
9881 if (mode == 0) {
9882 priv->config &= ~CFG_PREAMBLE_LONG;
9883 goto done;
9884 }
9885 mutex_unlock(&priv->mutex);
9886 return -EINVAL;
9887
9888 done:
9889 mutex_unlock(&priv->mutex);
9890 return 0;
9891 }
9892
9893 static int ipw_wx_get_preamble(struct net_device *dev,
9894 struct iw_request_info *info,
9895 union iwreq_data *wrqu, char *extra)
9896 {
9897 struct ipw_priv *priv = libipw_priv(dev);
9898 mutex_lock(&priv->mutex);
9899 if (priv->config & CFG_PREAMBLE_LONG)
9900 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9901 else
9902 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9903 mutex_unlock(&priv->mutex);
9904 return 0;
9905 }
9906
9907 #ifdef CONFIG_IPW2200_MONITOR
9908 static int ipw_wx_set_monitor(struct net_device *dev,
9909 struct iw_request_info *info,
9910 union iwreq_data *wrqu, char *extra)
9911 {
9912 struct ipw_priv *priv = libipw_priv(dev);
9913 int *parms = (int *)extra;
9914 int enable = (parms[0] > 0);
9915 mutex_lock(&priv->mutex);
9916 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9917 if (enable) {
9918 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9919 #ifdef CONFIG_IPW2200_RADIOTAP
9920 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9921 #else
9922 priv->net_dev->type = ARPHRD_IEEE80211;
9923 #endif
9924 schedule_work(&priv->adapter_restart);
9925 }
9926
9927 ipw_set_channel(priv, parms[1]);
9928 } else {
9929 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9930 mutex_unlock(&priv->mutex);
9931 return 0;
9932 }
9933 priv->net_dev->type = ARPHRD_ETHER;
9934 schedule_work(&priv->adapter_restart);
9935 }
9936 mutex_unlock(&priv->mutex);
9937 return 0;
9938 }
9939
9940 #endif /* CONFIG_IPW2200_MONITOR */
9941
9942 static int ipw_wx_reset(struct net_device *dev,
9943 struct iw_request_info *info,
9944 union iwreq_data *wrqu, char *extra)
9945 {
9946 struct ipw_priv *priv = libipw_priv(dev);
9947 IPW_DEBUG_WX("RESET\n");
9948 schedule_work(&priv->adapter_restart);
9949 return 0;
9950 }
9951
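/* Private "sw_reset" handler: restores module-parameter defaults via
 * ipw_sw_reset(), frees the cached firmware and restarts the adapter,
 * disables encryption, and reassociates unless the radio is kill-switched. */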
9952 static int ipw_wx_sw_reset(struct net_device *dev,
9953 struct iw_request_info *info,
9954 union iwreq_data *wrqu, char *extra)
9955 {
9956 struct ipw_priv *priv = libipw_priv(dev);
9957 union iwreq_data wrqu_sec = {
9958 .encoding = {
9959 .flags = IW_ENCODE_DISABLED,
9960 },
9961 };
9962 int ret;
9963
9964 IPW_DEBUG_WX("SW_RESET\n");
9965
9966 mutex_lock(&priv->mutex);
9967
9968 ret = ipw_sw_reset(priv, 2);
9969 if (!ret) {
9970 free_firmware();
9971 ipw_adapter_restart(priv);
9972 }
9973
9974 /* The SW reset bit might have been toggled on by the 'disable'
9975 * module parameter, so take appropriate action */
9976 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9977
9978 mutex_unlock(&priv->mutex);
9979 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9980 mutex_lock(&priv->mutex);
9981
9982 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9983 /* Configuration likely changed -- force [re]association */
9984 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9985 "reset.\n");
9986 if (!ipw_disassociate(priv))
9987 ipw_associate(priv);
9988 }
9989
9990 mutex_unlock(&priv->mutex);
9991
9992 return 0;
9993 }
9994
9995 /* Rebase the WE IOCTLs to zero for the handler array */
9996 static iw_handler ipw_wx_handlers[] = {
9997 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9998 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9999 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10000 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10001 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10002 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10003 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10004 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10005 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10006 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10007 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10008 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10009 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10010 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10011 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10012 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10013 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10014 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10015 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10016 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10017 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10018 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10019 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10020 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10021 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10022 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10023 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10024 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10025 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10026 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10027 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10028 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10029 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10030 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10031 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10032 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10033 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10034 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10035 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10036 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10037 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10038 };
10039
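/* Private WE ioctls.  The enum order below must match the order of the
 * entries in ipw_priv_handler[], since private handlers are looked up by
 * (cmd - SIOCIWFIRSTPRIV).  They are normally driven from user space with
 * iwpriv, e.g. (assuming the interface is named eth1):
 *
 *	iwpriv eth1 set_power 1
 *	iwpriv eth1 sw_reset
 *	iwpriv eth1 monitor 1 6	  (monitor mode on channel 6, if built in)
 */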
10040 enum {
10041 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10042 IPW_PRIV_GET_POWER,
10043 IPW_PRIV_SET_MODE,
10044 IPW_PRIV_GET_MODE,
10045 IPW_PRIV_SET_PREAMBLE,
10046 IPW_PRIV_GET_PREAMBLE,
10047 IPW_PRIV_RESET,
10048 IPW_PRIV_SW_RESET,
10049 #ifdef CONFIG_IPW2200_MONITOR
10050 IPW_PRIV_SET_MONITOR,
10051 #endif
10052 };
10053
10054 static struct iw_priv_args ipw_priv_args[] = {
10055 {
10056 .cmd = IPW_PRIV_SET_POWER,
10057 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10058 .name = "set_power"},
10059 {
10060 .cmd = IPW_PRIV_GET_POWER,
10061 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10062 .name = "get_power"},
10063 {
10064 .cmd = IPW_PRIV_SET_MODE,
10065 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10066 .name = "set_mode"},
10067 {
10068 .cmd = IPW_PRIV_GET_MODE,
10069 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10070 .name = "get_mode"},
10071 {
10072 .cmd = IPW_PRIV_SET_PREAMBLE,
10073 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10074 .name = "set_preamble"},
10075 {
10076 .cmd = IPW_PRIV_GET_PREAMBLE,
10077 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10078 .name = "get_preamble"},
10079 {
10080 IPW_PRIV_RESET,
10081 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10082 {
10083 IPW_PRIV_SW_RESET,
10084 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10085 #ifdef CONFIG_IPW2200_MONITOR
10086 {
10087 IPW_PRIV_SET_MONITOR,
10088 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10089 #endif /* CONFIG_IPW2200_MONITOR */
10090 };
10091
10092 static iw_handler ipw_priv_handler[] = {
10093 ipw_wx_set_powermode,
10094 ipw_wx_get_powermode,
10095 ipw_wx_set_wireless_mode,
10096 ipw_wx_get_wireless_mode,
10097 ipw_wx_set_preamble,
10098 ipw_wx_get_preamble,
10099 ipw_wx_reset,
10100 ipw_wx_sw_reset,
10101 #ifdef CONFIG_IPW2200_MONITOR
10102 ipw_wx_set_monitor,
10103 #endif
10104 };
10105
10106 static struct iw_handler_def ipw_wx_handler_def = {
10107 .standard = ipw_wx_handlers,
10108 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10109 .num_private = ARRAY_SIZE(ipw_priv_handler),
10110 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10111 .private = ipw_priv_handler,
10112 .private_args = ipw_priv_args,
10113 .get_wireless_stats = ipw_get_wireless_stats,
10114 };
10115
10116 /*
10117 * Get wireless statistics.
10118 * Called by /proc/net/wireless
10119 * Also called by SIOCGIWSTATS
10120 */
10121 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10122 {
10123 struct ipw_priv *priv = libipw_priv(dev);
10124 struct iw_statistics *wstats;
10125
10126 wstats = &priv->wstats;
10127
10128 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10129 * netdev->get_wireless_stats seems to be called before fw is
10130 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10131 	 * and associated; if not associated, the values are all meaningless
10132 	 * anyway, so zero them and mark them invalid */
10133 if (!(priv->status & STATUS_ASSOCIATED)) {
10134 wstats->miss.beacon = 0;
10135 wstats->discard.retries = 0;
10136 wstats->qual.qual = 0;
10137 wstats->qual.level = 0;
10138 wstats->qual.noise = 0;
10139 wstats->qual.updated = 7;
10140 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10141 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10142 return wstats;
10143 }
10144
10145 wstats->qual.qual = priv->quality;
10146 wstats->qual.level = priv->exp_avg_rssi;
10147 wstats->qual.noise = priv->exp_avg_noise;
10148 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10149 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10150
10151 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10152 wstats->discard.retries = priv->last_tx_failures;
10153 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10154
10155 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10156 goto fail_get_ordinal;
10157 wstats->discard.retries += tx_retry; */
10158
10159 return wstats;
10160 }
10161
10162 /* net device stuff */
10163
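/* Fill in the default system configuration sent to the firmware.  The
 * antenna diversity setting comes from the 'antenna' module parameter and
 * is clamped back to CFG_SYS_ANTENNA_BOTH when it is out of range. */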
10164 static void init_sys_config(struct ipw_sys_config *sys_config)
10165 {
10166 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10167 sys_config->bt_coexistence = 0;
10168 sys_config->answer_broadcast_ssid_probe = 0;
10169 sys_config->accept_all_data_frames = 0;
10170 sys_config->accept_non_directed_frames = 1;
10171 sys_config->exclude_unicast_unencrypted = 0;
10172 sys_config->disable_unicast_decryption = 1;
10173 sys_config->exclude_multicast_unencrypted = 0;
10174 sys_config->disable_multicast_decryption = 1;
10175 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10176 antenna = CFG_SYS_ANTENNA_BOTH;
10177 sys_config->antenna_diversity = antenna;
10178 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10179 sys_config->dot11g_auto_detection = 0;
10180 sys_config->enable_cts_to_self = 0;
10181 sys_config->bt_coexist_collision_thr = 0;
10182 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10183 sys_config->silence_threshold = 0x1e;
10184 }
10185
10186 static int ipw_net_open(struct net_device *dev)
10187 {
10188 IPW_DEBUG_INFO("dev->open\n");
10189 netif_start_queue(dev);
10190 return 0;
10191 }
10192
10193 static int ipw_net_stop(struct net_device *dev)
10194 {
10195 IPW_DEBUG_INFO("dev->close\n");
10196 netif_stop_queue(dev);
10197 return 0;
10198 }
10199
10200 /*
10201 todo:
10202
10203 	modify to send one TFD per fragment instead of using chunking; otherwise
10204 	we need to heavily modify libipw_skb_to_txb.
10205 */
10206
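/* Main TX path: builds a TFD for the txb, resolves the destination station
 * (IBSS), sets the preamble/modulation and hardware-crypto flags, DMA-maps
 * up to NUM_TFD_CHUNKS - 2 fragments (coalescing any remainder into a
 * single skb) and then advances the write index to kick the DMA engine. */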
10207 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10208 int pri)
10209 {
10210 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10211 txb->fragments[0]->data;
10212 int i = 0;
10213 struct tfd_frame *tfd;
10214 #ifdef CONFIG_IPW2200_QOS
10215 int tx_id = ipw_get_tx_queue_number(priv, pri);
10216 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10217 #else
10218 struct clx2_tx_queue *txq = &priv->txq[0];
10219 #endif
10220 struct clx2_queue *q = &txq->q;
10221 u8 id, hdr_len, unicast;
10222 int fc;
10223
10224 if (!(priv->status & STATUS_ASSOCIATED))
10225 goto drop;
10226
10227 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10228 switch (priv->ieee->iw_mode) {
10229 case IW_MODE_ADHOC:
10230 unicast = !is_multicast_ether_addr(hdr->addr1);
10231 id = ipw_find_station(priv, hdr->addr1);
10232 if (id == IPW_INVALID_STATION) {
10233 id = ipw_add_station(priv, hdr->addr1);
10234 if (id == IPW_INVALID_STATION) {
10235 IPW_WARNING("Attempt to send data to "
10236 "invalid cell: %pM\n",
10237 hdr->addr1);
10238 goto drop;
10239 }
10240 }
10241 break;
10242
10243 case IW_MODE_INFRA:
10244 default:
10245 unicast = !is_multicast_ether_addr(hdr->addr3);
10246 id = 0;
10247 break;
10248 }
10249
10250 tfd = &txq->bd[q->first_empty];
10251 txq->txb[q->first_empty] = txb;
10252 memset(tfd, 0, sizeof(*tfd));
10253 tfd->u.data.station_number = id;
10254
10255 tfd->control_flags.message_type = TX_FRAME_TYPE;
10256 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10257
10258 tfd->u.data.cmd_id = DINO_CMD_TX;
10259 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10260
10261 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10262 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10263 else
10264 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10265
10266 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10267 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10268
10269 fc = le16_to_cpu(hdr->frame_ctl);
10270 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10271
10272 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10273
10274 if (likely(unicast))
10275 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10276
10277 if (txb->encrypted && !priv->ieee->host_encrypt) {
10278 switch (priv->ieee->sec.level) {
10279 case SEC_LEVEL_3:
10280 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10281 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10282 /* XXX: ACK flag must be set for CCMP even if it
10283 * is a multicast/broadcast packet, because CCMP
10284 * group communication encrypted by GTK is
10285 * actually done by the AP. */
10286 if (!unicast)
10287 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10288
10289 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10290 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10291 tfd->u.data.key_index = 0;
10292 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10293 break;
10294 case SEC_LEVEL_2:
10295 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10296 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10297 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10298 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10299 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10300 break;
10301 case SEC_LEVEL_1:
10302 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10303 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10304 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10305 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10306 40)
10307 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10308 else
10309 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10310 break;
10311 case SEC_LEVEL_0:
10312 break;
10313 default:
10314 printk(KERN_ERR "Unknown security level %d\n",
10315 priv->ieee->sec.level);
10316 break;
10317 }
10318 } else
10319 /* No hardware encryption */
10320 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10321
10322 #ifdef CONFIG_IPW2200_QOS
10323 if (fc & IEEE80211_STYPE_QOS_DATA)
10324 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10325 #endif /* CONFIG_IPW2200_QOS */
10326
10327 /* payload */
10328 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10329 txb->nr_frags));
10330 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10331 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10332 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10333 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10334 i, le32_to_cpu(tfd->u.data.num_chunks),
10335 txb->fragments[i]->len - hdr_len);
10336 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10337 i, tfd->u.data.num_chunks,
10338 txb->fragments[i]->len - hdr_len);
10339 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10340 txb->fragments[i]->len - hdr_len);
10341
10342 tfd->u.data.chunk_ptr[i] =
10343 cpu_to_le32(pci_map_single
10344 (priv->pci_dev,
10345 txb->fragments[i]->data + hdr_len,
10346 txb->fragments[i]->len - hdr_len,
10347 PCI_DMA_TODEVICE));
10348 tfd->u.data.chunk_len[i] =
10349 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10350 }
10351
10352 if (i != txb->nr_frags) {
10353 struct sk_buff *skb;
10354 u16 remaining_bytes = 0;
10355 int j;
10356
10357 for (j = i; j < txb->nr_frags; j++)
10358 remaining_bytes += txb->fragments[j]->len - hdr_len;
10359
10360 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10361 remaining_bytes);
10362 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10363 if (skb != NULL) {
10364 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10365 for (j = i; j < txb->nr_frags; j++) {
10366 int size = txb->fragments[j]->len - hdr_len;
10367
10368 printk(KERN_INFO "Adding frag %d %d...\n",
10369 j, size);
10370 memcpy(skb_put(skb, size),
10371 txb->fragments[j]->data + hdr_len, size);
10372 }
10373 dev_kfree_skb_any(txb->fragments[i]);
10374 txb->fragments[i] = skb;
10375 tfd->u.data.chunk_ptr[i] =
10376 cpu_to_le32(pci_map_single
10377 (priv->pci_dev, skb->data,
10378 remaining_bytes,
10379 PCI_DMA_TODEVICE));
10380
10381 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10382 }
10383 }
10384
10385 /* kick DMA */
10386 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10387 ipw_write32(priv, q->reg_w, q->first_empty);
10388
10389 if (ipw_tx_queue_space(q) < q->high_mark)
10390 netif_stop_queue(priv->net_dev);
10391
10392 return NETDEV_TX_OK;
10393
10394 drop:
10395 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10396 libipw_txb_free(txb);
10397 return NETDEV_TX_OK;
10398 }
10399
10400 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10401 {
10402 struct ipw_priv *priv = libipw_priv(dev);
10403 #ifdef CONFIG_IPW2200_QOS
10404 int tx_id = ipw_get_tx_queue_number(priv, pri);
10405 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10406 #else
10407 struct clx2_tx_queue *txq = &priv->txq[0];
10408 #endif /* CONFIG_IPW2200_QOS */
10409
10410 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10411 return 1;
10412
10413 return 0;
10414 }
10415
10416 #ifdef CONFIG_IPW2200_PROMISCUOUS
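/* Mirror transmitted frames onto the promiscuous (rtap) interface: each
 * fragment that passes the prom_priv->filter checks is copied into a new
 * skb behind a minimal radiotap header carrying only the channel field. */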
10417 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10418 struct libipw_txb *txb)
10419 {
10420 struct libipw_rx_stats dummystats;
10421 struct ieee80211_hdr *hdr;
10422 u8 n;
10423 u16 filter = priv->prom_priv->filter;
10424 int hdr_only = 0;
10425
10426 if (filter & IPW_PROM_NO_TX)
10427 return;
10428
10429 memset(&dummystats, 0, sizeof(dummystats));
10430
10431 /* Filtering of fragment chains is done against the first fragment */
10432 hdr = (void *)txb->fragments[0]->data;
10433 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10434 if (filter & IPW_PROM_NO_MGMT)
10435 return;
10436 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10437 hdr_only = 1;
10438 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10439 if (filter & IPW_PROM_NO_CTL)
10440 return;
10441 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10442 hdr_only = 1;
10443 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10444 if (filter & IPW_PROM_NO_DATA)
10445 return;
10446 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10447 hdr_only = 1;
10448 }
10449
10450 for(n=0; n<txb->nr_frags; ++n) {
10451 struct sk_buff *src = txb->fragments[n];
10452 struct sk_buff *dst;
10453 struct ieee80211_radiotap_header *rt_hdr;
10454 int len;
10455
10456 if (hdr_only) {
10457 hdr = (void *)src->data;
10458 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10459 } else
10460 len = src->len;
10461
10462 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10463 if (!dst)
10464 continue;
10465
10466 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10467
10468 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10469 rt_hdr->it_pad = 0;
10470 rt_hdr->it_present = 0; /* after all, it's just an idea */
10471 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10472
10473 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10474 ieee80211chan2mhz(priv->channel));
10475 if (priv->channel > 14) /* 802.11a */
10476 *(__le16*)skb_put(dst, sizeof(u16)) =
10477 cpu_to_le16(IEEE80211_CHAN_OFDM |
10478 IEEE80211_CHAN_5GHZ);
10479 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10480 *(__le16*)skb_put(dst, sizeof(u16)) =
10481 cpu_to_le16(IEEE80211_CHAN_CCK |
10482 IEEE80211_CHAN_2GHZ);
10483 else /* 802.11g */
10484 *(__le16*)skb_put(dst, sizeof(u16)) =
10485 cpu_to_le16(IEEE80211_CHAN_OFDM |
10486 IEEE80211_CHAN_2GHZ);
10487
10488 rt_hdr->it_len = cpu_to_le16(dst->len);
10489
10490 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10491
10492 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10493 dev_kfree_skb_any(dst);
10494 }
10495 }
10496 #endif
10497
10498 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10499 struct net_device *dev, int pri)
10500 {
10501 struct ipw_priv *priv = libipw_priv(dev);
10502 unsigned long flags;
10503 netdev_tx_t ret;
10504
10505 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10506 spin_lock_irqsave(&priv->lock, flags);
10507
10508 #ifdef CONFIG_IPW2200_PROMISCUOUS
10509 if (rtap_iface && netif_running(priv->prom_net_dev))
10510 ipw_handle_promiscuous_tx(priv, txb);
10511 #endif
10512
10513 ret = ipw_tx_skb(priv, txb, pri);
10514 if (ret == NETDEV_TX_OK)
10515 __ipw_led_activity_on(priv);
10516 spin_unlock_irqrestore(&priv->lock, flags);
10517
10518 return ret;
10519 }
10520
10521 static void ipw_net_set_multicast_list(struct net_device *dev)
10522 {
10523
10524 }
10525
10526 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10527 {
10528 struct ipw_priv *priv = libipw_priv(dev);
10529 struct sockaddr *addr = p;
10530
10531 if (!is_valid_ether_addr(addr->sa_data))
10532 return -EADDRNOTAVAIL;
10533 mutex_lock(&priv->mutex);
10534 priv->config |= CFG_CUSTOM_MAC;
10535 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10536 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10537 priv->net_dev->name, priv->mac_addr);
10538 schedule_work(&priv->adapter_restart);
10539 mutex_unlock(&priv->mutex);
10540 return 0;
10541 }
10542
10543 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10544 struct ethtool_drvinfo *info)
10545 {
10546 struct ipw_priv *p = libipw_priv(dev);
10547 char vers[64];
10548 char date[32];
10549 u32 len;
10550
10551 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10552 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10553
10554 len = sizeof(vers);
10555 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10556 len = sizeof(date);
10557 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10558
10559 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10560 vers, date);
10561 strlcpy(info->bus_info, pci_name(p->pci_dev),
10562 sizeof(info->bus_info));
10563 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10564 }
10565
10566 static u32 ipw_ethtool_get_link(struct net_device *dev)
10567 {
10568 struct ipw_priv *priv = libipw_priv(dev);
10569 return (priv->status & STATUS_ASSOCIATED) != 0;
10570 }
10571
10572 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10573 {
10574 return IPW_EEPROM_IMAGE_SIZE;
10575 }
10576
10577 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10578 struct ethtool_eeprom *eeprom, u8 * bytes)
10579 {
10580 struct ipw_priv *p = libipw_priv(dev);
10581
10582 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10583 return -EINVAL;
10584 mutex_lock(&p->mutex);
10585 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10586 mutex_unlock(&p->mutex);
10587 return 0;
10588 }
10589
10590 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10591 struct ethtool_eeprom *eeprom, u8 * bytes)
10592 {
10593 struct ipw_priv *p = libipw_priv(dev);
10594 int i;
10595
10596 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10597 return -EINVAL;
10598 mutex_lock(&p->mutex);
10599 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10600 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10601 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10602 mutex_unlock(&p->mutex);
10603 return 0;
10604 }
10605
10606 static const struct ethtool_ops ipw_ethtool_ops = {
10607 .get_link = ipw_ethtool_get_link,
10608 .get_drvinfo = ipw_ethtool_get_drvinfo,
10609 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10610 .get_eeprom = ipw_ethtool_get_eeprom,
10611 .set_eeprom = ipw_ethtool_set_eeprom,
10612 };
10613
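/* Interrupt handler: reads and acknowledges INTA, bails out on shared or
 * spurious interrupts, then masks further interrupts and defers the real
 * work to the irq_tasklet with the INTA value cached in priv->isr_inta. */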
10614 static irqreturn_t ipw_isr(int irq, void *data)
10615 {
10616 struct ipw_priv *priv = data;
10617 u32 inta, inta_mask;
10618
10619 if (!priv)
10620 return IRQ_NONE;
10621
10622 spin_lock(&priv->irq_lock);
10623
10624 if (!(priv->status & STATUS_INT_ENABLED)) {
10625 /* IRQ is disabled */
10626 goto none;
10627 }
10628
10629 inta = ipw_read32(priv, IPW_INTA_RW);
10630 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10631
10632 if (inta == 0xFFFFFFFF) {
10633 /* Hardware disappeared */
10634 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10635 goto none;
10636 }
10637
10638 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10639 /* Shared interrupt */
10640 goto none;
10641 }
10642
10643 /* tell the device to stop sending interrupts */
10644 __ipw_disable_interrupts(priv);
10645
10646 /* ack current interrupts */
10647 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10648 ipw_write32(priv, IPW_INTA_RW, inta);
10649
10650 /* Cache INTA value for our tasklet */
10651 priv->isr_inta = inta;
10652
10653 tasklet_schedule(&priv->irq_tasklet);
10654
10655 spin_unlock(&priv->irq_lock);
10656
10657 return IRQ_HANDLED;
10658 none:
10659 spin_unlock(&priv->irq_lock);
10660 return IRQ_NONE;
10661 }
10662
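/* Poll the hardware RF-kill switch: while it is still active the check is
 * rescheduled every two seconds; once both HW and SW kill are clear the
 * adapter-restart work is queued to bring the device back up. */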
10663 static void ipw_rf_kill(void *adapter)
10664 {
10665 struct ipw_priv *priv = adapter;
10666 unsigned long flags;
10667
10668 spin_lock_irqsave(&priv->lock, flags);
10669
10670 if (rf_kill_active(priv)) {
10671 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10672 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10673 goto exit_unlock;
10674 }
10675
10676 /* RF Kill is now disabled, so bring the device back up */
10677
10678 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10679 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10680 "device\n");
10681
10682 		/* we cannot do an adapter restart while inside an irq lock */
10683 schedule_work(&priv->adapter_restart);
10684 } else
10685 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10686 "enabled\n");
10687
10688 exit_unlock:
10689 spin_unlock_irqrestore(&priv->lock, flags);
10690 }
10691
10692 static void ipw_bg_rf_kill(struct work_struct *work)
10693 {
10694 struct ipw_priv *priv =
10695 container_of(work, struct ipw_priv, rf_kill.work);
10696 mutex_lock(&priv->mutex);
10697 ipw_rf_kill(priv);
10698 mutex_unlock(&priv->mutex);
10699 }
10700
10701 static void ipw_link_up(struct ipw_priv *priv)
10702 {
10703 priv->last_seq_num = -1;
10704 priv->last_frag_num = -1;
10705 priv->last_packet_time = 0;
10706
10707 netif_carrier_on(priv->net_dev);
10708
10709 cancel_delayed_work(&priv->request_scan);
10710 cancel_delayed_work(&priv->request_direct_scan);
10711 cancel_delayed_work(&priv->request_passive_scan);
10712 cancel_delayed_work(&priv->scan_event);
10713 ipw_reset_stats(priv);
10714 /* Ensure the rate is updated immediately */
10715 priv->last_rate = ipw_get_current_rate(priv);
10716 ipw_gather_stats(priv);
10717 ipw_led_link_up(priv);
10718 notify_wx_assoc_event(priv);
10719
10720 if (priv->config & CFG_BACKGROUND_SCAN)
10721 schedule_delayed_work(&priv->request_scan, HZ);
10722 }
10723
10724 static void ipw_bg_link_up(struct work_struct *work)
10725 {
10726 struct ipw_priv *priv =
10727 container_of(work, struct ipw_priv, link_up);
10728 mutex_lock(&priv->mutex);
10729 ipw_link_up(priv);
10730 mutex_unlock(&priv->mutex);
10731 }
10732
10733 static void ipw_link_down(struct ipw_priv *priv)
10734 {
10735 ipw_led_link_down(priv);
10736 netif_carrier_off(priv->net_dev);
10737 notify_wx_assoc_event(priv);
10738
10739 /* Cancel any queued work ... */
10740 cancel_delayed_work(&priv->request_scan);
10741 cancel_delayed_work(&priv->request_direct_scan);
10742 cancel_delayed_work(&priv->request_passive_scan);
10743 cancel_delayed_work(&priv->adhoc_check);
10744 cancel_delayed_work(&priv->gather_stats);
10745
10746 ipw_reset_stats(priv);
10747
10748 if (!(priv->status & STATUS_EXIT_PENDING)) {
10749 /* Queue up another scan... */
10750 schedule_delayed_work(&priv->request_scan, 0);
10751 } else
10752 cancel_delayed_work(&priv->scan_event);
10753 }
10754
10755 static void ipw_bg_link_down(struct work_struct *work)
10756 {
10757 struct ipw_priv *priv =
10758 container_of(work, struct ipw_priv, link_down);
10759 mutex_lock(&priv->mutex);
10760 ipw_link_down(priv);
10761 mutex_unlock(&priv->mutex);
10762 }
10763
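/* Set up every piece of deferred work used by the driver (association,
 * scanning, LED and statistics work items) plus the IRQ tasklet.  Runs
 * at device-probe time (__devinit). */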
10764 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10765 {
10766 int ret = 0;
10767
10768 init_waitqueue_head(&priv->wait_command_queue);
10769 init_waitqueue_head(&priv->wait_state);
10770
10771 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10772 INIT_WORK(&priv->associate, ipw_bg_associate);
10773 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10774 INIT_WORK(&priv->system_config, ipw_system_config);
10775 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10776 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10777 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10778 INIT_WORK(&priv->up, ipw_bg_up);
10779 INIT_WORK(&priv->down, ipw_bg_down);
10780 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10781 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10782 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10783 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10784 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10785 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10786 INIT_WORK(&priv->roam, ipw_bg_roam);
10787 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10788 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10789 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10790 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10791 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10792 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10793 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10794
10795 #ifdef CONFIG_IPW2200_QOS
10796 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10797 #endif /* CONFIG_IPW2200_QOS */
10798
10799 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10800 ipw_irq_tasklet, (unsigned long)priv);
10801
10802 return ret;
10803 }
10804
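/* Security shim for libipw: copies key material, auth mode, privacy and
 * security-level flags from the supplied libipw_security into
 * priv->ieee->sec, updates the CAP_* capability bits and, when hardware
 * crypto is in use, pushes the new keys down to the firmware. */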
10805 static void shim__set_security(struct net_device *dev,
10806 struct libipw_security *sec)
10807 {
10808 struct ipw_priv *priv = libipw_priv(dev);
10809 int i;
10810 for (i = 0; i < 4; i++) {
10811 if (sec->flags & (1 << i)) {
10812 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10813 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10814 if (sec->key_sizes[i] == 0)
10815 priv->ieee->sec.flags &= ~(1 << i);
10816 else {
10817 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10818 sec->key_sizes[i]);
10819 priv->ieee->sec.flags |= (1 << i);
10820 }
10821 priv->status |= STATUS_SECURITY_UPDATED;
10822 } else if (sec->level != SEC_LEVEL_1)
10823 priv->ieee->sec.flags &= ~(1 << i);
10824 }
10825
10826 if (sec->flags & SEC_ACTIVE_KEY) {
10827 if (sec->active_key <= 3) {
10828 priv->ieee->sec.active_key = sec->active_key;
10829 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10830 } else
10831 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10832 priv->status |= STATUS_SECURITY_UPDATED;
10833 } else
10834 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10835
10836 if ((sec->flags & SEC_AUTH_MODE) &&
10837 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10838 priv->ieee->sec.auth_mode = sec->auth_mode;
10839 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10840 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10841 priv->capability |= CAP_SHARED_KEY;
10842 else
10843 priv->capability &= ~CAP_SHARED_KEY;
10844 priv->status |= STATUS_SECURITY_UPDATED;
10845 }
10846
10847 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10848 priv->ieee->sec.flags |= SEC_ENABLED;
10849 priv->ieee->sec.enabled = sec->enabled;
10850 priv->status |= STATUS_SECURITY_UPDATED;
10851 if (sec->enabled)
10852 priv->capability |= CAP_PRIVACY_ON;
10853 else
10854 priv->capability &= ~CAP_PRIVACY_ON;
10855 }
10856
10857 if (sec->flags & SEC_ENCRYPT)
10858 priv->ieee->sec.encrypt = sec->encrypt;
10859
10860 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10861 priv->ieee->sec.level = sec->level;
10862 priv->ieee->sec.flags |= SEC_LEVEL;
10863 priv->status |= STATUS_SECURITY_UPDATED;
10864 }
10865
10866 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10867 ipw_set_hwcrypto_keys(priv);
10868
10869 	/* To match the current functionality of ipw2100 (which works well with
10870 	 * various supplicants), we don't force a disassociation if the
10871 	 * privacy capability changes ... */
10872 #if 0
10873 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10874 (((priv->assoc_request.capability &
10875 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10876 (!(priv->assoc_request.capability &
10877 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10878 IPW_DEBUG_ASSOC("Disassociating due to capability "
10879 "change.\n");
10880 ipw_disassociate(priv);
10881 }
10882 #endif
10883 }
10884
10885 static int init_supported_rates(struct ipw_priv *priv,
10886 struct ipw_supported_rates *rates)
10887 {
10888 /* TODO: Mask out rates based on priv->rates_mask */
10889
10890 memset(rates, 0, sizeof(*rates));
10891 /* configure supported rates */
10892 switch (priv->ieee->freq_band) {
10893 case LIBIPW_52GHZ_BAND:
10894 rates->ieee_mode = IPW_A_MODE;
10895 rates->purpose = IPW_RATE_CAPABILITIES;
10896 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10897 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10898 break;
10899
10900 default: /* Mixed or 2.4Ghz */
10901 rates->ieee_mode = IPW_G_MODE;
10902 rates->purpose = IPW_RATE_CAPABILITIES;
10903 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10904 LIBIPW_CCK_DEFAULT_RATES_MASK);
10905 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10906 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10907 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10908 }
10909 break;
10910 }
10911
10912 return 0;
10913 }
10914
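/* Push the full runtime configuration to the firmware after a (re)load:
 * TX power, adapter address, system config (including BT coexistence and
 * promiscuous settings), supported rates, RTS threshold, QoS, the random
 * seed and finally the host-complete command that moves the firmware into
 * the RUN state. */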
10915 static int ipw_config(struct ipw_priv *priv)
10916 {
10917 	/* This is only called from ipw_up, which resets/reloads the firmware,
10918 	   so we don't need to first disable the card before we configure
10919 	   it */
10920 if (ipw_set_tx_power(priv))
10921 goto error;
10922
10923 /* initialize adapter address */
10924 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10925 goto error;
10926
10927 /* set basic system config settings */
10928 init_sys_config(&priv->sys_config);
10929
10930 	/* Support Bluetooth if we have BT h/w on board and the user wants to.
10931 	 * BT priority is not supported yet (don't abort or defer our Tx). */
10932 if (bt_coexist) {
10933 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10934
10935 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10936 priv->sys_config.bt_coexistence
10937 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10938 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10939 priv->sys_config.bt_coexistence
10940 |= CFG_BT_COEXISTENCE_OOB;
10941 }
10942
10943 #ifdef CONFIG_IPW2200_PROMISCUOUS
10944 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10945 priv->sys_config.accept_all_data_frames = 1;
10946 priv->sys_config.accept_non_directed_frames = 1;
10947 priv->sys_config.accept_all_mgmt_bcpr = 1;
10948 priv->sys_config.accept_all_mgmt_frames = 1;
10949 }
10950 #endif
10951
10952 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10953 priv->sys_config.answer_broadcast_ssid_probe = 1;
10954 else
10955 priv->sys_config.answer_broadcast_ssid_probe = 0;
10956
10957 if (ipw_send_system_config(priv))
10958 goto error;
10959
10960 init_supported_rates(priv, &priv->rates);
10961 if (ipw_send_supported_rates(priv, &priv->rates))
10962 goto error;
10963
10964 /* Set request-to-send threshold */
10965 if (priv->rts_threshold) {
10966 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10967 goto error;
10968 }
10969 #ifdef CONFIG_IPW2200_QOS
10970 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10971 ipw_qos_activate(priv, NULL);
10972 #endif /* CONFIG_IPW2200_QOS */
10973
10974 if (ipw_set_random_seed(priv))
10975 goto error;
10976
10977 /* final state transition to the RUN state */
10978 if (ipw_send_host_complete(priv))
10979 goto error;
10980
10981 priv->status |= STATUS_INIT;
10982
10983 ipw_led_init(priv);
10984 ipw_led_radio_on(priv);
10985 priv->notif_missed_beacons = 0;
10986
10987 /* Set hardware WEP key if it is configured. */
10988 if ((priv->capability & CAP_PRIVACY_ON) &&
10989 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10990 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10991 ipw_set_hwcrypto_keys(priv);
10992
10993 return 0;
10994
10995 error:
10996 return -EIO;
10997 }
10998
10999 /*
11000 * NOTE:
11001 *
11002 * These tables have been tested in conjunction with the
11003 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
11004 *
11005  * Altering these values, using them on other hardware, or using them in
11006  * geographies not intended for resale of the above-mentioned Intel adapters
11007  * has not been tested.
11008 *
11009 * Remember to update the table in README.ipw2200 when changing this
11010 * table.
11011 *
11012 */
11013 static const struct libipw_geo ipw_geos[] = {
11014 { /* Restricted */
11015 "---",
11016 .bg_channels = 11,
11017 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11018 {2427, 4}, {2432, 5}, {2437, 6},
11019 {2442, 7}, {2447, 8}, {2452, 9},
11020 {2457, 10}, {2462, 11}},
11021 },
11022
11023 { /* Custom US/Canada */
11024 "ZZF",
11025 .bg_channels = 11,
11026 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11027 {2427, 4}, {2432, 5}, {2437, 6},
11028 {2442, 7}, {2447, 8}, {2452, 9},
11029 {2457, 10}, {2462, 11}},
11030 .a_channels = 8,
11031 .a = {{5180, 36},
11032 {5200, 40},
11033 {5220, 44},
11034 {5240, 48},
11035 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11036 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11037 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11038 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11039 },
11040
11041 { /* Rest of World */
11042 "ZZD",
11043 .bg_channels = 13,
11044 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11045 {2427, 4}, {2432, 5}, {2437, 6},
11046 {2442, 7}, {2447, 8}, {2452, 9},
11047 {2457, 10}, {2462, 11}, {2467, 12},
11048 {2472, 13}},
11049 },
11050
11051 { /* Custom USA & Europe & High */
11052 "ZZA",
11053 .bg_channels = 11,
11054 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11055 {2427, 4}, {2432, 5}, {2437, 6},
11056 {2442, 7}, {2447, 8}, {2452, 9},
11057 {2457, 10}, {2462, 11}},
11058 .a_channels = 13,
11059 .a = {{5180, 36},
11060 {5200, 40},
11061 {5220, 44},
11062 {5240, 48},
11063 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11064 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11065 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11066 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11067 {5745, 149},
11068 {5765, 153},
11069 {5785, 157},
11070 {5805, 161},
11071 {5825, 165}},
11072 },
11073
11074 { /* Custom NA & Europe */
11075 "ZZB",
11076 .bg_channels = 11,
11077 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11078 {2427, 4}, {2432, 5}, {2437, 6},
11079 {2442, 7}, {2447, 8}, {2452, 9},
11080 {2457, 10}, {2462, 11}},
11081 .a_channels = 13,
11082 .a = {{5180, 36},
11083 {5200, 40},
11084 {5220, 44},
11085 {5240, 48},
11086 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11087 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11088 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11089 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11090 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11091 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11092 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11093 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11094 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11095 },
11096
11097 { /* Custom Japan */
11098 "ZZC",
11099 .bg_channels = 11,
11100 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11101 {2427, 4}, {2432, 5}, {2437, 6},
11102 {2442, 7}, {2447, 8}, {2452, 9},
11103 {2457, 10}, {2462, 11}},
11104 .a_channels = 4,
11105 .a = {{5170, 34}, {5190, 38},
11106 {5210, 42}, {5230, 46}},
11107 },
11108
11109 { /* Custom */
11110 "ZZM",
11111 .bg_channels = 11,
11112 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11113 {2427, 4}, {2432, 5}, {2437, 6},
11114 {2442, 7}, {2447, 8}, {2452, 9},
11115 {2457, 10}, {2462, 11}},
11116 },
11117
11118 { /* Europe */
11119 "ZZE",
11120 .bg_channels = 13,
11121 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11122 {2427, 4}, {2432, 5}, {2437, 6},
11123 {2442, 7}, {2447, 8}, {2452, 9},
11124 {2457, 10}, {2462, 11}, {2467, 12},
11125 {2472, 13}},
11126 .a_channels = 19,
11127 .a = {{5180, 36},
11128 {5200, 40},
11129 {5220, 44},
11130 {5240, 48},
11131 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11132 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11133 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11134 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11135 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11136 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11137 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11138 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11139 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11140 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11141 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11142 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11143 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11144 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11145 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11146 },
11147
11148 { /* Custom Japan */
11149 "ZZJ",
11150 .bg_channels = 14,
11151 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11152 {2427, 4}, {2432, 5}, {2437, 6},
11153 {2442, 7}, {2447, 8}, {2452, 9},
11154 {2457, 10}, {2462, 11}, {2467, 12},
11155 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11156 .a_channels = 4,
11157 .a = {{5170, 34}, {5190, 38},
11158 {5210, 42}, {5230, 46}},
11159 },
11160
11161 { /* Rest of World */
11162 "ZZR",
11163 .bg_channels = 14,
11164 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11165 {2427, 4}, {2432, 5}, {2437, 6},
11166 {2442, 7}, {2447, 8}, {2452, 9},
11167 {2457, 10}, {2462, 11}, {2467, 12},
11168 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11169 LIBIPW_CH_PASSIVE_ONLY}},
11170 },
11171
11172 { /* High Band */
11173 "ZZH",
11174 .bg_channels = 13,
11175 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11176 {2427, 4}, {2432, 5}, {2437, 6},
11177 {2442, 7}, {2447, 8}, {2452, 9},
11178 {2457, 10}, {2462, 11},
11179 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11180 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11181 .a_channels = 4,
11182 .a = {{5745, 149}, {5765, 153},
11183 {5785, 157}, {5805, 161}},
11184 },
11185
11186 { /* Custom Europe */
11187 "ZZG",
11188 .bg_channels = 13,
11189 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11190 {2427, 4}, {2432, 5}, {2437, 6},
11191 {2442, 7}, {2447, 8}, {2452, 9},
11192 {2457, 10}, {2462, 11},
11193 {2467, 12}, {2472, 13}},
11194 .a_channels = 4,
11195 .a = {{5180, 36}, {5200, 40},
11196 {5220, 44}, {5240, 48}},
11197 },
11198
11199 { /* Europe */
11200 "ZZK",
11201 .bg_channels = 13,
11202 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11203 {2427, 4}, {2432, 5}, {2437, 6},
11204 {2442, 7}, {2447, 8}, {2452, 9},
11205 {2457, 10}, {2462, 11},
11206 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11207 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11208 .a_channels = 24,
11209 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11210 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11211 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11212 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11213 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11214 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11215 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11216 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11217 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11218 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11219 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11220 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11221 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11222 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11223 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11224 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11225 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11226 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11227 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11228 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11229 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11230 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11231 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11232 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11233 },
11234
11235 { /* Europe */
11236 "ZZL",
11237 .bg_channels = 11,
11238 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11239 {2427, 4}, {2432, 5}, {2437, 6},
11240 {2442, 7}, {2447, 8}, {2452, 9},
11241 {2457, 10}, {2462, 11}},
11242 .a_channels = 13,
11243 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11244 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11245 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11246 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11247 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11248 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11249 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11250 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11251 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11252 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11253 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11254 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11255 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11256 }
11257 };
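
/*
 * Each ipw_geos[] entry begins with a three-character SKU string that
 * ipw_up() below compares (memcmp) against the country code read from
 * EEPROM_COUNTRY_CODE.  The .bg and .a arrays hold
 * {center frequency in MHz, channel number, optional LIBIPW_CH_* flags};
 * a channel listed without flags carries no LIBIPW_CH_* restrictions.
 * A hypothetical 2.4 GHz-only entry (illustrative only, not a real SKU)
 * would follow the same initializer layout:
 *
 *	{
 *		"ZZX",
 *		.bg_channels = 3,
 *		.bg = {{2412, 1}, {2437, 6}, {2462, 11}},
 *	},
 */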
11258
11259 #define MAX_HW_RESTARTS 5
11260 static int ipw_up(struct ipw_priv *priv)
11261 {
11262 int rc, i, j;
11263
11264 /* Age scan list entries found before suspend */
11265 if (priv->suspend_time) {
11266 libipw_networks_age(priv->ieee, priv->suspend_time);
11267 priv->suspend_time = 0;
11268 }
11269
11270 if (priv->status & STATUS_EXIT_PENDING)
11271 return -EIO;
11272
11273 if (cmdlog && !priv->cmdlog) {
11274 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11275 GFP_KERNEL);
11276 if (priv->cmdlog == NULL) {
11277 IPW_ERROR("Error allocating %d command log entries.\n",
11278 cmdlog);
11279 return -ENOMEM;
11280 } else {
11281 priv->cmdlog_len = cmdlog;
11282 }
11283 }
11284
11285 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11286 /* Load the microcode, firmware, and eeprom.
11287 * Also start the clocks. */
11288 rc = ipw_load(priv);
11289 if (rc) {
11290 IPW_ERROR("Unable to load firmware: %d\n", rc);
11291 return rc;
11292 }
11293
11294 ipw_init_ordinals(priv);
11295 if (!(priv->config & CFG_CUSTOM_MAC))
11296 eeprom_parse_mac(priv, priv->mac_addr);
11297 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11298 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11299
11300 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11301 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11302 ipw_geos[j].name, 3))
11303 break;
11304 }
11305 if (j == ARRAY_SIZE(ipw_geos)) {
11306 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11307 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11308 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11309 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11310 j = 0;
11311 }
11312 		if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11313 			IPW_WARNING("Could not set geography.\n");
11314 return 0;
11315 }
11316
11317 if (priv->status & STATUS_RF_KILL_SW) {
11318 IPW_WARNING("Radio disabled by module parameter.\n");
11319 return 0;
11320 } else if (rf_kill_active(priv)) {
11321 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11322 "Kill switch must be turned off for "
11323 "wireless networking to work.\n");
11324 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11325 return 0;
11326 }
11327
11328 rc = ipw_config(priv);
11329 if (!rc) {
11330 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11331
11332 			/* If configured to try to auto-associate, kick
11333 			 * off a scan. */
11334 schedule_delayed_work(&priv->request_scan, 0);
11335
11336 return 0;
11337 }
11338
11339 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11340 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11341 i, MAX_HW_RESTARTS);
11342
11343 /* We had an error bringing up the hardware, so take it
11344 * all the way back down so we can try again */
11345 ipw_down(priv);
11346 }
11347
11348 	/* We tried to restart and configure the device for as long as
11349 	 * our patience allowed */
11350 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11351
11352 return -EIO;
11353 }
11354
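/*
 * ipw_bg_up() and ipw_bg_down() below are thin workqueue wrappers that run
 * ipw_up()/ipw_down() under priv->mutex when bring-up or tear-down is
 * deferred to the workqueue (e.g. schedule_work(&priv->up) on resume).
 */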
11355 static void ipw_bg_up(struct work_struct *work)
11356 {
11357 struct ipw_priv *priv =
11358 container_of(work, struct ipw_priv, up);
11359 mutex_lock(&priv->mutex);
11360 ipw_up(priv);
11361 mutex_unlock(&priv->mutex);
11362 }
11363
11364 static void ipw_deinit(struct ipw_priv *priv)
11365 {
11366 int i;
11367
11368 if (priv->status & STATUS_SCANNING) {
11369 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11370 ipw_abort_scan(priv);
11371 }
11372
11373 if (priv->status & STATUS_ASSOCIATED) {
11374 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11375 ipw_disassociate(priv);
11376 }
11377
11378 ipw_led_shutdown(priv);
11379
11380 	/* Wait up to 1s for status to change to not scanning and not
11381 	 * associated (disassociation can take a while for a full 802.11
11382 	 * exchange) */
11383 for (i = 1000; i && (priv->status &
11384 (STATUS_DISASSOCIATING |
11385 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11386 udelay(10);
11387
11388 if (priv->status & (STATUS_DISASSOCIATING |
11389 STATUS_ASSOCIATED | STATUS_SCANNING))
11390 IPW_DEBUG_INFO("Still associated or scanning...\n");
11391 else
11392 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11393
11394 /* Attempt to disable the card */
11395 ipw_send_card_disable(priv, 0);
11396
11397 priv->status &= ~STATUS_INIT;
11398 }
11399
11400 static void ipw_down(struct ipw_priv *priv)
11401 {
11402 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11403
11404 priv->status |= STATUS_EXIT_PENDING;
11405
11406 if (ipw_is_init(priv))
11407 ipw_deinit(priv);
11408
11409 /* Wipe out the EXIT_PENDING status bit if we are not actually
11410 * exiting the module */
11411 if (!exit_pending)
11412 priv->status &= ~STATUS_EXIT_PENDING;
11413
11414 /* tell the device to stop sending interrupts */
11415 ipw_disable_interrupts(priv);
11416
11417 /* Clear all bits but the RF Kill */
11418 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11419 netif_carrier_off(priv->net_dev);
11420
11421 ipw_stop_nic(priv);
11422
11423 ipw_led_radio_off(priv);
11424 }
11425
11426 static void ipw_bg_down(struct work_struct *work)
11427 {
11428 struct ipw_priv *priv =
11429 container_of(work, struct ipw_priv, down);
11430 mutex_lock(&priv->mutex);
11431 ipw_down(priv);
11432 mutex_unlock(&priv->mutex);
11433 }
11434
11435 /* Called by register_netdev() */
11436 static int ipw_net_init(struct net_device *dev)
11437 {
11438 int rc = 0;
11439 struct ipw_priv *priv = libipw_priv(dev);
11440
11441 mutex_lock(&priv->mutex);
11442 if (ipw_up(priv))
11443 rc = -EIO;
11444 mutex_unlock(&priv->mutex);
11445
11446 return rc;
11447 }
11448
11449 static int ipw_wdev_init(struct net_device *dev)
11450 {
11451 int i, rc = 0;
11452 struct ipw_priv *priv = libipw_priv(dev);
11453 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11454 struct wireless_dev *wdev = &priv->ieee->wdev;
11455
11456 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11457
11458 /* fill-out priv->ieee->bg_band */
11459 if (geo->bg_channels) {
11460 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11461
11462 bg_band->band = IEEE80211_BAND_2GHZ;
11463 bg_band->n_channels = geo->bg_channels;
11464 bg_band->channels = kcalloc(geo->bg_channels,
11465 sizeof(struct ieee80211_channel),
11466 GFP_KERNEL);
11467 if (!bg_band->channels) {
11468 rc = -ENOMEM;
11469 goto out;
11470 }
11471 /* translate geo->bg to bg_band.channels */
11472 for (i = 0; i < geo->bg_channels; i++) {
11473 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11474 bg_band->channels[i].center_freq = geo->bg[i].freq;
11475 bg_band->channels[i].hw_value = geo->bg[i].channel;
11476 bg_band->channels[i].max_power = geo->bg[i].max_power;
11477 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11478 bg_band->channels[i].flags |=
11479 IEEE80211_CHAN_PASSIVE_SCAN;
11480 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11481 bg_band->channels[i].flags |=
11482 IEEE80211_CHAN_NO_IBSS;
11483 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11484 bg_band->channels[i].flags |=
11485 IEEE80211_CHAN_RADAR;
11486 /* No equivalent for LIBIPW_CH_80211H_RULES,
11487 LIBIPW_CH_UNIFORM_SPREADING, or
11488 LIBIPW_CH_B_ONLY... */
11489 }
11490 /* point at bitrate info */
11491 bg_band->bitrates = ipw2200_bg_rates;
11492 bg_band->n_bitrates = ipw2200_num_bg_rates;
11493
11494 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11495 }
11496
11497 /* fill-out priv->ieee->a_band */
11498 if (geo->a_channels) {
11499 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11500
11501 a_band->band = IEEE80211_BAND_5GHZ;
11502 a_band->n_channels = geo->a_channels;
11503 a_band->channels = kcalloc(geo->a_channels,
11504 sizeof(struct ieee80211_channel),
11505 GFP_KERNEL);
11506 if (!a_band->channels) {
11507 rc = -ENOMEM;
11508 goto out;
11509 }
11510 /* translate geo->a to a_band.channels */
11511 for (i = 0; i < geo->a_channels; i++) {
11512 a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11513 a_band->channels[i].center_freq = geo->a[i].freq;
11514 a_band->channels[i].hw_value = geo->a[i].channel;
11515 a_band->channels[i].max_power = geo->a[i].max_power;
11516 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11517 a_band->channels[i].flags |=
11518 IEEE80211_CHAN_PASSIVE_SCAN;
11519 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11520 a_band->channels[i].flags |=
11521 IEEE80211_CHAN_NO_IBSS;
11522 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11523 a_band->channels[i].flags |=
11524 IEEE80211_CHAN_RADAR;
11525 /* No equivalent for LIBIPW_CH_80211H_RULES,
11526 LIBIPW_CH_UNIFORM_SPREADING, or
11527 LIBIPW_CH_B_ONLY... */
11528 }
11529 /* point at bitrate info */
11530 a_band->bitrates = ipw2200_a_rates;
11531 a_band->n_bitrates = ipw2200_num_a_rates;
11532
11533 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11534 }
11535
11536 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11537
11538 /* With that information in place, we can now register the wiphy... */
11539 if (wiphy_register(wdev->wiphy))
11540 rc = -EIO;
11541 out:
11542 return rc;
11543 }
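
/*
 * The per-band channel arrays allocated above are owned by the driver:
 * they are kfree()d, together with wiphy_unregister(), in ipw_pci_remove()
 * and on the rtap allocation error path in ipw_pci_probe().
 */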
11544
11545 /* PCI driver stuff */
11546 static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11547 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11548 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11549 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11550 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11551 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11552 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11553 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11554 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11555 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11556 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11557 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11558 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11559 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11560 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11561 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11562 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11563 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11564 {PCI_VDEVICE(INTEL, 0x104f), 0},
11565 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11566 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11567 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11568 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11569
11570 /* required last entry */
11571 {0,}
11572 };
11573
11574 MODULE_DEVICE_TABLE(pci, card_ids);
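
/*
 * card_ids entries are {vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data}.  The explicit rows for device 0x1043 pin
 * specific subsystem vendor/device IDs, while the PCI_VDEVICE() rows
 * match any subsystem ID.
 */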
11575
11576 static struct attribute *ipw_sysfs_entries[] = {
11577 &dev_attr_rf_kill.attr,
11578 &dev_attr_direct_dword.attr,
11579 &dev_attr_indirect_byte.attr,
11580 &dev_attr_indirect_dword.attr,
11581 &dev_attr_mem_gpio_reg.attr,
11582 &dev_attr_command_event_reg.attr,
11583 &dev_attr_nic_type.attr,
11584 &dev_attr_status.attr,
11585 &dev_attr_cfg.attr,
11586 &dev_attr_error.attr,
11587 &dev_attr_event_log.attr,
11588 &dev_attr_cmd_log.attr,
11589 &dev_attr_eeprom_delay.attr,
11590 &dev_attr_ucode_version.attr,
11591 &dev_attr_rtc.attr,
11592 &dev_attr_scan_age.attr,
11593 &dev_attr_led.attr,
11594 &dev_attr_speed_scan.attr,
11595 &dev_attr_net_stats.attr,
11596 &dev_attr_channels.attr,
11597 #ifdef CONFIG_IPW2200_PROMISCUOUS
11598 &dev_attr_rtap_iface.attr,
11599 &dev_attr_rtap_filter.attr,
11600 #endif
11601 NULL
11602 };
11603
11604 static struct attribute_group ipw_attribute_group = {
11605 .name = NULL, /* put in device directory */
11606 .attrs = ipw_sysfs_entries,
11607 };
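
/*
 * With .name = NULL, these attributes are created directly in the PCI
 * device's sysfs directory by sysfs_create_group() in ipw_pci_probe() and
 * removed again by sysfs_remove_group() in ipw_pci_remove().
 */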
11608
11609 #ifdef CONFIG_IPW2200_PROMISCUOUS
11610 static int ipw_prom_open(struct net_device *dev)
11611 {
11612 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11613 struct ipw_priv *priv = prom_priv->priv;
11614
11615 IPW_DEBUG_INFO("prom dev->open\n");
11616 netif_carrier_off(dev);
11617
11618 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11619 priv->sys_config.accept_all_data_frames = 1;
11620 priv->sys_config.accept_non_directed_frames = 1;
11621 priv->sys_config.accept_all_mgmt_bcpr = 1;
11622 priv->sys_config.accept_all_mgmt_frames = 1;
11623
11624 ipw_send_system_config(priv);
11625 }
11626
11627 return 0;
11628 }
11629
11630 static int ipw_prom_stop(struct net_device *dev)
11631 {
11632 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11633 struct ipw_priv *priv = prom_priv->priv;
11634
11635 IPW_DEBUG_INFO("prom dev->stop\n");
11636
11637 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11638 priv->sys_config.accept_all_data_frames = 0;
11639 priv->sys_config.accept_non_directed_frames = 0;
11640 priv->sys_config.accept_all_mgmt_bcpr = 0;
11641 priv->sys_config.accept_all_mgmt_frames = 0;
11642
11643 ipw_send_system_config(priv);
11644 }
11645
11646 return 0;
11647 }
11648
11649 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11650 struct net_device *dev)
11651 {
11652 IPW_DEBUG_INFO("prom dev->xmit\n");
11653 dev_kfree_skb(skb);
11654 return NETDEV_TX_OK;
11655 }
11656
11657 static const struct net_device_ops ipw_prom_netdev_ops = {
11658 .ndo_open = ipw_prom_open,
11659 .ndo_stop = ipw_prom_stop,
11660 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11661 .ndo_change_mtu = libipw_change_mtu,
11662 .ndo_set_mac_address = eth_mac_addr,
11663 .ndo_validate_addr = eth_validate_addr,
11664 };
11665
11666 static int ipw_prom_alloc(struct ipw_priv *priv)
11667 {
11668 int rc = 0;
11669
11670 if (priv->prom_net_dev)
11671 return -EPERM;
11672
11673 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11674 if (priv->prom_net_dev == NULL)
11675 return -ENOMEM;
11676
11677 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11678 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11679 priv->prom_priv->priv = priv;
11680
11681 strcpy(priv->prom_net_dev->name, "rtap%d");
11682 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11683
11684 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11685 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11686
11687 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11688 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11689
11690 rc = register_netdev(priv->prom_net_dev);
11691 if (rc) {
11692 free_libipw(priv->prom_net_dev, 1);
11693 priv->prom_net_dev = NULL;
11694 return rc;
11695 }
11696
11697 return 0;
11698 }
11699
11700 static void ipw_prom_free(struct ipw_priv *priv)
11701 {
11702 if (!priv->prom_net_dev)
11703 return;
11704
11705 unregister_netdev(priv->prom_net_dev);
11706 free_libipw(priv->prom_net_dev, 1);
11707
11708 priv->prom_net_dev = NULL;
11709 }
11710
11711 #endif
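
/*
 * With CONFIG_IPW2200_PROMISCUOUS and rtap_iface=1, ipw_prom_alloc()
 * registers a second "rtap%d" net_device of type ARPHRD_IEEE80211_RADIOTAP.
 * Opening it while the main interface is not in monitor mode sets the
 * accept_all_* firmware filters via ipw_send_system_config(); closing it
 * clears them again.  Its own transmit path simply frees the skb.
 */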
11712
11713 static const struct net_device_ops ipw_netdev_ops = {
11714 .ndo_init = ipw_net_init,
11715 .ndo_open = ipw_net_open,
11716 .ndo_stop = ipw_net_stop,
11717 .ndo_set_rx_mode = ipw_net_set_multicast_list,
11718 .ndo_set_mac_address = ipw_net_set_mac_address,
11719 .ndo_start_xmit = libipw_xmit,
11720 .ndo_change_mtu = libipw_change_mtu,
11721 .ndo_validate_addr = eth_validate_addr,
11722 };
11723
11724 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11725 const struct pci_device_id *ent)
11726 {
11727 int err = 0;
11728 struct net_device *net_dev;
11729 void __iomem *base;
11730 u32 length, val;
11731 struct ipw_priv *priv;
11732 int i;
11733
11734 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11735 if (net_dev == NULL) {
11736 err = -ENOMEM;
11737 goto out;
11738 }
11739
11740 priv = libipw_priv(net_dev);
11741 priv->ieee = netdev_priv(net_dev);
11742
11743 priv->net_dev = net_dev;
11744 priv->pci_dev = pdev;
11745 ipw_debug_level = debug;
11746 spin_lock_init(&priv->irq_lock);
11747 spin_lock_init(&priv->lock);
11748 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11749 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11750
11751 mutex_init(&priv->mutex);
11752 if (pci_enable_device(pdev)) {
11753 err = -ENODEV;
11754 goto out_free_libipw;
11755 }
11756
11757 pci_set_master(pdev);
11758
11759 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11760 if (!err)
11761 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11762 if (err) {
11763 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11764 goto out_pci_disable_device;
11765 }
11766
11767 pci_set_drvdata(pdev, priv);
11768
11769 err = pci_request_regions(pdev, DRV_NAME);
11770 if (err)
11771 goto out_pci_disable_device;
11772
11773 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11774 * PCI Tx retries from interfering with C3 CPU state */
11775 pci_read_config_dword(pdev, 0x40, &val);
11776 if ((val & 0x0000ff00) != 0)
11777 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11778
11779 length = pci_resource_len(pdev, 0);
11780 priv->hw_len = length;
11781
11782 base = pci_ioremap_bar(pdev, 0);
11783 if (!base) {
11784 err = -ENODEV;
11785 goto out_pci_release_regions;
11786 }
11787
11788 priv->hw_base = base;
11789 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11790 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11791
11792 err = ipw_setup_deferred_work(priv);
11793 if (err) {
11794 IPW_ERROR("Unable to setup deferred work\n");
11795 goto out_iounmap;
11796 }
11797
11798 ipw_sw_reset(priv, 1);
11799
11800 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11801 if (err) {
11802 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11803 goto out_iounmap;
11804 }
11805
11806 SET_NETDEV_DEV(net_dev, &pdev->dev);
11807
11808 mutex_lock(&priv->mutex);
11809
11810 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11811 priv->ieee->set_security = shim__set_security;
11812 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11813
11814 #ifdef CONFIG_IPW2200_QOS
11815 priv->ieee->is_qos_active = ipw_is_qos_active;
11816 priv->ieee->handle_probe_response = ipw_handle_beacon;
11817 priv->ieee->handle_beacon = ipw_handle_probe_response;
11818 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11819 #endif /* CONFIG_IPW2200_QOS */
11820
11821 priv->ieee->perfect_rssi = -20;
11822 priv->ieee->worst_rssi = -85;
11823
11824 net_dev->netdev_ops = &ipw_netdev_ops;
11825 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11826 net_dev->wireless_data = &priv->wireless_data;
11827 net_dev->wireless_handlers = &ipw_wx_handler_def;
11828 net_dev->ethtool_ops = &ipw_ethtool_ops;
11829 net_dev->irq = pdev->irq;
11830 net_dev->base_addr = (unsigned long)priv->hw_base;
11831 net_dev->mem_start = pci_resource_start(pdev, 0);
11832 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11833
11834 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11835 if (err) {
11836 IPW_ERROR("failed to create sysfs device attributes\n");
11837 mutex_unlock(&priv->mutex);
11838 goto out_release_irq;
11839 }
11840
11841 mutex_unlock(&priv->mutex);
11842 err = register_netdev(net_dev);
11843 if (err) {
11844 IPW_ERROR("failed to register network device\n");
11845 goto out_remove_sysfs;
11846 }
11847
11848 err = ipw_wdev_init(net_dev);
11849 if (err) {
11850 IPW_ERROR("failed to register wireless device\n");
11851 goto out_unregister_netdev;
11852 }
11853
11854 #ifdef CONFIG_IPW2200_PROMISCUOUS
11855 if (rtap_iface) {
11856 err = ipw_prom_alloc(priv);
11857 if (err) {
11858 IPW_ERROR("Failed to register promiscuous network "
11859 "device (error %d).\n", err);
11860 wiphy_unregister(priv->ieee->wdev.wiphy);
11861 kfree(priv->ieee->a_band.channels);
11862 kfree(priv->ieee->bg_band.channels);
11863 goto out_unregister_netdev;
11864 }
11865 }
11866 #endif
11867
11868 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11869 "channels, %d 802.11a channels)\n",
11870 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11871 priv->ieee->geo.a_channels);
11872
11873 return 0;
11874
11875 out_unregister_netdev:
11876 unregister_netdev(priv->net_dev);
11877 out_remove_sysfs:
11878 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11879 out_release_irq:
11880 free_irq(pdev->irq, priv);
11881 out_iounmap:
11882 iounmap(priv->hw_base);
11883 out_pci_release_regions:
11884 pci_release_regions(pdev);
11885 out_pci_disable_device:
11886 pci_disable_device(pdev);
11887 pci_set_drvdata(pdev, NULL);
11888 out_free_libipw:
11889 free_libipw(priv->net_dev, 0);
11890 out:
11891 return err;
11892 }
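
/*
 * The cascading out_* labels above undo the probe steps in reverse order,
 * so each failure point jumps to the label that releases everything
 * acquired up to that point.
 */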
11893
11894 static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11895 {
11896 struct ipw_priv *priv = pci_get_drvdata(pdev);
11897 struct list_head *p, *q;
11898 int i;
11899
11900 if (!priv)
11901 return;
11902
11903 mutex_lock(&priv->mutex);
11904
11905 priv->status |= STATUS_EXIT_PENDING;
11906 ipw_down(priv);
11907 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11908
11909 mutex_unlock(&priv->mutex);
11910
11911 unregister_netdev(priv->net_dev);
11912
11913 if (priv->rxq) {
11914 ipw_rx_queue_free(priv, priv->rxq);
11915 priv->rxq = NULL;
11916 }
11917 ipw_tx_queue_free(priv);
11918
11919 if (priv->cmdlog) {
11920 kfree(priv->cmdlog);
11921 priv->cmdlog = NULL;
11922 }
11923
11924 	/* make sure all work items are inactive */
11925 cancel_delayed_work_sync(&priv->adhoc_check);
11926 cancel_work_sync(&priv->associate);
11927 cancel_work_sync(&priv->disassociate);
11928 cancel_work_sync(&priv->system_config);
11929 cancel_work_sync(&priv->rx_replenish);
11930 cancel_work_sync(&priv->adapter_restart);
11931 cancel_delayed_work_sync(&priv->rf_kill);
11932 cancel_work_sync(&priv->up);
11933 cancel_work_sync(&priv->down);
11934 cancel_delayed_work_sync(&priv->request_scan);
11935 cancel_delayed_work_sync(&priv->request_direct_scan);
11936 cancel_delayed_work_sync(&priv->request_passive_scan);
11937 cancel_delayed_work_sync(&priv->scan_event);
11938 cancel_delayed_work_sync(&priv->gather_stats);
11939 cancel_work_sync(&priv->abort_scan);
11940 cancel_work_sync(&priv->roam);
11941 cancel_delayed_work_sync(&priv->scan_check);
11942 cancel_work_sync(&priv->link_up);
11943 cancel_work_sync(&priv->link_down);
11944 cancel_delayed_work_sync(&priv->led_link_on);
11945 cancel_delayed_work_sync(&priv->led_link_off);
11946 cancel_delayed_work_sync(&priv->led_act_off);
11947 cancel_work_sync(&priv->merge_networks);
11948
11949 /* Free MAC hash list for ADHOC */
11950 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11951 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11952 list_del(p);
11953 kfree(list_entry(p, struct ipw_ibss_seq, list));
11954 }
11955 }
11956
11957 kfree(priv->error);
11958 priv->error = NULL;
11959
11960 #ifdef CONFIG_IPW2200_PROMISCUOUS
11961 ipw_prom_free(priv);
11962 #endif
11963
11964 free_irq(pdev->irq, priv);
11965 iounmap(priv->hw_base);
11966 pci_release_regions(pdev);
11967 pci_disable_device(pdev);
11968 pci_set_drvdata(pdev, NULL);
11969 /* wiphy_unregister needs to be here, before free_libipw */
11970 wiphy_unregister(priv->ieee->wdev.wiphy);
11971 kfree(priv->ieee->a_band.channels);
11972 kfree(priv->ieee->bg_band.channels);
11973 free_libipw(priv->net_dev, 0);
11974 free_firmware();
11975 }
11976
11977 #ifdef CONFIG_PM
11978 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11979 {
11980 struct ipw_priv *priv = pci_get_drvdata(pdev);
11981 struct net_device *dev = priv->net_dev;
11982
11983 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11984
11985 	/* Take down the device; this powers it off, etc. */
11986 ipw_down(priv);
11987
11988 /* Remove the PRESENT state of the device */
11989 netif_device_detach(dev);
11990
11991 pci_save_state(pdev);
11992 pci_disable_device(pdev);
11993 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11994
11995 priv->suspend_at = get_seconds();
11996
11997 return 0;
11998 }
11999
12000 static int ipw_pci_resume(struct pci_dev *pdev)
12001 {
12002 struct ipw_priv *priv = pci_get_drvdata(pdev);
12003 struct net_device *dev = priv->net_dev;
12004 int err;
12005 u32 val;
12006
12007 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
12008
12009 pci_set_power_state(pdev, PCI_D0);
12010 err = pci_enable_device(pdev);
12011 if (err) {
12012 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
12013 dev->name);
12014 return err;
12015 }
12016 pci_restore_state(pdev);
12017
12018 /*
12019 * Suspend/Resume resets the PCI configuration space, so we have to
12020 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
12021 * from interfering with C3 CPU state. pci_restore_state won't help
12022 * here since it only restores the first 64 bytes pci config header.
12023 */
12024 pci_read_config_dword(pdev, 0x40, &val);
12025 if ((val & 0x0000ff00) != 0)
12026 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
12027
12028 /* Set the device back into the PRESENT state; this will also wake
12029 	 * the queue if needed */
12030 netif_device_attach(dev);
12031
12032 priv->suspend_time = get_seconds() - priv->suspend_at;
12033
12034 /* Bring the device back up */
12035 schedule_work(&priv->up);
12036
12037 return 0;
12038 }
12039 #endif
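
/*
 * priv->suspend_at and priv->suspend_time bracket the time spent asleep:
 * the timestamp taken in ipw_pci_suspend() becomes a delta in
 * ipw_pci_resume() and is later passed to libipw_networks_age() in
 * ipw_up(), so cached scan results are aged by the suspend duration.
 */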
12040
12041 static void ipw_pci_shutdown(struct pci_dev *pdev)
12042 {
12043 struct ipw_priv *priv = pci_get_drvdata(pdev);
12044
12045 	/* Take down the device; this powers it off, etc. */
12046 ipw_down(priv);
12047
12048 pci_disable_device(pdev);
12049 }
12050
12051 /* driver initialization stuff */
12052 static struct pci_driver ipw_driver = {
12053 .name = DRV_NAME,
12054 .id_table = card_ids,
12055 .probe = ipw_pci_probe,
12056 .remove = __devexit_p(ipw_pci_remove),
12057 #ifdef CONFIG_PM
12058 .suspend = ipw_pci_suspend,
12059 .resume = ipw_pci_resume,
12060 #endif
12061 .shutdown = ipw_pci_shutdown,
12062 };
12063
12064 static int __init ipw_init(void)
12065 {
12066 int ret;
12067
12068 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12069 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12070
12071 ret = pci_register_driver(&ipw_driver);
12072 if (ret) {
12073 IPW_ERROR("Unable to initialize PCI module\n");
12074 return ret;
12075 }
12076
12077 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12078 if (ret) {
12079 IPW_ERROR("Unable to create driver sysfs file\n");
12080 pci_unregister_driver(&ipw_driver);
12081 return ret;
12082 }
12083
12084 return ret;
12085 }
12086
12087 static void __exit ipw_exit(void)
12088 {
12089 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12090 pci_unregister_driver(&ipw_driver);
12091 }
12092
12093 module_param(disable, int, 0444);
12094 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12095
12096 module_param(associate, int, 0444);
12097 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12098
12099 module_param(auto_create, int, 0444);
12100 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12101
12102 module_param_named(led, led_support, int, 0444);
12103 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12104
12105 module_param(debug, int, 0444);
12106 MODULE_PARM_DESC(debug, "debug output mask");
12107
12108 module_param_named(channel, default_channel, int, 0444);
12109 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
12110
12111 #ifdef CONFIG_IPW2200_PROMISCUOUS
12112 module_param(rtap_iface, int, 0444);
12113 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12114 #endif
12115
12116 #ifdef CONFIG_IPW2200_QOS
12117 module_param(qos_enable, int, 0444);
12118 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12119
12120 module_param(qos_burst_enable, int, 0444);
12121 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12122
12123 module_param(qos_no_ack_mask, int, 0444);
12124 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12125
12126 module_param(burst_duration_CCK, int, 0444);
12127 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12128
12129 module_param(burst_duration_OFDM, int, 0444);
12130 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12131 #endif /* CONFIG_IPW2200_QOS */
12132
12133 #ifdef CONFIG_IPW2200_MONITOR
12134 module_param_named(mode, network_mode, int, 0444);
12135 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12136 #else
12137 module_param_named(mode, network_mode, int, 0444);
12138 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12139 #endif
12140
12141 module_param(bt_coexist, int, 0444);
12142 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12143
12144 module_param(hwcrypto, int, 0444);
12145 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12146
12147 module_param(cmdlog, int, 0444);
12148 MODULE_PARM_DESC(cmdlog,
12149 "allocate a ring buffer for logging firmware commands");
12150
12151 module_param(roaming, int, 0444);
12152 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12153
12154 module_param(antenna, int, 0444);
12155 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
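
/*
 * Illustrative module load using the parameters declared above (values
 * are examples only):
 *
 *	modprobe ipw2200 led=1 channel=6 mode=0 debug=0
 */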
12156
12157 module_exit(ipw_exit);
12158 module_init(ipw_init);