mpc52xx/wdt: remove obsolete old WDT implementation
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / wireless / ipw2x00 / ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include "ipw2200.h"
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84
/* Module parameters / global driver configuration (defaults below;
 * presumably registered with module_param() later in the file —
 * not visible in this chunk). */
static int cmdlog = 0;
static int debug = 0;
static int default_channel = 0;
static int network_mode = 0;

static u32 ipw_debug_level;	/* bitmask gating IPW_DEBUG_* output */
static int associate;
static int auto_create = 1;
static int led_support = 0;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
static int roaming = 1;
/* single-letter tags for the PHY modes: a, b, g, unknown */
static const char ipw_modes[] = {
	'a', 'b', 'g', '?'
};
static int antenna = CFG_SYS_ANTENNA_BOTH;

#ifdef CONFIG_IPW2200_PROMISCUOUS
static int rtap_iface = 0;	/* def: 0 -- do not create rtap interface */
#endif
106
/* Rate table: entries 0-3 are the CCK (802.11b) rates, entries 4-11
 * the OFDM rates.  bitrate is presumably in units of 100 kbps (the
 * mac80211 ieee80211_rate convention, so 10 == 1 Mbps) — confirm
 * against the ieee80211 headers. */
static struct ieee80211_rate ipw2200_rates[] = {
	{ .bitrate = 10 },
	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60 },
	{ .bitrate = 90 },
	{ .bitrate = 120 },
	{ .bitrate = 180 },
	{ .bitrate = 240 },
	{ .bitrate = 360 },
	{ .bitrate = 480 },
	{ .bitrate = 540 }
};
121
122 #define ipw2200_a_rates (ipw2200_rates + 4)
123 #define ipw2200_num_a_rates 8
124 #define ipw2200_bg_rates (ipw2200_rates + 0)
125 #define ipw2200_num_bg_rates 12
126
127 #ifdef CONFIG_IPW2200_QOS
128 static int qos_enable = 0;
129 static int qos_burst_enable = 0;
130 static int qos_no_ack_mask = 0;
131 static int burst_duration_CCK = 0;
132 static int burst_duration_OFDM = 0;
133
/* Default QoS/EDCA parameter sets.  Each libipw_qos_parameters block
 * holds one value per TX queue (TX0..TX3) for, in order: cw_min,
 * cw_max, aifs, acm, txop_limit (field meaning inferred from the
 * macro names — confirm against struct libipw_qos_parameters). */

/* QoS-enabled defaults for the OFDM (a/g) PHY */
static struct libipw_qos_parameters def_qos_parameters_OFDM = {
	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
	 QOS_TX3_CW_MIN_OFDM},
	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
	 QOS_TX3_CW_MAX_OFDM},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
};

/* QoS-enabled defaults for the CCK (b) PHY */
static struct libipw_qos_parameters def_qos_parameters_CCK = {
	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
	 QOS_TX3_CW_MIN_CCK},
	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
	 QOS_TX3_CW_MAX_CCK},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
	 QOS_TX3_TXOP_LIMIT_CCK}
};

/* Non-QoS (legacy) defaults for the OFDM PHY */
static struct libipw_qos_parameters def_parameters_OFDM = {
	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
	 DEF_TX3_CW_MIN_OFDM},
	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
	 DEF_TX3_CW_MAX_OFDM},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
};

/* Non-QoS (legacy) defaults for the CCK PHY */
static struct libipw_qos_parameters def_parameters_CCK = {
	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
	 DEF_TX3_CW_MIN_CCK},
	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
	 DEF_TX3_CW_MAX_CCK},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
	 DEF_TX3_TXOP_LIMIT_CCK}
};

/* OUI carried in QoS information elements (bytes 00:50:F2) */
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };

/* Map a 0..7 priority value (presumably 802.1d UP) to a HW TX queue */
static int from_priority_to_tx_queue[] = {
	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
};
184
185 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
186
187 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
188 *qos_param);
189 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
190 *qos_param);
191 #endif /* CONFIG_IPW2200_QOS */
192
193 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
194 static void ipw_remove_current_network(struct ipw_priv *priv);
195 static void ipw_rx(struct ipw_priv *priv);
196 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
197 struct clx2_tx_queue *txq, int qindex);
198 static int ipw_queue_reset(struct ipw_priv *priv);
199
200 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
201 int len, int sync);
202
203 static void ipw_tx_queue_free(struct ipw_priv *);
204
205 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
206 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
207 static void ipw_rx_queue_replenish(void *);
208 static int ipw_up(struct ipw_priv *);
209 static void ipw_bg_up(struct work_struct *work);
210 static void ipw_down(struct ipw_priv *);
211 static void ipw_bg_down(struct work_struct *work);
212 static int ipw_config(struct ipw_priv *);
213 static int init_supported_rates(struct ipw_priv *priv,
214 struct ipw_supported_rates *prates);
215 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
216 static void ipw_send_wep_keys(struct ipw_priv *, int);
217
218 static int snprint_line(char *buf, size_t count,
219 const u8 * data, u32 len, u32 ofs)
220 {
221 int out, i, j, l;
222 char c;
223
224 out = snprintf(buf, count, "%08X", ofs);
225
226 for (l = 0, i = 0; i < 2; i++) {
227 out += snprintf(buf + out, count - out, " ");
228 for (j = 0; j < 8 && l < len; j++, l++)
229 out += snprintf(buf + out, count - out, "%02X ",
230 data[(i * 8 + j)]);
231 for (; j < 8; j++)
232 out += snprintf(buf + out, count - out, " ");
233 }
234
235 out += snprintf(buf + out, count - out, " ");
236 for (l = 0, i = 0; i < 2; i++) {
237 out += snprintf(buf + out, count - out, " ");
238 for (j = 0; j < 8 && l < len; j++, l++) {
239 c = data[(i * 8 + j)];
240 if (!isascii(c) || !isprint(c))
241 c = '.';
242
243 out += snprintf(buf + out, count - out, "%c", c);
244 }
245
246 for (; j < 8; j++)
247 out += snprintf(buf + out, count - out, " ");
248 }
249
250 return out;
251 }
252
/*
 * Hex-dump @len bytes to the kernel log, 16 bytes per formatted line,
 * but only when the given debug @level bit is set in ipw_debug_level.
 */
static void printk_buf(int level, const u8 * data, u32 len)
{
	char line[81];		/* one formatted dump line + NUL */
	u32 ofs = 0;
	if (!(ipw_debug_level & level))
		return;

	while (len) {
		snprint_line(line, sizeof(line), &data[ofs],
			     min(len, 16U), ofs);
		printk(KERN_DEBUG "%s\n", line);
		ofs += 16;
		len -= min(len, 16U);
	}
}
268
269 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
270 {
271 size_t out = size;
272 u32 ofs = 0;
273 int total = 0;
274
275 while (size && len) {
276 out = snprint_line(output, size, &data[ofs],
277 min_t(size_t, len, 16U), ofs);
278
279 ofs += 16;
280 output += out;
281 size -= out;
282 len -= min_t(size_t, len, 16U);
283 total += out;
284 }
285 return total;
286 }
287
/*
 * Register / SRAM accessor layer.
 *
 * The first 4K of device space is reached directly through the mapped
 * hw_base (the _ipw_read/_ipw_write{8,16,32} helpers).  Space above
 * 4K is reached indirectly via the IPW_INDIRECT_ADDR address latch
 * and IPW_INDIRECT_DATA window (the *_reg{8,16,32} helpers defined
 * further below).  The ipw_* wrappers here only add IPW_DEBUG_IO
 * tracing around the raw helpers.
 */

/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)

/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)

/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg8(a, b, c);
}

/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg16(a, b, c);
}

/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg32(a, b, c);
}

/* 8-bit direct write (low 4K) */
static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
			       u8 val)
{
	writeb(val, ipw->hw_base + ofs);
}

/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write8(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
		     __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write8(ipw, ofs, val); \
} while (0)

/* 16-bit direct write (low 4K) */
static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
				u16 val)
{
	writew(val, ipw->hw_base + ofs);
}

/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write16(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
		     __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write16(ipw, ofs, val); \
} while (0)

/* 32-bit direct write (low 4K) */
static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
				u32 val)
{
	writel(val, ipw->hw_base + ofs);
}

/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write32(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
		     __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write32(ipw, ofs, val); \
} while (0)

/* 8-bit direct read (low 4K) */
static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
{
	return readb(ipw->hw_base + ofs);
}

/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
#define ipw_read8(ipw, ofs) ({ \
	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
		     (u32)(ofs)); \
	_ipw_read8(ipw, ofs); \
})

/* 16-bit direct read (low 4K) */
static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
{
	return readw(ipw->hw_base + ofs);
}

/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
#define ipw_read16(ipw, ofs) ({ \
	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
		     (u32)(ofs)); \
	_ipw_read16(ipw, ofs); \
})

/* 32-bit direct read (low 4K) */
static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
{
	return readl(ipw->hw_base + ofs);
}

/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
#define ipw_read32(ipw, ofs) ({ \
	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
		     (u32)(ofs)); \
	_ipw_read32(ipw, ofs); \
})

static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
#define ipw_read_indirect(a, b, c, d) ({ \
	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
		     __LINE__, (u32)(b), (u32)(d)); \
	_ipw_read_indirect(a, b, c, d); \
})

/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
				int num);
#define ipw_write_indirect(a, b, c, d) do { \
	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
		     __LINE__, (u32)(b), (u32)(d)); \
	_ipw_write_indirect(a, b, c, d); \
} while (0)
420
/* 32-bit indirect write (above 4K): latch the target address into
 * IPW_INDIRECT_ADDR, then push the value through IPW_INDIRECT_DATA. */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
{
	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
}

/* 8-bit indirect write (above 4K): the address latch is dword-wide,
 * so latch the dword-aligned address and write the byte at its
 * offset within the IPW_INDIRECT_DATA window. */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = reg - aligned_addr;

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
}

/* 16-bit indirect write (above 4K); the offset within the dword is
 * masked down to a 16-bit boundary. */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = (reg - aligned_addr) & (~0x1ul);

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
}

/* 8-bit indirect read (above 4K): read the containing dword and
 * shift the addressed byte down. */
static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
	u32 word;
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
	return (word >> ((reg & 0x3) * 8)) & 0xff;
}

/* 32-bit indirect read (above 4K) */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
{
	u32 value;

	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);

	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
	return value;
}
473
474 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
475 /* for area above 1st 4K of SRAM/reg space */
476 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
477 int num)
478 {
479 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
480 u32 dif_len = addr - aligned_addr;
481 u32 i;
482
483 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
484
485 if (num <= 0) {
486 return;
487 }
488
489 /* Read the first dword (or portion) byte by byte */
490 if (unlikely(dif_len)) {
491 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
492 /* Start reading at aligned_addr + dif_len */
493 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
494 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
495 aligned_addr += 4;
496 }
497
498 /* Read all of the middle dwords as dwords, with auto-increment */
499 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
500 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
501 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
502
503 /* Read the last dword (or portion) byte by byte */
504 if (unlikely(num)) {
505 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
506 for (i = 0; num > 0; i++, num--)
507 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
508 }
509 }
510
/* General purpose, no alignment requirement, iterative (multi-byte) write, */
/* for area above 1st 4K of SRAM/reg space.  Mirrors
 * _ipw_read_indirect(): byte-wise leading partial dword, dword-wise
 * middle via the auto-increment window, byte-wise trailing partial. */
static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
				int num)
{
	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = addr - aligned_addr;
	u32 i;

	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);

	if (num <= 0) {
		return;
	}

	/* Write the first dword (or portion) byte by byte */
	if (unlikely(dif_len)) {
		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
		/* Start writing at aligned_addr + dif_len */
		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
		aligned_addr += 4;
	}

	/* Write all of the middle dwords as dwords, with auto-increment */
	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);

	/* Write the last dword (or portion) byte by byte */
	if (unlikely(num)) {
		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
		for (i = 0; num > 0; i++, num--, buf++)
			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
	}
}
547
/* General purpose, no alignment requirement, iterative (multi-byte) write, */
/* for 1st 4K of SRAM/regs space */
static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
			     int num)
{
	memcpy_toio((priv->hw_base + addr), buf, num);
}

/* Set bit(s) in low 4K of SRAM/regs (read-modify-write, not atomic) */
static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
{
	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
}

/* Clear bit(s) in low 4K of SRAM/regs (read-modify-write, not atomic) */
static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
{
	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
}

/* Unmask all device interrupts.  STATUS_INT_ENABLED keeps the
 * operation idempotent; callers take priv->irq_lock via the locked
 * wrappers below. */
static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
{
	if (priv->status & STATUS_INT_ENABLED)
		return;
	priv->status |= STATUS_INT_ENABLED;
	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
}

/* Mask all device interrupts (idempotent; see above re: locking). */
static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
{
	if (!(priv->status & STATUS_INT_ENABLED))
		return;
	priv->status &= ~STATUS_INT_ENABLED;
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
}

/* irq_lock-holding wrapper around __ipw_enable_interrupts() */
static inline void ipw_enable_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

/* irq_lock-holding wrapper around __ipw_disable_interrupts() */
static inline void ipw_disable_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}
601
602 static char *ipw_error_desc(u32 val)
603 {
604 switch (val) {
605 case IPW_FW_ERROR_OK:
606 return "ERROR_OK";
607 case IPW_FW_ERROR_FAIL:
608 return "ERROR_FAIL";
609 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
610 return "MEMORY_UNDERFLOW";
611 case IPW_FW_ERROR_MEMORY_OVERFLOW:
612 return "MEMORY_OVERFLOW";
613 case IPW_FW_ERROR_BAD_PARAM:
614 return "BAD_PARAM";
615 case IPW_FW_ERROR_BAD_CHECKSUM:
616 return "BAD_CHECKSUM";
617 case IPW_FW_ERROR_NMI_INTERRUPT:
618 return "NMI_INTERRUPT";
619 case IPW_FW_ERROR_BAD_DATABASE:
620 return "BAD_DATABASE";
621 case IPW_FW_ERROR_ALLOC_FAIL:
622 return "ALLOC_FAIL";
623 case IPW_FW_ERROR_DMA_UNDERRUN:
624 return "DMA_UNDERRUN";
625 case IPW_FW_ERROR_DMA_STATUS:
626 return "DMA_STATUS";
627 case IPW_FW_ERROR_DINO_ERROR:
628 return "DINO_ERROR";
629 case IPW_FW_ERROR_EEPROM_ERROR:
630 return "EEPROM_ERROR";
631 case IPW_FW_ERROR_SYSASSERT:
632 return "SYSASSERT";
633 case IPW_FW_ERROR_FATAL_ERROR:
634 return "FATAL_ERROR";
635 default:
636 return "UNKNOWN_ERROR";
637 }
638 }
639
/*
 * Pretty-print a previously captured firmware error log via
 * IPW_ERROR: status/config, then every error element, then the raw
 * event log entries.  A NULL @error just logs that there is nothing
 * to dump.
 */
static void ipw_dump_error_log(struct ipw_priv *priv,
			       struct ipw_fw_error *error)
{
	u32 i;

	if (!error) {
		IPW_ERROR("Error allocating and capturing error log. "
			  "Nothing to dump.\n");
		return;
	}

	IPW_ERROR("Start IPW Error Log Dump:\n");
	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
		  error->status, error->config);

	for (i = 0; i < error->elem_len; i++)
		IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			  ipw_error_desc(error->elem[i].desc),
			  error->elem[i].time,
			  error->elem[i].blink1,
			  error->elem[i].blink2,
			  error->elem[i].link1,
			  error->elem[i].link2, error->elem[i].data);
	for (i = 0; i < error->log_len; i++)
		IPW_ERROR("%i\t0x%08x\t%i\n",
			  error->log[i].time,
			  error->log[i].data, error->log[i].event);
}
668
669 static inline int ipw_is_init(struct ipw_priv *priv)
670 {
671 return (priv->status & STATUS_INIT) ? 1 : 0;
672 }
673
/*
 * Fetch the value of device "ordinal" @ord into @val.
 *
 * @len: in: size of the buffer at @val; out: number of bytes stored
 *       (or, for a too-small buffer on a table-2 ordinal, the size
 *       that would be required).
 *
 * The high bits of @ord select one of three device tables (see the
 * per-case comments); the low bits index into that table.  Returns 0
 * on success, -EINVAL on any validation failure.
 */
static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
{
	u32 addr, field_info, field_len, field_count, total_len;

	IPW_DEBUG_ORD("ordinal = %i\n", ord);

	if (!priv || !val || !len) {
		IPW_DEBUG_ORD("Invalid argument\n");
		return -EINVAL;
	}

	/* verify device ordinal tables have been initialized */
	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
		IPW_DEBUG_ORD("Access ordinals before initialization\n");
		return -EINVAL;
	}

	switch (IPW_ORD_TABLE_ID_MASK & ord) {
	case IPW_ORD_TABLE_0_MASK:
		/*
		 * TABLE 0: Direct access to a table of 32 bit values
		 *
		 * This is a very simple table with the data directly
		 * read from the table
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check.  NOTE(review): '>' accepts
		 * ord == table0_len; confirm whether the *_len fields
		 * are last-valid indices rather than entry counts,
		 * otherwise these checks are off by one. */
		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) longer then "
				      "max (%i)\n", ord, priv->table0_len);
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
			      ord, priv->table0_addr + (ord << 2));

		*len = sizeof(u32);
		ord <<= 2;	/* entries are 4 bytes apart */
		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
		break;

	case IPW_ORD_TABLE_1_MASK:
		/*
		 * TABLE 1: Indirect access to a table of 32 bit values
		 *
		 * This is a fairly large table of u32 values each
		 * representing starting addr for the data (which is
		 * also a u32)
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check (see table-0 NOTE re: '>') */
		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		*((u32 *) val) =
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
		*len = sizeof(u32);
		break;

	case IPW_ORD_TABLE_2_MASK:
		/*
		 * TABLE 2: Indirect access to a table of variable sized values
		 *
		 * This table consist of six values, each containing
		 * - dword containing the starting offset of the data
		 * - dword containing the lengh in the first 16bits
		 *   and the count in the second 16bits
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check (see table-0 NOTE re: '>') */
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));

		/* get the second DW of statistics ;
		 * two 16-bit words - first is length, second is count */
		field_info =
		    ipw_read_reg32(priv,
				   priv->table2_addr + (ord << 3) +
				   sizeof(u32));

		/* get each entry length.  NOTE(review): these pointer
		 * casts pick the low/high halves of field_info by
		 * memory layout and therefore assume a little-endian
		 * CPU — confirm if this is ever built big-endian. */
		field_len = *((u16 *) & field_info);

		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);

		/* abort if not enought memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
			*len = total_len;	/* report required size */
			return -EINVAL;
		}

		*len = total_len;
		if (!total_len)
			return 0;

		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
		break;

	default:
		IPW_DEBUG_ORD("Invalid ordinal!\n");
		return -EINVAL;

	}

	return 0;
}
815
/*
 * Read the addresses and lengths of the three device ordinal tables
 * from fixed device locations so ipw_get_ordinal() can be used later.
 */
static void ipw_init_ordinals(struct ipw_priv *priv)
{
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);

	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);

	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);

	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);

	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */

	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);

}
838
839 static u32 ipw_register_toggle(u32 reg)
840 {
841 reg &= ~IPW_START_STANDBY;
842 if (reg & IPW_GATE_ODMA)
843 reg &= ~IPW_GATE_ODMA;
844 if (reg & IPW_GATE_IDMA)
845 reg &= ~IPW_GATE_IDMA;
846 if (reg & IPW_GATE_ADMA)
847 reg &= ~IPW_GATE_ADMA;
848 return reg;
849 }
850
851 /*
852 * LED behavior:
853 * - On radio ON, turn on any LEDs that require to be on during start
854 * - On initialization, start unassociated blink
855 * - On association, disable unassociated blink
856 * - On disassociation, start unassociated blink
857 * - On radio OFF, turn off any LEDs started during radio on
858 *
859 */
860 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
861 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
862 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
863
/*
 * Turn the link/association LED on (no-op when LEDs are disabled or
 * for nic type 1, which drives its LEDs via the band functions).
 * While unassociated, arranges for led_link_off to run so the LED
 * blinks with period LD_TIME_LINK_ON/OFF.
 */
static void ipw_led_link_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured to not use LEDs, or nic_type is 1,
	 * then we don't toggle a LINK led */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (!(priv->status & STATUS_RF_KILL_MASK) &&
	    !(priv->status & STATUS_LED_LINK_ON)) {
		IPW_DEBUG_LED("Link LED On\n");
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led |= priv->led_association_on;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		priv->status |= STATUS_LED_LINK_ON;

		/* If we aren't associated, schedule turning the LED off */
		if (!(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue,
					   &priv->led_link_off,
					   LD_TIME_LINK_ON);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Deferred-work entry: run ipw_led_link_on() under priv->mutex */
static void ipw_bg_led_link_on(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_link_on.work);
	mutex_lock(&priv->mutex);
	ipw_led_link_on(priv);
	mutex_unlock(&priv->mutex);
}
907
/*
 * Turn the link/association LED off.  If the radio is on and we are
 * still unassociated, reschedules led_link_on to continue the
 * unassociated blink cycle.
 */
static void ipw_led_link_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_LINK_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_association_off;
		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Link LED Off\n");

		priv->status &= ~STATUS_LED_LINK_ON;

		/* If we aren't associated and the radio is on, schedule
		 * turning the LED on (blink while unassociated) */
		if (!(priv->status & STATUS_RF_KILL_MASK) &&
		    !(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue, &priv->led_link_on,
					   LD_TIME_LINK_OFF);

	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Deferred-work entry: run ipw_led_link_off() under priv->mutex */
static void ipw_bg_led_link_off(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_link_off.work);
	mutex_lock(&priv->mutex);
	ipw_led_link_off(priv);
	mutex_unlock(&priv->mutex);
}
952
953 static void __ipw_led_activity_on(struct ipw_priv *priv)
954 {
955 u32 led;
956
957 if (priv->config & CFG_NO_LED)
958 return;
959
960 if (priv->status & STATUS_RF_KILL_MASK)
961 return;
962
963 if (!(priv->status & STATUS_LED_ACT_ON)) {
964 led = ipw_read_reg32(priv, IPW_EVENT_REG);
965 led |= priv->led_activity_on;
966
967 led = ipw_register_toggle(led);
968
969 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
970 ipw_write_reg32(priv, IPW_EVENT_REG, led);
971
972 IPW_DEBUG_LED("Activity LED On\n");
973
974 priv->status |= STATUS_LED_ACT_ON;
975
976 cancel_delayed_work(&priv->led_act_off);
977 queue_delayed_work(priv->workqueue, &priv->led_act_off,
978 LD_TIME_ACT_ON);
979 } else {
980 /* Reschedule LED off for full time period */
981 cancel_delayed_work(&priv->led_act_off);
982 queue_delayed_work(priv->workqueue, &priv->led_act_off,
983 LD_TIME_ACT_ON);
984 }
985 }
986
987 #if 0
988 void ipw_led_activity_on(struct ipw_priv *priv)
989 {
990 unsigned long flags;
991 spin_lock_irqsave(&priv->lock, flags);
992 __ipw_led_activity_on(priv);
993 spin_unlock_irqrestore(&priv->lock, flags);
994 }
995 #endif /* 0 */
996
/*
 * Turn the activity LED off (runs from the led_act_off delayed work
 * armed by __ipw_led_activity_on()).  Takes priv->lock itself.
 */
static void ipw_led_activity_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	if (priv->config & CFG_NO_LED)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_ACT_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_activity_off;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Activity LED Off\n");

		priv->status &= ~STATUS_LED_ACT_ON;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Deferred-work entry: run ipw_led_activity_off() under priv->mutex */
static void ipw_bg_led_activity_off(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_act_off.work);
	mutex_lock(&priv->mutex);
	ipw_led_activity_off(priv);
	mutex_unlock(&priv->mutex);
}
1032
/* Light the band/mode LEDs according to the associated network's band
 * (802.11a/b/g).  Only EEPROM NIC type 1 has mode LEDs, and an active
 * association (priv->assoc_network) is required. */
static void ipw_led_band_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* Only nic type 1 supports mode LEDs */
	if (priv->config & CFG_NO_LED ||
	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	led = ipw_read_reg32(priv, IPW_EVENT_REG);
	if (priv->assoc_network->mode == IEEE_A) {
		/* 802.11a: OFDM LED on, association LED off */
		led |= priv->led_ofdm_on;
		led &= priv->led_association_off;
		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
	} else if (priv->assoc_network->mode == IEEE_G) {
		/* 802.11g: both OFDM and association LEDs on */
		led |= priv->led_ofdm_on;
		led |= priv->led_association_on;
		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
	} else {
		/* 802.11b: OFDM LED off, association LED on */
		led &= priv->led_ofdm_off;
		led |= priv->led_association_on;
		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
	}

	led = ipw_register_toggle(led);

	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
	ipw_write_reg32(priv, IPW_EVENT_REG, led);

	spin_unlock_irqrestore(&priv->lock, flags);
}
1067
/* Turn both band/mode LEDs (OFDM and association) off.  Only EEPROM
 * NIC type 1 has mode LEDs. */
static void ipw_led_band_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* Only nic type 1 supports mode LEDs */
	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	led = ipw_read_reg32(priv, IPW_EVENT_REG);
	led &= priv->led_ofdm_off;
	led &= priv->led_association_off;

	led = ipw_register_toggle(led);

	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
	ipw_write_reg32(priv, IPW_EVENT_REG, led);

	spin_unlock_irqrestore(&priv->lock, flags);
}
1090
/* Radio turned on: indicate by lighting the link LED. */
static void ipw_led_radio_on(struct ipw_priv *priv)
{
	ipw_led_link_on(priv);
}
1095
/* Radio turned off: extinguish both activity and link LEDs. */
static void ipw_led_radio_off(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
}
1101
/* Link established: light the link LED. */
static void ipw_led_link_up(struct ipw_priv *priv)
{
	/* Set the Link Led on for all nic types */
	ipw_led_link_on(priv);
}
1107
/* Link lost: turn activity and link LEDs off; if RF kill is active,
 * also run the radio-off LED sequence. */
static void ipw_led_link_down(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);

	if (priv->status & STATUS_RF_KILL_MASK)
		ipw_led_radio_off(priv);
}
1116
/* Read the NIC type from EEPROM and assign the LED pin masks, then
 * establish the initial LED state: band LED for NIC type 1 (which
 * has reversed activity/association pins and no blinking link LED),
 * link LED for all other types.  Unknown types fall back to type 0. */
static void ipw_led_init(struct ipw_priv *priv)
{
	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];

	/* Set the default PINs for the link and activity leds */
	priv->led_activity_on = IPW_ACTIVITY_LED;
	priv->led_activity_off = ~(IPW_ACTIVITY_LED);

	priv->led_association_on = IPW_ASSOCIATED_LED;
	priv->led_association_off = ~(IPW_ASSOCIATED_LED);

	/* Set the default PINs for the OFDM leds */
	priv->led_ofdm_on = IPW_OFDM_LED;
	priv->led_ofdm_off = ~(IPW_OFDM_LED);

	switch (priv->nic_type) {
	case EEPROM_NIC_TYPE_1:
		/* In this NIC type, the LEDs are reversed.... */
		priv->led_activity_on = IPW_ASSOCIATED_LED;
		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
		priv->led_association_on = IPW_ACTIVITY_LED;
		priv->led_association_off = ~(IPW_ACTIVITY_LED);

		if (!(priv->config & CFG_NO_LED))
			ipw_led_band_on(priv);

		/* And we don't blink link LEDs for this nic, so
		 * just return here */
		return;

	case EEPROM_NIC_TYPE_3:
	case EEPROM_NIC_TYPE_2:
	case EEPROM_NIC_TYPE_4:
	case EEPROM_NIC_TYPE_0:
		break;

	default:
		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
			       priv->nic_type);
		priv->nic_type = EEPROM_NIC_TYPE_0;
		break;
	}

	if (!(priv->config & CFG_NO_LED)) {
		if (priv->status & STATUS_ASSOCIATED)
			ipw_led_link_on(priv);
		else
			ipw_led_link_off(priv);
	}
}
1167
/* Turn all LEDs off and cancel any pending LED blink work.  Used when
 * LED control is disabled via sysfs (see store_led). */
static void ipw_led_shutdown(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
	ipw_led_band_off(priv);
	cancel_delayed_work(&priv->led_link_on);
	cancel_delayed_work(&priv->led_link_off);
	cancel_delayed_work(&priv->led_act_off);
}
1177
1178 /*
1179 * The following adds a new attribute to the sysfs representation
1180 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1181 * used for controling the debug level.
1182 *
1183 * See the level definitions in ipw for details.
1184 */
/* Driver-level sysfs read: current debug level bitmask, in hex. */
static ssize_t show_debug_level(struct device_driver *d, char *buf)
{
	return sprintf(buf, "0x%08X\n", ipw_debug_level);
}
1189
1190 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1191 size_t count)
1192 {
1193 char *p = (char *)buf;
1194 u32 val;
1195
1196 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1197 p++;
1198 if (p[0] == 'x' || p[0] == 'X')
1199 p++;
1200 val = simple_strtoul(p, &p, 16);
1201 } else
1202 val = simple_strtoul(p, &p, 10);
1203 if (p == buf)
1204 printk(KERN_INFO DRV_NAME
1205 ": %s is not in hex or decimal form.\n", buf);
1206 else
1207 ipw_debug_level = val;
1208
1209 return strnlen(buf, count);
1210 }
1211
1212 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1213 show_debug_level, store_debug_level);
1214
/* Return the number of entries in the firmware event log; the count
 * is stored as the first dword at the log's base address. */
static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
{
	/* length = 1st dword in log */
	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
}
1220
/* Copy log_len entries of the firmware event log into 'log'.  The
 * entries begin after the base dword plus the length dword. */
static void ipw_capture_event_log(struct ipw_priv *priv,
				  u32 log_len, struct ipw_event *log)
{
	u32 base;

	if (log_len) {
		base = ipw_read32(priv, IPW_EVENT_LOG);
		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
				  (u8 *) log, sizeof(*log) * log_len);
	}
}
1232
/* Snapshot the firmware error log and event log into a freshly
 * allocated struct ipw_fw_error (elements and events stored in the
 * trailing payload).  Caller owns (kfree) the returned buffer;
 * returns NULL on allocation failure.
 * NOTE(review): elem_len/log_len come straight from the hardware and
 * are not range-checked before sizing the GFP_ATOMIC allocation --
 * confirm the device bounds these values. */
static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
{
	struct ipw_fw_error *error;
	u32 log_len = ipw_get_event_log_len(priv);
	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
	u32 elem_len = ipw_read_reg32(priv, base);

	error = kmalloc(sizeof(*error) +
			sizeof(*error->elem) * elem_len +
			sizeof(*error->log) * log_len, GFP_ATOMIC);
	if (!error) {
		IPW_ERROR("Memory allocation for firmware error log "
			  "failed.\n");
		return NULL;
	}
	error->jiffies = jiffies;
	error->status = priv->status;
	error->config = priv->config;
	error->elem_len = elem_len;
	error->log_len = log_len;
	/* carve elem[] and log[] out of the flexible payload area */
	error->elem = (struct ipw_error_elem *)error->payload;
	error->log = (struct ipw_event *)(error->elem + elem_len);

	ipw_capture_event_log(priv, log_len, error->log);

	if (elem_len)
		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
				  sizeof(*error->elem) * elem_len);

	return error;
}
1264
/* sysfs read: dump the firmware event log as hex, one entry per line.
 * The log is captured into a temporary buffer sized to fit the
 * PAGE_SIZE sysfs buffer. */
static ssize_t show_event_log(struct device *d,
			      struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 log_len = ipw_get_event_log_len(priv);
	u32 log_size;
	struct ipw_event *log;
	u32 len = 0, i;

	/* not using min() because of its strict type checking */
	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
			sizeof(*log) * log_len : PAGE_SIZE;
	log = kzalloc(log_size, GFP_KERNEL);
	if (!log) {
		IPW_ERROR("Unable to allocate memory for log\n");
		return 0;
	}
	/* clamp entry count to what actually fits in the buffer */
	log_len = log_size / sizeof(*log);
	ipw_capture_event_log(priv, log_len, log);

	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
	for (i = 0; i < log_len; i++)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X",
				log[i].time, log[i].event, log[i].data);
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	kfree(log);
	return len;
}

static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1296
/* sysfs read: dump the captured firmware error record (header, error
 * elements, then the event log) as hex.  Empty if no error captured. */
static ssize_t show_error(struct device *d,
			  struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 len = 0, i;
	if (!priv->error)
		return 0;
	len += snprintf(buf + len, PAGE_SIZE - len,
			"%08lX%08X%08X%08X",
			priv->error->jiffies,
			priv->error->status,
			priv->error->config, priv->error->elem_len);
	for (i = 0; i < priv->error->elem_len; i++)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X%08X%08X%08X%08X",
				priv->error->elem[i].time,
				priv->error->elem[i].desc,
				priv->error->elem[i].blink1,
				priv->error->elem[i].blink2,
				priv->error->elem[i].link1,
				priv->error->elem[i].link2,
				priv->error->elem[i].data);

	len += snprintf(buf + len, PAGE_SIZE - len,
			"\n%08X", priv->error->log_len);
	for (i = 0; i < priv->error->log_len; i++)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X",
				priv->error->log[i].time,
				priv->error->log[i].event,
				priv->error->log[i].data);
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
1331
1332 static ssize_t clear_error(struct device *d,
1333 struct device_attribute *attr,
1334 const char *buf, size_t count)
1335 {
1336 struct ipw_priv *priv = dev_get_drvdata(d);
1337
1338 kfree(priv->error);
1339 priv->error = NULL;
1340 return count;
1341 }
1342
1343 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1344
/* sysfs read: dump the host-command ring log, oldest entry first,
 * stopping when the PAGE_SIZE sysfs buffer is exhausted. */
static ssize_t show_cmd_log(struct device *d,
			    struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 len = 0, i;
	if (!priv->cmdlog)
		return 0;
	/* cmdlog is a ring buffer; cmdlog_pos is the newest slot, so
	 * start one past it and wrap around */
	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
	     (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
	     i = (i + 1) % priv->cmdlog_len) {
		len +=
		    snprintf(buf + len, PAGE_SIZE - len,
			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
			     priv->cmdlog[i].cmd.len);
		len +=
		    snprintk_buf(buf + len, PAGE_SIZE - len,
				 (u8 *) priv->cmdlog[i].cmd.param,
				 priv->cmdlog[i].cmd.len);
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1371
#ifdef CONFIG_IPW2200_PROMISCUOUS
static void ipw_prom_free(struct ipw_priv *priv);
static int ipw_prom_alloc(struct ipw_priv *priv);
/* sysfs write: '1' registers the promiscuous (rtap) interface, '0'
 * unregisters it (refused while the interface is up); anything else
 * is -EINVAL.  The module-wide rtap_iface flag tracks the state. */
static ssize_t store_rtap_iface(struct device *d,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	int rc = 0;

	if (count < 1)
		return -EINVAL;

	switch (buf[0]) {
	case '0':
		if (!rtap_iface)
			return count;

		if (netif_running(priv->prom_net_dev)) {
			IPW_WARNING("Interface is up. Cannot unregister.\n");
			return count;
		}

		ipw_prom_free(priv);
		rtap_iface = 0;
		break;

	case '1':
		if (rtap_iface)
			return count;

		rc = ipw_prom_alloc(priv);
		if (!rc)
			rtap_iface = 1;
		break;

	default:
		return -EINVAL;
	}

	if (rc) {
		IPW_ERROR("Failed to register promiscuous network "
			  "device (error %d).\n", rc);
	}

	return count;
}
1419
1420 static ssize_t show_rtap_iface(struct device *d,
1421 struct device_attribute *attr,
1422 char *buf)
1423 {
1424 struct ipw_priv *priv = dev_get_drvdata(d);
1425 if (rtap_iface)
1426 return sprintf(buf, "%s", priv->prom_net_dev->name);
1427 else {
1428 buf[0] = '-';
1429 buf[1] = '1';
1430 buf[2] = '\0';
1431 return 3;
1432 }
1433 }
1434
1435 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1436 store_rtap_iface);
1437
/* sysfs write: set the promiscuous-mode capture filter bitmask.
 * Requires the rtap interface (prom_priv) to be allocated first. */
static ssize_t store_rtap_filter(struct device *d,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (!priv->prom_priv) {
		IPW_ERROR("Attempting to set filter without "
			  "rtap_iface enabled.\n");
		return -EPERM;
	}

	/* base 0: accepts decimal, 0x-hex, or 0-octal */
	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);

	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
		       BIT_ARG16(priv->prom_priv->filter));

	return count;
}
1457
/* sysfs read: current rtap filter bitmask (0 when rtap is disabled). */
static ssize_t show_rtap_filter(struct device *d,
			struct device_attribute *attr,
			char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%04X",
		       priv->prom_priv ? priv->prom_priv->filter : 0);
}

static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
		   store_rtap_filter);
#endif
1470
1471 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1472 char *buf)
1473 {
1474 struct ipw_priv *priv = dev_get_drvdata(d);
1475 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1476 }
1477
/* sysfs write: set the BSS scan-age timeout.  Accepts decimal or
 * hex (detected by an 'x'/'X' in the first two characters).  Input is
 * copied into a small bounded buffer before parsing.
 * NOTE(review): returns the possibly-truncated parsed length rather
 * than 'count' -- confirm callers tolerate a short write return. */
static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	struct net_device *dev = priv->net_dev;
	char buffer[] = "00000000";
	unsigned long len =
	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
	unsigned long val;
	char *p = buffer;

	IPW_DEBUG_INFO("enter\n");

	strncpy(buffer, buf, len);
	buffer[len] = 0;

	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buffer) {
		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
	} else {
		priv->ieee->scan_age = val;
		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
	}

	IPW_DEBUG_INFO("exit\n");
	return len;
}

static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1513
1514 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1515 char *buf)
1516 {
1517 struct ipw_priv *priv = dev_get_drvdata(d);
1518 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1519 }
1520
1521 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1522 const char *buf, size_t count)
1523 {
1524 struct ipw_priv *priv = dev_get_drvdata(d);
1525
1526 IPW_DEBUG_INFO("enter\n");
1527
1528 if (count == 0)
1529 return 0;
1530
1531 if (*buf == 0) {
1532 IPW_DEBUG_LED("Disabling LED control.\n");
1533 priv->config |= CFG_NO_LED;
1534 ipw_led_shutdown(priv);
1535 } else {
1536 IPW_DEBUG_LED("Enabling LED control.\n");
1537 priv->config &= ~CFG_NO_LED;
1538 ipw_led_init(priv);
1539 }
1540
1541 IPW_DEBUG_INFO("exit\n");
1542 return count;
1543 }
1544
1545 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1546
1547 static ssize_t show_status(struct device *d,
1548 struct device_attribute *attr, char *buf)
1549 {
1550 struct ipw_priv *p = dev_get_drvdata(d);
1551 return sprintf(buf, "0x%08x\n", (int)p->status);
1552 }
1553
1554 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1555
1556 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1557 char *buf)
1558 {
1559 struct ipw_priv *p = dev_get_drvdata(d);
1560 return sprintf(buf, "0x%08x\n", (int)p->config);
1561 }
1562
1563 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1564
1565 static ssize_t show_nic_type(struct device *d,
1566 struct device_attribute *attr, char *buf)
1567 {
1568 struct ipw_priv *priv = dev_get_drvdata(d);
1569 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1570 }
1571
1572 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1573
1574 static ssize_t show_ucode_version(struct device *d,
1575 struct device_attribute *attr, char *buf)
1576 {
1577 u32 len = sizeof(u32), tmp = 0;
1578 struct ipw_priv *p = dev_get_drvdata(d);
1579
1580 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1581 return 0;
1582
1583 return sprintf(buf, "0x%08x\n", tmp);
1584 }
1585
1586 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1587
1588 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1589 char *buf)
1590 {
1591 u32 len = sizeof(u32), tmp = 0;
1592 struct ipw_priv *p = dev_get_drvdata(d);
1593
1594 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1595 return 0;
1596
1597 return sprintf(buf, "0x%08x\n", tmp);
1598 }
1599
1600 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1601
1602 /*
1603 * Add a device attribute to view/control the delay between eeprom
1604 * operations.
1605 */
1606 static ssize_t show_eeprom_delay(struct device *d,
1607 struct device_attribute *attr, char *buf)
1608 {
1609 struct ipw_priv *p = dev_get_drvdata(d);
1610 int n = p->eeprom_delay;
1611 return sprintf(buf, "%i\n", n);
1612 }
/* sysfs write: set the delay between EEPROM operations.
 * NOTE(review): the sscanf() result is unchecked, so non-numeric
 * input silently leaves eeprom_delay unchanged. */
static ssize_t store_eeprom_delay(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	sscanf(buf, "%i", &p->eeprom_delay);
	return strnlen(buf, count);
}

static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
		   show_eeprom_delay, store_eeprom_delay);
1624
1625 static ssize_t show_command_event_reg(struct device *d,
1626 struct device_attribute *attr, char *buf)
1627 {
1628 u32 reg = 0;
1629 struct ipw_priv *p = dev_get_drvdata(d);
1630
1631 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1632 return sprintf(buf, "0x%08x\n", reg);
1633 }
/* sysfs write: write a hex value to the internal command/event
 * register.  NOTE(review): sscanf() result is unchecked; a bad input
 * would write an uninitialized value. */
static ssize_t store_command_event_reg(struct device *d,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	u32 reg;
	struct ipw_priv *p = dev_get_drvdata(d);

	sscanf(buf, "%x", &reg);
	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
	return strnlen(buf, count);
}

static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
		   show_command_event_reg, store_command_event_reg);
1648
/* sysfs read: dump the register at hardware address 0x301100
 * (magic address; presumably the NIC's memory/GPIO block -- no
 * symbolic name exists in this driver). */
static ssize_t show_mem_gpio_reg(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	reg = ipw_read_reg32(p, 0x301100);
	return sprintf(buf, "0x%08x\n", reg);
}
/* sysfs write: write a hex value to the register at 0x301100.
 * NOTE(review): sscanf() result is unchecked. */
static ssize_t store_mem_gpio_reg(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	u32 reg;
	struct ipw_priv *p = dev_get_drvdata(d);

	sscanf(buf, "%x", &reg);
	ipw_write_reg32(p, 0x301100, reg);
	return strnlen(buf, count);
}

static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
		   show_mem_gpio_reg, store_mem_gpio_reg);
1672
1673 static ssize_t show_indirect_dword(struct device *d,
1674 struct device_attribute *attr, char *buf)
1675 {
1676 u32 reg = 0;
1677 struct ipw_priv *priv = dev_get_drvdata(d);
1678
1679 if (priv->status & STATUS_INDIRECT_DWORD)
1680 reg = ipw_read_reg32(priv, priv->indirect_dword);
1681 else
1682 reg = 0;
1683
1684 return sprintf(buf, "0x%08x\n", reg);
1685 }
/* sysfs write: set the indirect dword address (hex) and mark it
 * valid.  NOTE(review): sscanf() result is unchecked. */
static ssize_t store_indirect_dword(struct device *d,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->indirect_dword);
	priv->status |= STATUS_INDIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
		   show_indirect_dword, store_indirect_dword);
1699
1700 static ssize_t show_indirect_byte(struct device *d,
1701 struct device_attribute *attr, char *buf)
1702 {
1703 u8 reg = 0;
1704 struct ipw_priv *priv = dev_get_drvdata(d);
1705
1706 if (priv->status & STATUS_INDIRECT_BYTE)
1707 reg = ipw_read_reg8(priv, priv->indirect_byte);
1708 else
1709 reg = 0;
1710
1711 return sprintf(buf, "0x%02x\n", reg);
1712 }
/* sysfs write: set the indirect byte address (hex) and mark it
 * valid.  NOTE(review): sscanf() result is unchecked. */
static ssize_t store_indirect_byte(struct device *d,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->indirect_byte);
	priv->status |= STATUS_INDIRECT_BYTE;
	return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
		   show_indirect_byte, store_indirect_byte);
1726
1727 static ssize_t show_direct_dword(struct device *d,
1728 struct device_attribute *attr, char *buf)
1729 {
1730 u32 reg = 0;
1731 struct ipw_priv *priv = dev_get_drvdata(d);
1732
1733 if (priv->status & STATUS_DIRECT_DWORD)
1734 reg = ipw_read32(priv, priv->direct_dword);
1735 else
1736 reg = 0;
1737
1738 return sprintf(buf, "0x%08x\n", reg);
1739 }
/* sysfs write: set the direct dword address (hex) and mark it
 * valid.  NOTE(review): sscanf() result is unchecked. */
static ssize_t store_direct_dword(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->direct_dword);
	priv->status |= STATUS_DIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
		   show_direct_dword, store_direct_dword);
1753
/* Poll the hardware RF-kill switch: bit 0x10000 of register 0x30 is
 * clear when the switch is engaged.  Updates STATUS_RF_KILL_HW to
 * match and returns 1 if hardware RF kill is active. */
static int rf_kill_active(struct ipw_priv *priv)
{
	if (0 == (ipw_read32(priv, 0x30) & 0x10000))
		priv->status |= STATUS_RF_KILL_HW;
	else
		priv->status &= ~STATUS_RF_KILL_HW;

	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
}
1763
/* sysfs read: combined SW/HW RF-kill state as a bitmask. */
static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	/* 0 - RF kill not enabled
	   1 - SW based RF kill active (sysfs)
	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
	struct ipw_priv *priv = dev_get_drvdata(d);
	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
	    (rf_kill_active(priv) ? 0x2 : 0x0);
	return sprintf(buf, "%i\n", val);
}
1776
/* Set or clear the software RF-kill switch.  Returns 0 if the state
 * already matches the request, 1 if it changed.  Enabling kill
 * cancels all pending scan work and schedules the adapter 'down'
 * work; disabling it brings the adapter back 'up', or -- if the HW
 * switch is still engaged -- re-arms the rf_kill poll timer instead. */
static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
		return 0;

	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
			  disable_radio ? "OFF" : "ON");

	if (disable_radio) {
		priv->status |= STATUS_RF_KILL_SW;

		if (priv->workqueue) {
			cancel_delayed_work(&priv->request_scan);
			cancel_delayed_work(&priv->request_direct_scan);
			cancel_delayed_work(&priv->request_passive_scan);
			cancel_delayed_work(&priv->scan_event);
		}
		queue_work(priv->workqueue, &priv->down);
	} else {
		priv->status &= ~STATUS_RF_KILL_SW;
		if (rf_kill_active(priv)) {
			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
					  "disabled by HW switch\n");
			/* Make sure the RF_KILL check timer is running */
			cancel_delayed_work(&priv->rf_kill);
			queue_delayed_work(priv->workqueue, &priv->rf_kill,
					   round_jiffies_relative(2 * HZ));
		} else
			queue_work(priv->workqueue, &priv->up);
	}

	return 1;
}
1811
1812 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1813 const char *buf, size_t count)
1814 {
1815 struct ipw_priv *priv = dev_get_drvdata(d);
1816
1817 ipw_radio_kill_sw(priv, buf[0] == '1');
1818
1819 return count;
1820 }
1821
1822 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1823
/* sysfs read: space-separated list of speed-scan channels, or "0"
 * when speed scan is disabled. */
static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
			       char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	int pos = 0, len = 0;
	if (priv->config & CFG_SPEED_SCAN) {
		/* speed_scan[] is zero-terminated */
		while (priv->speed_scan[pos] != 0)
			len += sprintf(&buf[len], "%d ",
				       priv->speed_scan[pos++]);
		return len + sprintf(&buf[len], "\n");
	}

	return sprintf(buf, "0\n");
}
1838
/* sysfs write: parse a space-separated list of channels to speed-scan
 * (optionally ending with 0).  Invalid channels are skipped with a
 * warning; an empty list disables speed scan. */
static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	int channel, pos = 0;
	const char *p = buf;

	/* list of space separated channels to scan, optionally ending with 0 */
	while ((channel = simple_strtol(p, NULL, 0))) {
		if (pos == MAX_SPEED_SCAN - 1) {
			/* keep the list zero-terminated when full */
			priv->speed_scan[pos] = 0;
			break;
		}

		if (libipw_is_valid_channel(priv->ieee, channel))
			priv->speed_scan[pos++] = channel;
		else
			IPW_WARNING("Skipping invalid channel request: %d\n",
				    channel);
		p = strchr(p, ' ');
		if (!p)
			break;
		while (*p == ' ' || *p == '\t')
			p++;
	}

	if (pos == 0)
		priv->config &= ~CFG_SPEED_SCAN;
	else {
		priv->speed_scan_pos = 0;
		priv->config |= CFG_SPEED_SCAN;
	}

	return count;
}

static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
		   store_speed_scan);
1877
1878 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1879 char *buf)
1880 {
1881 struct ipw_priv *priv = dev_get_drvdata(d);
1882 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1883 }
1884
1885 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1886 const char *buf, size_t count)
1887 {
1888 struct ipw_priv *priv = dev_get_drvdata(d);
1889 if (buf[0] == '1')
1890 priv->config |= CFG_NET_STATS;
1891 else
1892 priv->config &= ~CFG_NET_STATS;
1893
1894 return count;
1895 }
1896
1897 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1898 show_net_stats, store_net_stats);
1899
/* sysfs read: human-readable list of the supported 2.4GHz and 5.2GHz
 * channels with their regulatory flags (radar, IBSS, passive-only). */
static ssize_t show_channels(struct device *d,
			     struct device_attribute *attr,
			     char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	int len = 0, i;

	len = sprintf(&buf[len],
		      "Displaying %d channels in 2.4Ghz band "
		      "(802.11bg):\n", geo->bg_channels);

	for (i = 0; i < geo->bg_channels; i++) {
		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
			       geo->bg[i].channel,
			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
			       " (radar spectrum)" : "",
			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
			       ? "" : ", IBSS",
			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
			       "passive only" : "active/passive",
			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
			       "B" : "B/G");
	}

	len += sprintf(&buf[len],
		       "Displaying %d channels in 5.2Ghz band "
		       "(802.11a):\n", geo->a_channels);
	for (i = 0; i < geo->a_channels; i++) {
		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
			       geo->a[i].channel,
			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
			       " (radar spectrum)" : "",
			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
			       ? "" : ", IBSS",
			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
			       "passive only" : "active/passive");
	}

	return len;
}

static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1945
/* Send an SIOCGIWAP wireless event to userspace: the current BSSID
 * when associated, or a zeroed address to signal disassociation. */
static void notify_wx_assoc_event(struct ipw_priv *priv)
{
	union iwreq_data wrqu;
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if (priv->status & STATUS_ASSOCIATED)
		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
	else
		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
1956
1957 static void ipw_irq_tasklet(struct ipw_priv *priv)
1958 {
1959 u32 inta, inta_mask, handled = 0;
1960 unsigned long flags;
1961 int rc = 0;
1962
1963 spin_lock_irqsave(&priv->irq_lock, flags);
1964
1965 inta = ipw_read32(priv, IPW_INTA_RW);
1966 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1967 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1968
1969 /* Add any cached INTA values that need to be handled */
1970 inta |= priv->isr_inta;
1971
1972 spin_unlock_irqrestore(&priv->irq_lock, flags);
1973
1974 spin_lock_irqsave(&priv->lock, flags);
1975
1976 /* handle all the justifications for the interrupt */
1977 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1978 ipw_rx(priv);
1979 handled |= IPW_INTA_BIT_RX_TRANSFER;
1980 }
1981
1982 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1983 IPW_DEBUG_HC("Command completed.\n");
1984 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1985 priv->status &= ~STATUS_HCMD_ACTIVE;
1986 wake_up_interruptible(&priv->wait_command_queue);
1987 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1988 }
1989
1990 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1991 IPW_DEBUG_TX("TX_QUEUE_1\n");
1992 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1993 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1994 }
1995
1996 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1997 IPW_DEBUG_TX("TX_QUEUE_2\n");
1998 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1999 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2000 }
2001
2002 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2003 IPW_DEBUG_TX("TX_QUEUE_3\n");
2004 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2005 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2006 }
2007
2008 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2009 IPW_DEBUG_TX("TX_QUEUE_4\n");
2010 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2011 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2012 }
2013
2014 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2015 IPW_WARNING("STATUS_CHANGE\n");
2016 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2017 }
2018
2019 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2020 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2021 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2022 }
2023
2024 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2025 IPW_WARNING("HOST_CMD_DONE\n");
2026 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2027 }
2028
2029 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2030 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2031 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2032 }
2033
2034 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2035 IPW_WARNING("PHY_OFF_DONE\n");
2036 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2037 }
2038
2039 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2040 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2041 priv->status |= STATUS_RF_KILL_HW;
2042 wake_up_interruptible(&priv->wait_command_queue);
2043 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2044 cancel_delayed_work(&priv->request_scan);
2045 cancel_delayed_work(&priv->request_direct_scan);
2046 cancel_delayed_work(&priv->request_passive_scan);
2047 cancel_delayed_work(&priv->scan_event);
2048 schedule_work(&priv->link_down);
2049 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2050 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2051 }
2052
2053 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2054 IPW_WARNING("Firmware error detected. Restarting.\n");
2055 if (priv->error) {
2056 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2057 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2058 struct ipw_fw_error *error =
2059 ipw_alloc_error_log(priv);
2060 ipw_dump_error_log(priv, error);
2061 kfree(error);
2062 }
2063 } else {
2064 priv->error = ipw_alloc_error_log(priv);
2065 if (priv->error)
2066 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2067 else
2068 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2069 "log.\n");
2070 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2071 ipw_dump_error_log(priv, priv->error);
2072 }
2073
2074 /* XXX: If hardware encryption is for WPA/WPA2,
2075 * we have to notify the supplicant. */
2076 if (priv->ieee->sec.encrypt) {
2077 priv->status &= ~STATUS_ASSOCIATED;
2078 notify_wx_assoc_event(priv);
2079 }
2080
2081 /* Keep the restart process from trying to send host
2082 * commands by clearing the INIT status bit */
2083 priv->status &= ~STATUS_INIT;
2084
2085 /* Cancel currently queued command. */
2086 priv->status &= ~STATUS_HCMD_ACTIVE;
2087 wake_up_interruptible(&priv->wait_command_queue);
2088
2089 queue_work(priv->workqueue, &priv->adapter_restart);
2090 handled |= IPW_INTA_BIT_FATAL_ERROR;
2091 }
2092
2093 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2094 IPW_ERROR("Parity error\n");
2095 handled |= IPW_INTA_BIT_PARITY_ERROR;
2096 }
2097
2098 if (handled != inta) {
2099 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2100 }
2101
2102 spin_unlock_irqrestore(&priv->lock, flags);
2103
2104 /* enable all interrupts */
2105 ipw_enable_interrupts(priv);
2106 }
2107
/* Expand one switch case mapping IPW_CMD_<x> to the string "<x>". */
#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
/* Return the symbolic name of a host command id, for log/debug output.
 * Unknown ids map to "UNKNOWN"; the returned string is static storage
 * and must not be freed. */
static char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IPW_CMD(HOST_COMPLETE);
		IPW_CMD(POWER_DOWN);
		IPW_CMD(SYSTEM_CONFIG);
		IPW_CMD(MULTICAST_ADDRESS);
		IPW_CMD(SSID);
		IPW_CMD(ADAPTER_ADDRESS);
		IPW_CMD(PORT_TYPE);
		IPW_CMD(RTS_THRESHOLD);
		IPW_CMD(FRAG_THRESHOLD);
		IPW_CMD(POWER_MODE);
		IPW_CMD(WEP_KEY);
		IPW_CMD(TGI_TX_KEY);
		IPW_CMD(SCAN_REQUEST);
		IPW_CMD(SCAN_REQUEST_EXT);
		IPW_CMD(ASSOCIATE);
		IPW_CMD(SUPPORTED_RATES);
		IPW_CMD(SCAN_ABORT);
		IPW_CMD(TX_FLUSH);
		IPW_CMD(QOS_PARAMETERS);
		IPW_CMD(DINO_CONFIG);
		IPW_CMD(RSN_CAPABILITIES);
		IPW_CMD(RX_KEY);
		IPW_CMD(CARD_DISABLE);
		IPW_CMD(SEED_NUMBER);
		IPW_CMD(TX_POWER);
		IPW_CMD(COUNTRY_INFO);
		IPW_CMD(AIRONET_INFO);
		IPW_CMD(AP_TX_POWER);
		IPW_CMD(CCKM_INFO);
		IPW_CMD(CCX_VER_INFO);
		IPW_CMD(SET_CALIBRATION);
		IPW_CMD(SENSITIVITY_CALIB);
		IPW_CMD(RETRY_LIMIT);
		IPW_CMD(IPW_PRE_POWER_DOWN);
		IPW_CMD(VAP_BEACON_TEMPLATE);
		IPW_CMD(VAP_DTIM_PERIOD);
		IPW_CMD(EXT_SUPPORTED_RATES);
		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
		IPW_CMD(VAP_QUIET_INTERVALS);
		IPW_CMD(VAP_CHANNEL_SWITCH);
		IPW_CMD(VAP_MANDATORY_CHANNELS);
		IPW_CMD(VAP_CELL_PWR_LIMIT);
		IPW_CMD(VAP_CF_PARAM_SET);
		IPW_CMD(VAP_SET_BEACONING_STATE);
		IPW_CMD(MEASUREMENT);
		IPW_CMD(POWER_CAPABILITY);
		IPW_CMD(SUPPORTED_CHANNELS);
		IPW_CMD(TPC_REPORT);
		IPW_CMD(WME_INFO);
		IPW_CMD(PRODUCTION_COMMAND);
	default:
		return "UNKNOWN";
	}
}
2166
/* How long to wait (jiffies) for firmware to ack a host command. */
#define HOST_COMPLETE_TIMEOUT HZ

/* Send one host command to the firmware and wait (interruptibly, up to
 * HOST_COMPLETE_TIMEOUT) for its completion.  Only one command may be in
 * flight at a time; STATUS_HCMD_ACTIVE serializes senders.  Returns 0 on
 * success, -EAGAIN if a command is already pending, -EIO on timeout or
 * RF-kill, or the error from queueing the command. */
static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
{
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->status & STATUS_HCMD_ACTIVE) {
		IPW_ERROR("Failed to send %s: Already sending a command.\n",
			  get_cmd_string(cmd->cmd));
		spin_unlock_irqrestore(&priv->lock, flags);
		return -EAGAIN;
	}

	priv->status |= STATUS_HCMD_ACTIVE;

	/* Record the command in the rolling cmdlog; retcode is patched
	 * in at exit once the outcome is known. */
	if (priv->cmdlog) {
		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
		       cmd->len);
		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
	}

	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
		     priv->status);

#ifndef DEBUG_CMD_WEP_KEY
	/* Never dump WEP key material to the log unless explicitly
	 * compiled in for debugging. */
	if (cmd->cmd == IPW_CMD_WEP_KEY)
		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
	else
#endif
		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);

	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
	if (rc) {
		priv->status &= ~STATUS_HCMD_ACTIVE;
		IPW_ERROR("Failed to send %s: Reason %d\n",
			  get_cmd_string(cmd->cmd), rc);
		spin_unlock_irqrestore(&priv->lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* The ISR clears STATUS_HCMD_ACTIVE and wakes this queue when
	 * the firmware acknowledges the command. */
	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
					      !(priv->
						status & STATUS_HCMD_ACTIVE),
					      HOST_COMPLETE_TIMEOUT);
	if (rc == 0) {
		/* Timeout: re-check under the lock in case completion
		 * raced with the timeout expiring. */
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->status & STATUS_HCMD_ACTIVE) {
			IPW_ERROR("Failed to send %s: Command timed out.\n",
				  get_cmd_string(cmd->cmd));
			priv->status &= ~STATUS_HCMD_ACTIVE;
			spin_unlock_irqrestore(&priv->lock, flags);
			rc = -EIO;
			goto exit;
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	} else
		rc = 0;

	if (priv->status & STATUS_RF_KILL_HW) {
		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
			  get_cmd_string(cmd->cmd));
		rc = -EIO;
		goto exit;
	}

      exit:
	if (priv->cmdlog) {
		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
		priv->cmdlog_pos %= priv->cmdlog_len;
	}
	return rc;
}
2246
2247 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2248 {
2249 struct host_cmd cmd = {
2250 .cmd = command,
2251 };
2252
2253 return __ipw_send_cmd(priv, &cmd);
2254 }
2255
2256 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2257 void *data)
2258 {
2259 struct host_cmd cmd = {
2260 .cmd = command,
2261 .len = len,
2262 .param = data,
2263 };
2264
2265 return __ipw_send_cmd(priv, &cmd);
2266 }
2267
2268 static int ipw_send_host_complete(struct ipw_priv *priv)
2269 {
2270 if (!priv) {
2271 IPW_ERROR("Invalid args\n");
2272 return -1;
2273 }
2274
2275 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2276 }
2277
2278 static int ipw_send_system_config(struct ipw_priv *priv)
2279 {
2280 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2281 sizeof(priv->sys_config),
2282 &priv->sys_config);
2283 }
2284
2285 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2286 {
2287 if (!priv || !ssid) {
2288 IPW_ERROR("Invalid args\n");
2289 return -1;
2290 }
2291
2292 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2293 ssid);
2294 }
2295
2296 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2297 {
2298 if (!priv || !mac) {
2299 IPW_ERROR("Invalid args\n");
2300 return -1;
2301 }
2302
2303 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2304 priv->net_dev->name, mac);
2305
2306 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2307 }
2308
2309 /*
2310 * NOTE: This must be executed from our workqueue as it results in udelay
2311 * being called which may corrupt the keyboard if executed on default
2312 * workqueue
2313 */
2314 static void ipw_adapter_restart(void *adapter)
2315 {
2316 struct ipw_priv *priv = adapter;
2317
2318 if (priv->status & STATUS_RF_KILL_MASK)
2319 return;
2320
2321 ipw_down(priv);
2322
2323 if (priv->assoc_network &&
2324 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2325 ipw_remove_current_network(priv);
2326
2327 if (ipw_up(priv)) {
2328 IPW_ERROR("Failed to up device\n");
2329 return;
2330 }
2331 }
2332
2333 static void ipw_bg_adapter_restart(struct work_struct *work)
2334 {
2335 struct ipw_priv *priv =
2336 container_of(work, struct ipw_priv, adapter_restart);
2337 mutex_lock(&priv->mutex);
2338 ipw_adapter_restart(priv);
2339 mutex_unlock(&priv->mutex);
2340 }
2341
2342 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2343
2344 static void ipw_scan_check(void *data)
2345 {
2346 struct ipw_priv *priv = data;
2347 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2348 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2349 "adapter after (%dms).\n",
2350 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2351 queue_work(priv->workqueue, &priv->adapter_restart);
2352 }
2353 }
2354
2355 static void ipw_bg_scan_check(struct work_struct *work)
2356 {
2357 struct ipw_priv *priv =
2358 container_of(work, struct ipw_priv, scan_check.work);
2359 mutex_lock(&priv->mutex);
2360 ipw_scan_check(priv);
2361 mutex_unlock(&priv->mutex);
2362 }
2363
2364 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2365 struct ipw_scan_request_ext *request)
2366 {
2367 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2368 sizeof(*request), request);
2369 }
2370
2371 static int ipw_send_scan_abort(struct ipw_priv *priv)
2372 {
2373 if (!priv) {
2374 IPW_ERROR("Invalid args\n");
2375 return -1;
2376 }
2377
2378 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2379 }
2380
2381 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2382 {
2383 struct ipw_sensitivity_calib calib = {
2384 .beacon_rssi_raw = cpu_to_le16(sens),
2385 };
2386
2387 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2388 &calib);
2389 }
2390
2391 static int ipw_send_associate(struct ipw_priv *priv,
2392 struct ipw_associate *associate)
2393 {
2394 if (!priv || !associate) {
2395 IPW_ERROR("Invalid args\n");
2396 return -1;
2397 }
2398
2399 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2400 associate);
2401 }
2402
2403 static int ipw_send_supported_rates(struct ipw_priv *priv,
2404 struct ipw_supported_rates *rates)
2405 {
2406 if (!priv || !rates) {
2407 IPW_ERROR("Invalid args\n");
2408 return -1;
2409 }
2410
2411 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2412 rates);
2413 }
2414
2415 static int ipw_set_random_seed(struct ipw_priv *priv)
2416 {
2417 u32 val;
2418
2419 if (!priv) {
2420 IPW_ERROR("Invalid args\n");
2421 return -1;
2422 }
2423
2424 get_random_bytes(&val, sizeof(val));
2425
2426 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2427 }
2428
2429 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2430 {
2431 __le32 v = cpu_to_le32(phy_off);
2432 if (!priv) {
2433 IPW_ERROR("Invalid args\n");
2434 return -1;
2435 }
2436
2437 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2438 }
2439
2440 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2441 {
2442 if (!priv || !power) {
2443 IPW_ERROR("Invalid args\n");
2444 return -1;
2445 }
2446
2447 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2448 }
2449
2450 static int ipw_set_tx_power(struct ipw_priv *priv)
2451 {
2452 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2453 struct ipw_tx_power tx_power;
2454 s8 max_power;
2455 int i;
2456
2457 memset(&tx_power, 0, sizeof(tx_power));
2458
2459 /* configure device for 'G' band */
2460 tx_power.ieee_mode = IPW_G_MODE;
2461 tx_power.num_channels = geo->bg_channels;
2462 for (i = 0; i < geo->bg_channels; i++) {
2463 max_power = geo->bg[i].max_power;
2464 tx_power.channels_tx_power[i].channel_number =
2465 geo->bg[i].channel;
2466 tx_power.channels_tx_power[i].tx_power = max_power ?
2467 min(max_power, priv->tx_power) : priv->tx_power;
2468 }
2469 if (ipw_send_tx_power(priv, &tx_power))
2470 return -EIO;
2471
2472 /* configure device to also handle 'B' band */
2473 tx_power.ieee_mode = IPW_B_MODE;
2474 if (ipw_send_tx_power(priv, &tx_power))
2475 return -EIO;
2476
2477 /* configure device to also handle 'A' band */
2478 if (priv->ieee->abg_true) {
2479 tx_power.ieee_mode = IPW_A_MODE;
2480 tx_power.num_channels = geo->a_channels;
2481 for (i = 0; i < tx_power.num_channels; i++) {
2482 max_power = geo->a[i].max_power;
2483 tx_power.channels_tx_power[i].channel_number =
2484 geo->a[i].channel;
2485 tx_power.channels_tx_power[i].tx_power = max_power ?
2486 min(max_power, priv->tx_power) : priv->tx_power;
2487 }
2488 if (ipw_send_tx_power(priv, &tx_power))
2489 return -EIO;
2490 }
2491 return 0;
2492 }
2493
2494 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2495 {
2496 struct ipw_rts_threshold rts_threshold = {
2497 .rts_threshold = cpu_to_le16(rts),
2498 };
2499
2500 if (!priv) {
2501 IPW_ERROR("Invalid args\n");
2502 return -1;
2503 }
2504
2505 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2506 sizeof(rts_threshold), &rts_threshold);
2507 }
2508
2509 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2510 {
2511 struct ipw_frag_threshold frag_threshold = {
2512 .frag_threshold = cpu_to_le16(frag),
2513 };
2514
2515 if (!priv) {
2516 IPW_ERROR("Invalid args\n");
2517 return -1;
2518 }
2519
2520 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2521 sizeof(frag_threshold), &frag_threshold);
2522 }
2523
2524 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2525 {
2526 __le32 param;
2527
2528 if (!priv) {
2529 IPW_ERROR("Invalid args\n");
2530 return -1;
2531 }
2532
2533 /* If on battery, set to 3, if AC set to CAM, else user
2534 * level */
2535 switch (mode) {
2536 case IPW_POWER_BATTERY:
2537 param = cpu_to_le32(IPW_POWER_INDEX_3);
2538 break;
2539 case IPW_POWER_AC:
2540 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2541 break;
2542 default:
2543 param = cpu_to_le32(mode);
2544 break;
2545 }
2546
2547 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2548 &param);
2549 }
2550
2551 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2552 {
2553 struct ipw_retry_limit retry_limit = {
2554 .short_retry_limit = slimit,
2555 .long_retry_limit = llimit
2556 };
2557
2558 if (!priv) {
2559 IPW_ERROR("Invalid args\n");
2560 return -1;
2561 }
2562
2563 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2564 &retry_limit);
2565 }
2566
2567 /*
2568 * The IPW device contains a Microwire compatible EEPROM that stores
2569 * various data like the MAC address. Usually the firmware has exclusive
2570 * access to the eeprom, but during device initialization (before the
2571 * device driver has sent the HostComplete command to the firmware) the
2572 * device driver has read access to the EEPROM by way of indirect addressing
2573 * through a couple of memory mapped registers.
2574 *
 * The following is a simplified implementation for pulling data out of
 * the eeprom, along with some helper functions to find information in
2577 * the per device private data's copy of the eeprom.
2578 *
 * NOTE: To better understand how these functions work (i.e. what is a chip
 * select and why do we have to keep driving the eeprom clock?), read
 * just about any data sheet for a Microwire compatible EEPROM.
2582 */
2583
2584 /* write a 32 bit value into the indirect accessor register */
2585 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2586 {
2587 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2588
2589 /* the eeprom requires some time to complete the operation */
2590 udelay(p->eeprom_delay);
2591
2592 return;
2593 }
2594
2595 /* perform a chip select operation */
static void eeprom_cs(struct ipw_priv *priv)
{
	/* Assert chip select with one clock (SK) pulse while CS is high.
	 * The exact sequence of accessor writes is the Microwire timing
	 * protocol — do not reorder. */
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
}
2603
/* release the eeprom chip select */
static void eeprom_disable_cs(struct ipw_priv *priv)
{
	/* Drop chip select, then issue one clock (SK) pulse with CS low
	 * to complete the cycle — sequence order matters. */
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_SK);
}
2611
2612 /* push a single bit down to the eeprom */
2613 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2614 {
2615 int d = (bit ? EEPROM_BIT_DI : 0);
2616 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2617 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2618 }
2619
2620 /* push an opcode followed by an address down to the eeprom */
2621 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2622 {
2623 int i;
2624
2625 eeprom_cs(priv);
2626 eeprom_write_bit(priv, 1);
2627 eeprom_write_bit(priv, op & 2);
2628 eeprom_write_bit(priv, op & 1);
2629 for (i = 7; i >= 0; i--) {
2630 eeprom_write_bit(priv, addr & (1 << i));
2631 }
2632 }
2633
2634 /* pull 16 bits off the eeprom, one bit at a time */
2635 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2636 {
2637 int i;
2638 u16 r = 0;
2639
2640 /* Send READ Opcode */
2641 eeprom_op(priv, EEPROM_CMD_READ, addr);
2642
2643 /* Send dummy bit */
2644 eeprom_write_reg(priv, EEPROM_BIT_CS);
2645
2646 /* Read the byte off the eeprom one bit at a time */
2647 for (i = 0; i < 16; i++) {
2648 u32 data = 0;
2649 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2650 eeprom_write_reg(priv, EEPROM_BIT_CS);
2651 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2652 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2653 }
2654
2655 /* Send another dummy bit */
2656 eeprom_write_reg(priv, 0);
2657 eeprom_disable_cs(priv);
2658
2659 return r;
2660 }
2661
2662 /* helper function for pulling the mac address out of the private */
2663 /* data's copy of the eeprom data */
2664 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2665 {
2666 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2667 }
2668
2669 /*
2670 * Either the device driver (i.e. the host) or the firmware can
2671 * load eeprom data into the designated region in SRAM. If neither
2672 * happens then the FW will shutdown with a fatal error.
2673 *
 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
 * region of shared SRAM needs to be non-zero.
2676 */
2677 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2678 {
2679 int i;
2680 __le16 *eeprom = (__le16 *) priv->eeprom;
2681
2682 IPW_DEBUG_TRACE(">>\n");
2683
2684 /* read entire contents of eeprom into private buffer */
2685 for (i = 0; i < 128; i++)
2686 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2687
2688 /*
2689 If the data looks correct, then copy it to our private
2690 copy. Otherwise let the firmware know to perform the operation
2691 on its own.
2692 */
2693 if (priv->eeprom[EEPROM_VERSION] != 0) {
2694 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2695
2696 /* write the eeprom data to sram */
2697 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2698 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2699
2700 /* Do not load eeprom data on fatal error or suspend */
2701 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2702 } else {
2703 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2704
2705 /* Load eeprom data on fatal error or suspend */
2706 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2707 }
2708
2709 IPW_DEBUG_TRACE("<<\n");
2710 }
2711
2712 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2713 {
2714 count >>= 2;
2715 if (!count)
2716 return;
2717 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2718 while (count--)
2719 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2720 }
2721
2722 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2723 {
2724 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2725 CB_NUMBER_OF_ELEMENTS_SMALL *
2726 sizeof(struct command_block));
2727 }
2728
2729 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2730 { /* start dma engine but no transfers yet */
2731
2732 IPW_DEBUG_FW(">> : \n");
2733
2734 /* Start the dma */
2735 ipw_fw_dma_reset_command_blocks(priv);
2736
2737 /* Write CB base address */
2738 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2739
2740 IPW_DEBUG_FW("<< : \n");
2741 return 0;
2742 }
2743
2744 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2745 {
2746 u32 control = 0;
2747
2748 IPW_DEBUG_FW(">> :\n");
2749
2750 /* set the Stop and Abort bit */
2751 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2752 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2753 priv->sram_desc.last_cb_index = 0;
2754
2755 IPW_DEBUG_FW("<< \n");
2756 }
2757
2758 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2759 struct command_block *cb)
2760 {
2761 u32 address =
2762 IPW_SHARED_SRAM_DMA_CONTROL +
2763 (sizeof(struct command_block) * index);
2764 IPW_DEBUG_FW(">> :\n");
2765
2766 ipw_write_indirect(priv, address, (u8 *) cb,
2767 (int)sizeof(struct command_block));
2768
2769 IPW_DEBUG_FW("<< :\n");
2770 return 0;
2771
2772 }
2773
2774 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2775 {
2776 u32 control = 0;
2777 u32 index = 0;
2778
2779 IPW_DEBUG_FW(">> :\n");
2780
2781 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2782 ipw_fw_dma_write_command_block(priv, index,
2783 &priv->sram_desc.cb_list[index]);
2784
2785 /* Enable the DMA in the CSR register */
2786 ipw_clear_bit(priv, IPW_RESET_REG,
2787 IPW_RESET_REG_MASTER_DISABLED |
2788 IPW_RESET_REG_STOP_MASTER);
2789
2790 /* Set the Start bit. */
2791 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2792 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2793
2794 IPW_DEBUG_FW("<< :\n");
2795 return 0;
2796 }
2797
2798 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2799 {
2800 u32 address;
2801 u32 register_value = 0;
2802 u32 cb_fields_address = 0;
2803
2804 IPW_DEBUG_FW(">> :\n");
2805 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2806 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2807
2808 /* Read the DMA Controlor register */
2809 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2810 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2811
2812 /* Print the CB values */
2813 cb_fields_address = address;
2814 register_value = ipw_read_reg32(priv, cb_fields_address);
2815 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2816
2817 cb_fields_address += sizeof(u32);
2818 register_value = ipw_read_reg32(priv, cb_fields_address);
2819 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2820
2821 cb_fields_address += sizeof(u32);
2822 register_value = ipw_read_reg32(priv, cb_fields_address);
2823 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2824 register_value);
2825
2826 cb_fields_address += sizeof(u32);
2827 register_value = ipw_read_reg32(priv, cb_fields_address);
2828 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2829
2830 IPW_DEBUG_FW(">> :\n");
2831 }
2832
2833 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2834 {
2835 u32 current_cb_address = 0;
2836 u32 current_cb_index = 0;
2837
2838 IPW_DEBUG_FW("<< :\n");
2839 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2840
2841 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2842 sizeof(struct command_block);
2843
2844 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2845 current_cb_index, current_cb_address);
2846
2847 IPW_DEBUG_FW(">> :\n");
2848 return current_cb_index;
2849
2850 }
2851
/* Append one DMA command block describing a src->dest transfer of
 * `length` bytes to the in-memory descriptor list (not yet written to
 * SRAM — see ipw_fw_dma_kick).  Returns 0, or -1 if the table is full. */
static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
					u32 src_address,
					u32 dest_address,
					u32 length,
					int interrupt_enabled, int is_last)
{

	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
	    CB_DEST_SIZE_LONG;
	struct command_block *cb;
	u32 last_cb_element = 0;

	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
			  src_address, dest_address, length);

	/* table full? */
	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
		return -1;

	/* claim the next free slot */
	last_cb_element = priv->sram_desc.last_cb_index;
	cb = &priv->sram_desc.cb_list[last_cb_element];
	priv->sram_desc.last_cb_index++;

	/* Calculate the new CB control word */
	if (interrupt_enabled)
		control |= CB_INT_ENABLED;

	if (is_last)
		control |= CB_LAST_VALID;

	/* the low bits of the control word carry the transfer length */
	control |= length;

	/* Calculate the CB Element's checksum value */
	cb->status = control ^ src_address ^ dest_address;

	/* Copy the Source and Destination addresses */
	cb->dest_addr = dest_address;
	cb->source_addr = src_address;

	/* Copy the Control Word last — the field order here is
	 * deliberate; do not reorder these stores. */
	cb->control = control;

	return 0;
}
2896
2897 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2898 int nr, u32 dest_address, u32 len)
2899 {
2900 int ret, i;
2901 u32 size;
2902
2903 IPW_DEBUG_FW(">> \n");
2904 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2905 nr, dest_address, len);
2906
2907 for (i = 0; i < nr; i++) {
2908 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2909 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2910 dest_address +
2911 i * CB_MAX_LENGTH, size,
2912 0, 0);
2913 if (ret) {
2914 IPW_DEBUG_FW_INFO(": Failed\n");
2915 return -1;
2916 } else
2917 IPW_DEBUG_FW_INFO(": Added new cb\n");
2918 }
2919
2920 IPW_DEBUG_FW("<< \n");
2921 return 0;
2922 }
2923
/* Poll until the DMA engine has consumed every queued command block,
 * then stop the engine.  A watchdog aborts after ~400 polls (50us each)
 * with no forward progress.  Returns 0 on success, -1 on timeout. */
static int ipw_fw_dma_wait(struct ipw_priv *priv)
{
	u32 current_index = 0, previous_index;
	u32 watchdog = 0;

	IPW_DEBUG_FW(">> : \n");

	current_index = ipw_fw_dma_command_block_index(priv);
	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
			  (int)priv->sram_desc.last_cb_index);

	while (current_index < priv->sram_desc.last_cb_index) {
		udelay(50);
		previous_index = current_index;
		current_index = ipw_fw_dma_command_block_index(priv);

		/* any forward progress resets the watchdog */
		if (previous_index < current_index) {
			watchdog = 0;
			continue;
		}
		if (++watchdog > 400) {
			IPW_DEBUG_FW_INFO("Timeout\n");
			ipw_fw_dma_dump_command_block(priv);
			ipw_fw_dma_abort(priv);
			return -1;
		}
	}

	/* all blocks consumed — stop the engine */
	ipw_fw_dma_abort(priv);

	/*Disable the DMA in the CSR register */
	ipw_set_bit(priv, IPW_RESET_REG,
		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);

	IPW_DEBUG_FW("<< dmaWaitSync \n");
	return 0;
}
2961
2962 static void ipw_remove_current_network(struct ipw_priv *priv)
2963 {
2964 struct list_head *element, *safe;
2965 struct libipw_network *network = NULL;
2966 unsigned long flags;
2967
2968 spin_lock_irqsave(&priv->ieee->lock, flags);
2969 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2970 network = list_entry(element, struct libipw_network, list);
2971 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2972 list_del(element);
2973 list_add_tail(&network->list,
2974 &priv->ieee->network_free_list);
2975 }
2976 }
2977 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2978 }
2979
2980 /**
2981 * Check that card is still alive.
2982 * Reads debug register from domain0.
2983 * If card is present, pre-defined value should
2984 * be found there.
2985 *
2986 * @param priv
2987 * @return 1 if card is present, 0 otherwise
2988 */
2989 static inline int ipw_alive(struct ipw_priv *priv)
2990 {
2991 return ipw_read32(priv, 0x90) == 0xd55555d5;
2992 }
2993
2994 /* timeout in msec, attempted in 10-msec quanta */
2995 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2996 int timeout)
2997 {
2998 int i = 0;
2999
3000 do {
3001 if ((ipw_read32(priv, addr) & mask) == mask)
3002 return i;
3003 mdelay(10);
3004 i += 10;
3005 } while (i < timeout);
3006
3007 return -ETIME;
3008 }
3009
3010 /* These functions load the firmware and micro code for the operation of
3011 * the ipw hardware. It assumes the buffer has all the bits for the
3012 * image and the caller is handling the memory allocation and clean up.
3013 */
3014
3015 static int ipw_stop_master(struct ipw_priv *priv)
3016 {
3017 int rc;
3018
3019 IPW_DEBUG_TRACE(">> \n");
3020 /* stop master. typical delay - 0 */
3021 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3022
3023 /* timeout is in msec, polled in 10-msec quanta */
3024 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3025 IPW_RESET_REG_MASTER_DISABLED, 100);
3026 if (rc < 0) {
3027 IPW_ERROR("wait for stop master failed after 100ms\n");
3028 return -1;
3029 }
3030
3031 IPW_DEBUG_INFO("stop master %dms\n", rc);
3032
3033 return rc;
3034 }
3035
/* Release the ARC processor from Princeton reset, with settle delays
 * on both sides of the release. */
static void ipw_arc_release(struct ipw_priv *priv)
{
	IPW_DEBUG_TRACE(">> \n");
	mdelay(5);

	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);

	/* no one knows timing, for safety add some delay */
	mdelay(5);
}
3046
/* Firmware image chunk header; the chunk payload immediately follows
 * this header in the image (see ipw_load_firmware). */
struct fw_chunk {
	__le32 address;		/* device address to load this chunk at */
	__le32 length;		/* payload length in bytes */
};
3051
/* Load the DINO microcode image (`len` bytes at `data`, a stream of
 * 16-bit little-endian words) into the baseband processor and wait for
 * its alive response.  Returns 0 on success, -EINVAL if the alive
 * response is bad, -ETIME if none arrives, or the ipw_stop_master
 * error.  The reset/delay sequence below is hardware bring-up timing —
 * do not reorder. */
static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = 0, i, addr;
	u8 cr = 0;
	__le16 *image;

	image = (__le16 *) data;

	IPW_DEBUG_TRACE(">> \n");

	rc = ipw_stop_master(priv);

	if (rc < 0)
		return rc;

	/* clear all of shared SRAM / domain-1 space before the load */
	for (addr = IPW_SHARED_LOWER_BOUND;
	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
		ipw_write32(priv, addr, 0);
	}

	/* no ucode (yet) */
	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
	/* destroy DMA queues */
	/* reset sequence */

	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
	ipw_arc_release(priv);
	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
	mdelay(1);

	/* reset PHY */
	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
	mdelay(1);

	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
	mdelay(1);

	/* enable ucode store */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
	mdelay(1);

	/* write ucode */
	/**
	 * @bug
	 * Do NOT set indirect address register once and then
	 * store data to indirect data register in the loop.
	 * It seems very reasonable, but in this case DINO do not
	 * accept ucode. It is essential to set address each time.
	 */
	/* load new ipw uCode */
	for (i = 0; i < len / 2; i++)
		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
				le16_to_cpu(image[i]));

	/* enable DINO */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);

	/* this is where the igx / win driver deviates from the VAP driver. */

	/* wait for alive response */
	for (i = 0; i < 100; i++) {
		/* poll for incoming data */
		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
		if (cr & DINO_RXFIFO_DATA)
			break;
		mdelay(1);
	}

	if (cr & DINO_RXFIFO_DATA) {
		/* alive_command_response size is NOT multiple of 4 */
		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];

		/* drain the response words from the RX FIFO */
		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
			response_buffer[i] =
			    cpu_to_le32(ipw_read_reg32(priv,
						       IPW_BASEBAND_RX_FIFO_READ));
		memcpy(&priv->dino_alive, response_buffer,
		       sizeof(priv->dino_alive));
		if (priv->dino_alive.alive_command == 1
		    && priv->dino_alive.ucode_valid == 1) {
			rc = 0;
			IPW_DEBUG_INFO
			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
			     "of %02d/%02d/%02d %02d:%02d\n",
			     priv->dino_alive.software_revision,
			     priv->dino_alive.software_revision,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.time_stamp[0],
			     priv->dino_alive.time_stamp[1],
			     priv->dino_alive.time_stamp[2],
			     priv->dino_alive.time_stamp[3],
			     priv->dino_alive.time_stamp[4]);
		} else {
			IPW_DEBUG_INFO("Microcode is not alive\n");
			rc = -EINVAL;
		}
	} else {
		IPW_DEBUG_INFO("No alive response from DINO\n");
		rc = -ETIME;
	}

	/* disable DINO, otherwise for some reason
	   firmware have problem getting alive resp. */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);

	return rc;
}
3162
/* DMA a firmware image (a sequence of fw_chunk headers each followed by
 * payload) into the device.  Each chunk is split into CB_MAX_LENGTH
 * pieces staged through a PCI pool, queued as DMA command blocks, then
 * kicked and waited on.  Returns 0 on success or a negative errno; all
 * pool allocations are released on every path via the `out` label. */
static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
{
	int ret = -1;
	int offset = 0;
	struct fw_chunk *chunk;
	int total_nr = 0;
	int i;
	struct pci_pool *pool;
	u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
	dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];

	IPW_DEBUG_TRACE("<< : \n");

	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
	if (!pool) {
		IPW_ERROR("pci_pool_create failed\n");
		return -ENOMEM;
	}

	/* Start the Dma */
	ret = ipw_fw_dma_enable(priv);

	/* the DMA is already ready this would be a bug. */
	BUG_ON(priv->sram_desc.last_cb_index > 0);

	do {
		u32 chunk_len;
		u8 *start;
		int size;
		int nr = 0;

		/* parse the next chunk header; payload follows it */
		chunk = (struct fw_chunk *)(data + offset);
		offset += sizeof(struct fw_chunk);
		chunk_len = le32_to_cpu(chunk->length);
		start = data + offset;

		/* number of CB_MAX_LENGTH pieces, rounding up */
		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
		for (i = 0; i < nr; i++) {
			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
							 &phys[total_nr]);
			if (!virts[total_nr]) {
				ret = -ENOMEM;
				goto out;
			}
			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
				     CB_MAX_LENGTH);
			memcpy(virts[total_nr], start, size);
			start += size;
			total_nr++;
			/* We don't support fw chunk larger than 64*8K */
			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
		}

		/* build DMA packet and queue up for sending */
		/* dma to chunk->address, the chunk->length bytes from data +
		 * offset */
		/* Dma loading */
		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
					    nr, le32_to_cpu(chunk->address),
					    chunk_len);
		if (ret) {
			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
			goto out;
		}

		offset += chunk_len;
	} while (offset < len);

	/* Run the DMA and wait for the answer */
	ret = ipw_fw_dma_kick(priv);
	if (ret) {
		IPW_ERROR("dmaKick Failed\n");
		goto out;
	}

	ret = ipw_fw_dma_wait(priv);
	if (ret) {
		IPW_ERROR("dmaWaitSync Failed\n");
		goto out;
	}
 out:
	/* release every staged buffer, then the pool itself */
	for (i = 0; i < total_nr; i++)
		pci_pool_free(pool, virts[i], phys[i]);

	pci_pool_destroy(pool);

	return ret;
}
3251
3252 /* stop nic */
3253 static int ipw_stop_nic(struct ipw_priv *priv)
3254 {
3255 int rc = 0;
3256
3257 /* stop */
3258 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3259
3260 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3261 IPW_RESET_REG_MASTER_DISABLED, 500);
3262 if (rc < 0) {
3263 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3264 return rc;
3265 }
3266
3267 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3268
3269 return rc;
3270 }
3271
3272 static void ipw_start_nic(struct ipw_priv *priv)
3273 {
3274 IPW_DEBUG_TRACE(">>\n");
3275
3276 /* prvHwStartNic release ARC */
3277 ipw_clear_bit(priv, IPW_RESET_REG,
3278 IPW_RESET_REG_MASTER_DISABLED |
3279 IPW_RESET_REG_STOP_MASTER |
3280 CBD_RESET_REG_PRINCETON_RESET);
3281
3282 /* enable power management */
3283 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3284 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3285
3286 IPW_DEBUG_TRACE("<<\n");
3287 }
3288
3289 static int ipw_init_nic(struct ipw_priv *priv)
3290 {
3291 int rc;
3292
3293 IPW_DEBUG_TRACE(">>\n");
3294 /* reset */
3295 /*prvHwInitNic */
3296 /* set "initialization complete" bit to move adapter to D0 state */
3297 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3298
3299 /* low-level PLL activation */
3300 ipw_write32(priv, IPW_READ_INT_REGISTER,
3301 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3302
3303 /* wait for clock stabilization */
3304 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3305 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3306 if (rc < 0)
3307 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3308
3309 /* assert SW reset */
3310 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3311
3312 udelay(10);
3313
3314 /* set "initialization complete" bit to move adapter to D0 state */
3315 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3316
3317 IPW_DEBUG_TRACE(">>\n");
3318 return 0;
3319 }
3320
3321 /* Call this function from process context, it will sleep in request_firmware.
3322 * Probe is an ok place to call this from.
3323 */
3324 static int ipw_reset_nic(struct ipw_priv *priv)
3325 {
3326 int rc = 0;
3327 unsigned long flags;
3328
3329 IPW_DEBUG_TRACE(">>\n");
3330
3331 rc = ipw_init_nic(priv);
3332
3333 spin_lock_irqsave(&priv->lock, flags);
3334 /* Clear the 'host command active' bit... */
3335 priv->status &= ~STATUS_HCMD_ACTIVE;
3336 wake_up_interruptible(&priv->wait_command_queue);
3337 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3338 wake_up_interruptible(&priv->wait_state);
3339 spin_unlock_irqrestore(&priv->lock, flags);
3340
3341 IPW_DEBUG_TRACE("<<\n");
3342 return rc;
3343 }
3344
3345
/* On-disk layout of an ipw2200 firmware blob: a fixed header followed
 * by the boot image, the ucode image and the runtime firmware image
 * stored back to back in 'data'.  All fields are little-endian. */
struct ipw_fw {
	__le32 ver;		/* version: major in high 16 bits, minor in low */
	__le32 boot_size;	/* length in bytes of the boot image */
	__le32 ucode_size;	/* length in bytes of the ucode image */
	__le32 fw_size;		/* length in bytes of the runtime firmware */
	u8 data[0];		/* the three images, concatenated */
};
3353
/* Fetch firmware image 'name' via the firmware_class interface and
 * sanity-check it against the ipw_fw header it declares.
 *
 * On success *raw holds the loaded image and 0 is returned; the caller
 * owns the image and must release_firmware() it.  On a size/corruption
 * failure -EINVAL is returned with *raw still loaded — callers release
 * it on their error path. */
static int ipw_get_fw(struct ipw_priv *priv,
		      const struct firmware **raw, const char *name)
{
	struct ipw_fw *fw;
	int rc;

	/* ask firmware_class module to get the boot firmware off disk */
	rc = request_firmware(raw, name, &priv->pci_dev->dev);
	if (rc < 0) {
		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
		return rc;
	}

	/* Blob must at least contain the ipw_fw header. */
	if ((*raw)->size < sizeof(*fw)) {
		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
		return -EINVAL;
	}

	fw = (void *)(*raw)->data;

	/* The three image sizes the header declares must all fit inside
	 * the blob we actually received. */
	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
		IPW_ERROR("%s is too small or corrupt (%zd)\n",
			  name, (*raw)->size);
		return -EINVAL;
	}

	/* NOTE(review): minor version is masked with 0xff although the
	 * field appears to reserve 16 bits for it — debug output only. */
	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
		       name,
		       le32_to_cpu(fw->ver) >> 16,
		       le32_to_cpu(fw->ver) & 0xff,
		       (*raw)->size - sizeof(*fw));
	return 0;
}
3388
3389 #define IPW_RX_BUF_SIZE (3000)
3390
/* Return every Rx buffer to the rx_used list, unmapping and freeing any
 * skb still attached, and reset the queue indices to "all processed,
 * nothing restocked".  Runs under the queue lock with IRQs disabled. */
static void ipw_rx_queue_reset(struct ipw_priv *priv,
			       struct ipw_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rxq->pool[i].skb);
			rxq->pool[i].skb = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
3421
#ifdef CONFIG_PM
/* With power management the firmware image is cached at file scope so a
 * resume does not have to go back to userspace via request_firmware().
 * 'fw_loaded' tracks whether 'raw' currently holds a valid image. */
static int fw_loaded = 0;
static const struct firmware *raw = NULL;

/* Release the cached firmware image, if one is held. */
static void free_firmware(void)
{
	if (fw_loaded) {
		release_firmware(raw);
		raw = NULL;
		fw_loaded = 0;
	}
}
#else
/* Without CONFIG_PM nothing is cached, so there is nothing to free. */
#define free_firmware() do {} while (0)
#endif
3437
/* Full firmware bring-up: pick the image for the current mode, fetch it
 * (or reuse the CONFIG_PM cache), then reset the NIC and DMA the boot,
 * ucode and runtime images into it, initialize the Tx/Rx queues and
 * enable interrupts.  Retries the whole sequence on a parity error.
 * Sleeps in request_firmware(); call from process context only.
 * Returns 0 on success or a negative error code. */
static int ipw_load(struct ipw_priv *priv)
{
#ifndef CONFIG_PM
	const struct firmware *raw = NULL;
#endif
	struct ipw_fw *fw;
	u8 *boot_img, *ucode_img, *fw_img;
	u8 *name = NULL;
	int rc = 0, retries = 3;

	/* Select the firmware image matching the operating mode. */
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		name = "ipw2200-ibss.fw";
		break;
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
		name = "ipw2200-sniffer.fw";
		break;
#endif
	case IW_MODE_INFRA:
		name = "ipw2200-bss.fw";
		break;
	}

	if (!name) {
		rc = -EINVAL;
		goto error;
	}

	/* With CONFIG_PM, fetch only if the file-scope cache is empty. */
#ifdef CONFIG_PM
	if (!fw_loaded) {
#endif
		rc = ipw_get_fw(priv, &raw, name);
		if (rc < 0)
			goto error;
#ifdef CONFIG_PM
	}
#endif

	/* The three images sit back to back after the ipw_fw header. */
	fw = (void *)raw->data;
	boot_img = &fw->data[0];
	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
			   le32_to_cpu(fw->ucode_size)];

	if (rc < 0)
		goto error;

	if (!priv->rxq)
		priv->rxq = ipw_rx_queue_alloc(priv);
	else
		ipw_rx_queue_reset(priv, priv->rxq);
	if (!priv->rxq) {
		IPW_ERROR("Unable to initialize Rx queue\n");
		goto error;
	}

      retry:
	/* Ensure interrupts are disabled */
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
	priv->status &= ~STATUS_INT_ENABLED;

	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

	ipw_stop_nic(priv);

	rc = ipw_reset_nic(priv);
	if (rc < 0) {
		IPW_ERROR("Unable to reset NIC\n");
		goto error;
	}

	/* Wipe the NIC's SRAM before loading fresh images. */
	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);

	/* DMA the initial boot firmware into the device */
	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
		goto error;
	}

	/* kick start the device */
	ipw_start_nic(priv);

	/* wait for the device to finish its initial startup sequence */
	rc = ipw_poll_bit(priv, IPW_INTA_RW,
			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
	if (rc < 0) {
		IPW_ERROR("device failed to boot initial fw image\n");
		goto error;
	}
	IPW_DEBUG_INFO("initial device response after %dms\n", rc);

	/* ack fw init done interrupt */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);

	/* DMA the ucode into the device */
	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load ucode: %d\n", rc);
		goto error;
	}

	/* stop nic */
	ipw_stop_nic(priv);

	/* DMA bss firmware into the device */
	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load firmware: %d\n", rc);
		goto error;
	}
#ifdef CONFIG_PM
	fw_loaded = 1;
#endif

	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);

	rc = ipw_queue_reset(priv);
	if (rc < 0) {
		IPW_ERROR("Unable to initialize queues\n");
		goto error;
	}

	/* Ensure interrupts are disabled */
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

	/* kick start the device */
	ipw_start_nic(priv);

	/* A parity error suggests a bad image transfer; retry the whole
	 * init sequence up to 'retries' times before giving up. */
	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
		if (retries > 0) {
			IPW_WARNING("Parity error. Retrying init.\n");
			retries--;
			goto retry;
		}

		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
		rc = -EIO;
		goto error;
	}

	/* wait for the device */
	rc = ipw_poll_bit(priv, IPW_INTA_RW,
			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
	if (rc < 0) {
		IPW_ERROR("device failed to start within 500ms\n");
		goto error;
	}
	IPW_DEBUG_INFO("device response after %dms\n", rc);

	/* ack fw init done interrupt */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);

	/* read eeprom data and initialize the eeprom region of sram */
	priv->eeprom_delay = 1;
	ipw_eeprom_init_sram(priv);

	/* enable interrupts */
	ipw_enable_interrupts(priv);

	/* Ensure our queue has valid packets */
	ipw_rx_queue_replenish(priv);

	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);

	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

#ifndef CONFIG_PM
	release_firmware(raw);
#endif
	return 0;

      error:
	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);
	if (raw)
		release_firmware(raw);
	/* Invalidate the CONFIG_PM cache so the next attempt re-fetches. */
#ifdef CONFIG_PM
	fw_loaded = 0;
	raw = NULL;
#endif

	return rc;
}
3631
3632 /**
3633 * DMA services
3634 *
3635 * Theory of operation
3636 *
 * A queue is a circular buffer with 'Read' and 'Write' pointers.
 * Two empty entries are always kept in the buffer to protect from overflow.
 *
 * For the Tx queue, there are low mark and high mark limits.  If, after
 * queuing a packet for Tx, the free space becomes less than the low mark,
 * the Tx queue is stopped.  When reclaiming packets (on the 'tx done' IRQ),
 * if the free space becomes greater than the high mark, the Tx queue is
 * resumed.
3644 *
3645 * The IPW operates with six queues, one receive queue in the device's
3646 * sram, one transmit queue for sending commands to the device firmware,
3647 * and four transmit queues for data.
3648 *
3649 * The four transmit queues allow for performing quality of service (qos)
3650 * transmissions as per the 802.11 protocol. Currently Linux does not
3651 * provide a mechanism to the user for utilizing prioritized queues, so
3652 * we only utilize the first data transmit queue (queue1).
3653 */
3654
3655 /**
3656 * Driver allocates buffers of this size for Rx
3657 */
3658
3659 /**
3660 * ipw_rx_queue_space - Return number of free slots available in queue.
3661 */
3662 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3663 {
3664 int s = q->read - q->write;
3665 if (s <= 0)
3666 s += RX_QUEUE_SIZE;
3667 /* keep some buffer to not confuse full and empty queue */
3668 s -= 2;
3669 if (s < 0)
3670 s = 0;
3671 return s;
3672 }
3673
3674 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3675 {
3676 int s = q->last_used - q->first_empty;
3677 if (s <= 0)
3678 s += q->n_bd;
3679 s -= 2; /* keep some reserve to not confuse empty and full situations */
3680 if (s < 0)
3681 s = 0;
3682 return s;
3683 }
3684
/* Advance a ring index by one, wrapping back to 0 at n_bd entries. */
static inline int ipw_queue_inc_wrap(int index, int n_bd)
{
	index++;
	if (index == n_bd)
		index = 0;
	return index;
}
3689
/**
 * Initialize common DMA queue structure
 *
 * @param priv driver private data (for register access)
 * @param q queue to init
 * @param count Number of BD's to allocate.  Should be power of 2
 * @param read Address for 'read' register
 *             (not offset within BAR, full address)
 * @param write Address for 'write' register
 *              (not offset within BAR, full address)
 * @param base Address for 'base' register
 *             (not offset within BAR, full address)
 * @param size Address for 'size' register
 *             (not offset within BAR, full address)
 */
static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
			   int count, u32 read, u32 write, u32 base, u32 size)
{
	q->n_bd = count;

	/* Low/high watermarks throttle the queue, clamped to sane
	 * minimums for very small rings. */
	q->low_mark = q->n_bd / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_bd / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->first_empty = q->last_used = 0;
	q->reg_r = read;
	q->reg_w = write;

	/* Program the hardware: ring base, size, and both indices to 0. */
	ipw_write32(priv, base, q->dma_addr);
	ipw_write32(priv, size, count);
	ipw_write32(priv, read, 0);
	ipw_write32(priv, write, 0);

	/* NOTE(review): read-back presumably flushes the posted writes
	 * above; 0x90 looks like a scratch/flush register — confirm. */
	_ipw_read32(priv, 0x90);
}
3728
3729 static int ipw_queue_tx_init(struct ipw_priv *priv,
3730 struct clx2_tx_queue *q,
3731 int count, u32 read, u32 write, u32 base, u32 size)
3732 {
3733 struct pci_dev *dev = priv->pci_dev;
3734
3735 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3736 if (!q->txb) {
3737 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3738 return -ENOMEM;
3739 }
3740
3741 q->bd =
3742 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3743 if (!q->bd) {
3744 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3745 sizeof(q->bd[0]) * count);
3746 kfree(q->txb);
3747 q->txb = NULL;
3748 return -ENOMEM;
3749 }
3750
3751 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3752 return 0;
3753 }
3754
/**
 * Free one TFD, those at index [txq->q.last_used].
 * Do NOT advance any indexes
 *
 * Host-command TFDs have nothing to unmap; data TFDs have each DMA
 * chunk unmapped and the associated txb released.
 *
 * @param priv driver private data
 * @param txq queue whose last_used TFD is freed
 */
static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
				  struct clx2_tx_queue *txq)
{
	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
	struct pci_dev *dev = priv->pci_dev;
	int i;

	/* classify bd */
	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
		/* nothing to cleanup after for host commands */
		return;

	/* sanity check */
	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
		IPW_ERROR("Too many chunks: %i\n",
			  le32_to_cpu(bd->u.data.num_chunks));
		/** @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* unmap chunks if any */
	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
				 le16_to_cpu(bd->u.data.chunk_len[i]),
				 PCI_DMA_TODEVICE);
		/* The txb is freed on the first iteration that finds it;
		 * the NULL assignment makes later iterations a no-op. */
		if (txq->txb[txq->q.last_used]) {
			libipw_txb_free(txq->txb[txq->q.last_used]);
			txq->txb[txq->q.last_used] = NULL;
		}
	}
}
3793
/**
 * Deallocate DMA queue.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 *
 * A queue with n_bd == 0 was never initialized and is skipped.
 *
 * @param priv driver private data
 * @param txq queue to destroy; zeroed on return
 */
static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
{
	struct clx2_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	/* Walk last_used forward (with wrap) until it meets first_empty,
	 * freeing each in-flight TFD along the way. */
	for (; q->first_empty != q->last_used;
	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
		ipw_queue_tx_free_tfd(priv, txq);
	}

	/* free buffers belonging to queue itself */
	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
			    q->dma_addr);
	kfree(txq->txb);

	/* 0 fill whole structure */
	memset(txq, 0, sizeof(*txq));
}
3825
3826 /**
3827 * Destroy all DMA queues and structures
3828 *
3829 * @param priv
3830 */
3831 static void ipw_tx_queue_free(struct ipw_priv *priv)
3832 {
3833 /* Tx CMD queue */
3834 ipw_queue_tx_free(priv, &priv->txq_cmd);
3835
3836 /* Tx queues */
3837 ipw_queue_tx_free(priv, &priv->txq[0]);
3838 ipw_queue_tx_free(priv, &priv->txq[1]);
3839 ipw_queue_tx_free(priv, &priv->txq[2]);
3840 ipw_queue_tx_free(priv, &priv->txq[3]);
3841 }
3842
3843 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3844 {
3845 /* First 3 bytes are manufacturer */
3846 bssid[0] = priv->mac_addr[0];
3847 bssid[1] = priv->mac_addr[1];
3848 bssid[2] = priv->mac_addr[2];
3849
3850 /* Last bytes are random */
3851 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3852
3853 bssid[0] &= 0xfe; /* clear multicast bit */
3854 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3855 }
3856
/* Find an existing entry for 'bssid' in the station table, or append a
 * new one (writing it to the NIC's station table region).  Returns the
 * station index, or IPW_INVALID_STATION when the table is full. */
static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
{
	struct ipw_station_entry entry;
	int i;

	for (i = 0; i < priv->num_stations; i++) {
		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
			/* Another node is active in network */
			priv->missed_adhoc_beacons = 0;
			if (!(priv->config & CFG_STATIC_CHANNEL))
				/* when other nodes drop out, we drop out */
				priv->config &= ~CFG_ADHOC_PERSIST;

			return i;
		}
	}

	/* Loop fell through: i == num_stations here, so the table is
	 * full exactly when num_stations has reached MAX_STATIONS. */
	if (i == MAX_STATIONS)
		return IPW_INVALID_STATION;

	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);

	entry.reserved = 0;
	entry.support_mode = 0;
	memcpy(entry.mac_addr, bssid, ETH_ALEN);
	memcpy(priv->stations[i], bssid, ETH_ALEN);
	/* Mirror the new entry into the device's station table. */
	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
			 &entry, sizeof(entry));
	priv->num_stations++;

	return i;
}
3889
3890 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3891 {
3892 int i;
3893
3894 for (i = 0; i < priv->num_stations; i++)
3895 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3896 return i;
3897
3898 return IPW_INVALID_STATION;
3899 }
3900
/* Issue a disassociate command to the firmware.  If an association is
 * still in progress the work is deferred to the disassociate worker;
 * if we are not associated at all it is a no-op.  'quiet' selects the
 * HC_DISASSOC_QUIET variant of the command. */
static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
{
	int err;

	if (priv->status & STATUS_ASSOCIATING) {
		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
		queue_work(priv->workqueue, &priv->disassociate);
		return;
	}

	if (!(priv->status & STATUS_ASSOCIATED)) {
		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
		return;
	}

	IPW_DEBUG_ASSOC("Disassocation attempt from %pM "
			"on channel %d.\n",
			priv->assoc_request.bssid,
			priv->assoc_request.channel);

	/* Flip state before issuing the command so the rest of the
	 * driver sees the disassociation as in progress. */
	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
	priv->status |= STATUS_DISASSOCIATING;

	if (quiet)
		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
	else
		priv->assoc_request.assoc_type = HC_DISASSOCIATE;

	err = ipw_send_associate(priv, &priv->assoc_request);
	if (err) {
		IPW_DEBUG_HC("Attempt to send [dis]associate command "
			     "failed.\n");
		return;
	}

}
3937
3938 static int ipw_disassociate(void *data)
3939 {
3940 struct ipw_priv *priv = data;
3941 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3942 return 0;
3943 ipw_send_disassociate(data, 0);
3944 netif_carrier_off(priv->net_dev);
3945 return 1;
3946 }
3947
3948 static void ipw_bg_disassociate(struct work_struct *work)
3949 {
3950 struct ipw_priv *priv =
3951 container_of(work, struct ipw_priv, disassociate);
3952 mutex_lock(&priv->mutex);
3953 ipw_disassociate(priv);
3954 mutex_unlock(&priv->mutex);
3955 }
3956
/* Workqueue handler: push the current system configuration to the
 * firmware.  When the promiscuous interface is up, first widen the
 * frame-acceptance filters so the sniffer sees all traffic. */
static void ipw_system_config(struct work_struct *work)
{
	struct ipw_priv *priv =
	    container_of(work, struct ipw_priv, system_config);

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	ipw_send_system_config(priv);
}
3973
/* One entry mapping an 802.11 status code to a human-readable reason;
 * see ipw_status_codes[] and ipw_get_status_code(). */
struct ipw_status_code {
	u16 status;		/* 802.11 status code value */
	const char *reason;	/* textual description */
};
3978
/* 802.11 status codes and their textual descriptions (subset taken
 * from ethereal-0.10.6, per the file header).  Looked up by the low
 * byte of the status in ipw_get_status_code(). */
static const struct ipw_status_code ipw_status_codes[] = {
	{0x00, "Successful"},
	{0x01, "Unspecified failure"},
	{0x0A, "Cannot support all requested capabilities in the "
	 "Capability information field"},
	{0x0B, "Reassociation denied due to inability to confirm that "
	 "association exists"},
	{0x0C, "Association denied due to reason outside the scope of this "
	 "standard"},
	{0x0D,
	 "Responding station does not support the specified authentication "
	 "algorithm"},
	{0x0E,
	 "Received an Authentication frame with authentication sequence "
	 "transaction sequence number out of expected sequence"},
	{0x0F, "Authentication rejected because of challenge failure"},
	{0x10, "Authentication rejected due to timeout waiting for next "
	 "frame in sequence"},
	{0x11, "Association denied because AP is unable to handle additional "
	 "associated stations"},
	{0x12,
	 "Association denied due to requesting station not supporting all "
	 "of the datarates in the BSSBasicServiceSet Parameter"},
	{0x13,
	 "Association denied due to requesting station not supporting "
	 "short preamble operation"},
	{0x14,
	 "Association denied due to requesting station not supporting "
	 "PBCC encoding"},
	{0x15,
	 "Association denied due to requesting station not supporting "
	 "channel agility"},
	{0x19,
	 "Association denied due to requesting station not supporting "
	 "short slot operation"},
	{0x1A,
	 "Association denied due to requesting station not supporting "
	 "DSSS-OFDM operation"},
	{0x28, "Invalid Information Element"},
	{0x29, "Group Cipher is not valid"},
	{0x2A, "Pairwise Cipher is not valid"},
	{0x2B, "AKMP is not valid"},
	{0x2C, "Unsupported RSN IE version"},
	{0x2D, "Invalid RSN IE Capabilities"},
	{0x2E, "Cipher suite is rejected per security policy"},
};
4025
4026 static const char *ipw_get_status_code(u16 status)
4027 {
4028 int i;
4029 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4030 if (ipw_status_codes[i].status == (status & 0xff))
4031 return ipw_status_codes[i].reason;
4032 return "Unknown status value.";
4033 }
4034
4035 static void inline average_init(struct average *avg)
4036 {
4037 memset(avg, 0, sizeof(*avg));
4038 }
4039
4040 #define DEPTH_RSSI 8
4041 #define DEPTH_NOISE 16
4042 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4043 {
4044 return ((depth-1)*prev_avg + val)/depth;
4045 }
4046
4047 static void average_add(struct average *avg, s16 val)
4048 {
4049 avg->sum -= avg->entries[avg->pos];
4050 avg->sum += val;
4051 avg->entries[avg->pos++] = val;
4052 if (unlikely(avg->pos == AVG_ENTRIES)) {
4053 avg->init = 1;
4054 avg->pos = 0;
4055 }
4056 }
4057
4058 static s16 average_value(struct average *avg)
4059 {
4060 if (!unlikely(avg->init)) {
4061 if (avg->pos)
4062 return avg->sum / avg->pos;
4063 return 0;
4064 }
4065
4066 return avg->sum / AVG_ENTRIES;
4067 }
4068
/* Reset all link-quality statistics to their baseline values, used when
 * (re)associating.  Firmware-managed counters cannot be cleared, so we
 * snapshot their current values to normalize later deltas. */
static void ipw_reset_stats(struct ipw_priv *priv)
{
	u32 len = sizeof(u32);

	priv->quality = 0;

	average_init(&priv->average_missed_beacons);
	/* Seed the exponential averages with plausible starting points. */
	priv->exp_avg_rssi = -60;
	priv->exp_avg_noise = -85 + 0x100;

	priv->last_rate = 0;
	priv->last_missed_beacons = 0;
	priv->last_rx_packets = 0;
	priv->last_tx_packets = 0;
	priv->last_tx_failures = 0;

	/* Firmware managed, reset only when NIC is restarted, so we have to
	 * normalize on the current value */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
			&priv->last_rx_err, &len);
	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
			&priv->last_tx_failures, &len);

	/* Driver managed, reset with each association */
	priv->missed_adhoc_beacons = 0;
	priv->missed_beacons = 0;
	priv->tx_packets = 0;
	priv->rx_packets = 0;

}
4099
/* Return the highest configured rate, in bits per second, derived from
 * the rates mask (restricted to CCK rates when associated in B mode).
 * Falls back to the mode's nominal maximum if no mask bit matches. */
static u32 ipw_get_max_rate(struct ipw_priv *priv)
{
	u32 i = 0x80000000;
	u32 mask = priv->rates_mask;
	/* If currently associated in B mode, restrict the maximum
	 * rate match to B rates */
	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
		mask &= LIBIPW_CCK_RATES_MASK;

	/* TODO: Verify that the rate is supported by the current rates
	 * list. */

	/* Scan from the highest mask bit downwards for the first set bit. */
	while (i && !(mask & i))
		i >>= 1;
	switch (i) {
	case LIBIPW_CCK_RATE_1MB_MASK:
		return 1000000;
	case LIBIPW_CCK_RATE_2MB_MASK:
		return 2000000;
	case LIBIPW_CCK_RATE_5MB_MASK:
		return 5500000;
	case LIBIPW_OFDM_RATE_6MB_MASK:
		return 6000000;
	case LIBIPW_OFDM_RATE_9MB_MASK:
		return 9000000;
	case LIBIPW_CCK_RATE_11MB_MASK:
		return 11000000;
	case LIBIPW_OFDM_RATE_12MB_MASK:
		return 12000000;
	case LIBIPW_OFDM_RATE_18MB_MASK:
		return 18000000;
	case LIBIPW_OFDM_RATE_24MB_MASK:
		return 24000000;
	case LIBIPW_OFDM_RATE_36MB_MASK:
		return 36000000;
	case LIBIPW_OFDM_RATE_48MB_MASK:
		return 48000000;
	case LIBIPW_OFDM_RATE_54MB_MASK:
		return 54000000;
	}

	/* No recognized bit set: fall back to the mode's top rate. */
	if (priv->ieee->mode == IEEE_B)
		return 11000000;
	else
		return 54000000;
}
4146
/* Return the current Tx rate in bits per second: 0 when not associated,
 * the firmware-reported rate once enough packets have flowed, otherwise
 * the configured maximum rate as an estimate. */
static u32 ipw_get_current_rate(struct ipw_priv *priv)
{
	u32 rate, len = sizeof(rate);
	int err;

	if (!(priv->status & STATUS_ASSOCIATED))
		return 0;

	/* Only trust the firmware's rate ordinal after a meaningful
	 * amount of traffic; before that, report the configured max. */
	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
				      &len);
		if (err) {
			IPW_DEBUG_INFO("failed querying ordinals.\n");
			return 0;
		}
	} else
		return ipw_get_max_rate(priv);

	/* Map the firmware rate code to bits per second. */
	switch (rate) {
	case IPW_TX_RATE_1MB:
		return 1000000;
	case IPW_TX_RATE_2MB:
		return 2000000;
	case IPW_TX_RATE_5MB:
		return 5500000;
	case IPW_TX_RATE_6MB:
		return 6000000;
	case IPW_TX_RATE_9MB:
		return 9000000;
	case IPW_TX_RATE_11MB:
		return 11000000;
	case IPW_TX_RATE_12MB:
		return 12000000;
	case IPW_TX_RATE_18MB:
		return 18000000;
	case IPW_TX_RATE_24MB:
		return 24000000;
	case IPW_TX_RATE_36MB:
		return 36000000;
	case IPW_TX_RATE_48MB:
		return 48000000;
	case IPW_TX_RATE_54MB:
		return 54000000;
	}

	/* Unrecognized rate code. */
	return 0;
}
4194
#define IPW_STATS_INTERVAL (2 * HZ)
/* Periodic link-quality computation.  Pulls firmware counters, derives
 * per-interval deltas, scores beacon/rate/rx/tx/signal quality on a
 * 0-100 scale, stores the minimum in priv->quality, and re-queues
 * itself every IPW_STATS_INTERVAL while associated. */
static void ipw_gather_stats(struct ipw_priv *priv)
{
	u32 rx_err, rx_err_delta, rx_packets_delta;
	u32 tx_failures, tx_failures_delta, tx_packets_delta;
	u32 missed_beacons_percent, missed_beacons_delta;
	u32 quality = 0;
	u32 len = sizeof(u32);
	s16 rssi;
	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
	    rate_quality;
	u32 max_rate;

	/* Not associated: quality is 0 and the work is not re-queued. */
	if (!(priv->status & STATUS_ASSOCIATED)) {
		priv->quality = 0;
		return;
	}

	/* Update the statistics */
	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
			&priv->missed_beacons, &len);
	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
	priv->last_missed_beacons = priv->missed_beacons;
	if (priv->assoc_request.beacon_interval) {
		/* Scale the missed-beacon count to a percentage of the
		 * beacons expected during one stats interval. */
		missed_beacons_percent = missed_beacons_delta *
		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
		    (IPW_STATS_INTERVAL * 10);
	} else {
		missed_beacons_percent = 0;
	}
	average_add(&priv->average_missed_beacons, missed_beacons_percent);

	/* Per-interval deltas of the firmware's cumulative counters. */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
	rx_err_delta = rx_err - priv->last_rx_err;
	priv->last_rx_err = rx_err;

	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
	tx_failures_delta = tx_failures - priv->last_tx_failures;
	priv->last_tx_failures = tx_failures;

	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
	priv->last_rx_packets = priv->rx_packets;

	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
	priv->last_tx_packets = priv->tx_packets;

	/* Calculate quality based on the following:
	 *
	 * Missed beacon: 100% = 0, 0% = 70% missed
	 * Rate: 60% = 1Mbs, 100% = Max
	 * Rx and Tx errors represent a straight % of total Rx/Tx
	 * RSSI: 100% = > -50,  0% = < -80
	 * Rx errors: 100% = 0, 0% = 50% missed
	 *
	 * The lowest computed quality is used.
	 *
	 */
#define BEACON_THRESHOLD 5
	beacon_quality = 100 - missed_beacons_percent;
	if (beacon_quality < BEACON_THRESHOLD)
		beacon_quality = 0;
	else
		/* Rescale the [THRESHOLD, 100] range onto [0, 100]. */
		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
		    (100 - BEACON_THRESHOLD);
	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
			beacon_quality, missed_beacons_percent);

	priv->last_rate = ipw_get_current_rate(priv);
	max_rate = ipw_get_max_rate(priv);
	/* Linear map: current rate at max -> 100%, near zero -> 60%. */
	rate_quality = priv->last_rate * 40 / max_rate + 60;
	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
			rate_quality, priv->last_rate / 1000000);

	/* Error ratios only count once traffic is non-trivial (>100
	 * packets in the interval); otherwise assume perfect quality. */
	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
		rx_quality = 100 - (rx_err_delta * 100) /
		    (rx_packets_delta + rx_err_delta);
	else
		rx_quality = 100;
	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
			rx_quality, rx_err_delta, rx_packets_delta);

	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
		tx_quality = 100 - (tx_failures_delta * 100) /
		    (tx_packets_delta + tx_failures_delta);
	else
		tx_quality = 100;
	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
			tx_quality, tx_failures_delta, tx_packets_delta);

	/* Quadratic mapping of RSSI between worst_rssi and perfect_rssi
	 * onto a 0-100 percentage, then clamped. */
	rssi = priv->exp_avg_rssi;
	signal_quality =
	    (100 *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
	     (priv->ieee->perfect_rssi - rssi) *
	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
	      62 * (priv->ieee->perfect_rssi - rssi))) /
	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
	if (signal_quality > 100)
		signal_quality = 100;
	else if (signal_quality < 1)
		signal_quality = 0;

	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
			signal_quality, rssi);

	/* Overall quality is the worst of the individual scores. */
	quality = min(rx_quality, signal_quality);
	quality = min(tx_quality, quality);
	quality = min(rate_quality, quality);
	quality = min(beacon_quality, quality);
	if (quality == beacon_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
				quality);
	if (quality == rate_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
				quality);
	if (quality == tx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
				quality);
	if (quality == rx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
				quality);
	if (quality == signal_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
				quality);

	priv->quality = quality;

	/* Re-arm ourselves for the next sampling interval. */
	queue_delayed_work(priv->workqueue, &priv->gather_stats,
			   IPW_STATS_INTERVAL);
}
4327
4328 static void ipw_bg_gather_stats(struct work_struct *work)
4329 {
4330 struct ipw_priv *priv =
4331 container_of(work, struct ipw_priv, gather_stats.work);
4332 mutex_lock(&priv->mutex);
4333 ipw_gather_stats(priv);
4334 mutex_unlock(&priv->mutex);
4335 }
4336
/* Missed beacon behavior:
 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
 * Above disassociate threshold, give up and stop scanning.
 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
static void ipw_handle_missed_beacon(struct ipw_priv *priv,
				     int missed_count)
{
	/* Remember the most recent count for other reporting paths. */
	priv->notif_missed_beacons = missed_count;

	/* Case 1: past the disassociate threshold while associated --
	 * give up on this AP entirely. */
	if (missed_count > priv->disassociate_threshold &&
	    priv->status & STATUS_ASSOCIATED) {
		/* If associated and we've hit the missed
		 * beacon threshold, disassociate, turn
		 * off roaming, and abort any active scans */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
			  IPW_DL_STATE | IPW_DL_ASSOC,
			  "Missed beacon: %d - disassociate\n", missed_count);
		priv->status &= ~STATUS_ROAMING;
		if (priv->status & STATUS_SCANNING) {
			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
				  IPW_DL_STATE,
				  "Aborting scan with missed beacon.\n");
			queue_work(priv->workqueue, &priv->abort_scan);
		}

		queue_work(priv->workqueue, &priv->disassociate);
		return;
	}

	/* Case 2: a roam is already in flight -- nothing more to do. */
	if (priv->status & STATUS_ROAMING) {
		/* If we are currently roaming, then just
		 * print a debug statement... */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - roam in progress\n",
			  missed_count);
		return;
	}

	/* Case 3: between the two thresholds and roaming is enabled
	 * ('roaming' is a file-scope knob -- presumably the driver's
	 * "roaming" module parameter; confirm against the top of file). */
	if (roaming &&
	    (missed_count > priv->roaming_threshold &&
	     missed_count <= priv->disassociate_threshold)) {
		/* If we are not already roaming, set the ROAM
		 * bit in the status and kick off a scan.
		 * This can happen several times before we reach
		 * disassociate_threshold. */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - initiate "
			  "roaming\n", missed_count);
		if (!(priv->status & STATUS_ROAMING)) {
			priv->status |= STATUS_ROAMING;
			if (!(priv->status & STATUS_SCANNING))
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan, 0);
		}
		return;
	}

	/* Case 4: not roaming, but a long-running scan may be wedging
	 * the firmware -- cancel it past IPW_MB_SCAN_CANCEL_THRESHOLD. */
	if (priv->status & STATUS_SCANNING &&
	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
		/* Stop scan to keep fw from getting
		 * stuck (only if we aren't roaming --
		 * otherwise we'll never scan more than 2 or 3
		 * channels..) */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
			  "Aborting scan with missed beacon.\n");
		queue_work(priv->workqueue, &priv->abort_scan);
	}

	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
}
4408
4409 static void ipw_scan_event(struct work_struct *work)
4410 {
4411 union iwreq_data wrqu;
4412
4413 struct ipw_priv *priv =
4414 container_of(work, struct ipw_priv, scan_event.work);
4415
4416 wrqu.data.length = 0;
4417 wrqu.data.flags = 0;
4418 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4419 }
4420
4421 static void handle_scan_event(struct ipw_priv *priv)
4422 {
4423 /* Only userspace-requested scan completion events go out immediately */
4424 if (!priv->user_requested_scan) {
4425 if (!delayed_work_pending(&priv->scan_event))
4426 queue_delayed_work(priv->workqueue, &priv->scan_event,
4427 round_jiffies_relative(msecs_to_jiffies(4000)));
4428 } else {
4429 union iwreq_data wrqu;
4430
4431 priv->user_requested_scan = 0;
4432 cancel_delayed_work(&priv->scan_event);
4433
4434 wrqu.data.length = 0;
4435 wrqu.data.flags = 0;
4436 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4437 }
4438 }
4439
/**
 * Handle host notification packet.
 * Called from interrupt routine
 *
 * Dispatches on notif->subtype.  Each handler validates the payload
 * length ('size') against the expected struct before trusting it.
 * Real work (associate, disassociate, roam, scan) is deferred to the
 * driver workqueue; only status-bit updates happen here.
 */
static void ipw_rx_notification(struct ipw_priv *priv,
				struct ipw_rx_notification *notif)
{
	DECLARE_SSID_BUF(ssid);
	u16 size = le16_to_cpu(notif->size);

	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);

	switch (notif->subtype) {
	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
			struct notif_association *assoc = &notif->u.assoc;

			switch (assoc->state) {
			case CMAS_ASSOCIATED:{
					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "associated: '%s' %pM \n",
						  print_ssid(ssid, priv->essid,
							     priv->essid_len),
						  priv->bssid);

					switch (priv->ieee->iw_mode) {
					case IW_MODE_INFRA:
						memcpy(priv->ieee->bssid,
						       priv->bssid, ETH_ALEN);
						break;

					case IW_MODE_ADHOC:
						memcpy(priv->ieee->bssid,
						       priv->bssid, ETH_ALEN);

						/* clear out the station table */
						priv->num_stations = 0;

						IPW_DEBUG_ASSOC
						    ("queueing adhoc check\n");
						queue_delayed_work(priv->
								   workqueue,
								   &priv->
								   adhoc_check,
								   le16_to_cpu(priv->
									       assoc_request.
									       beacon_interval));
						break;
					}

					priv->status &= ~STATUS_ASSOCIATING;
					priv->status |= STATUS_ASSOCIATED;
					queue_work(priv->workqueue,
						   &priv->system_config);

#ifdef CONFIG_IPW2200_QOS
#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
					/* Hand association responses to the
					 * QoS management code.  NOTE(review):
					 * .len = size - 1 -- off-by-one looks
					 * deliberate but is undocumented;
					 * confirm against libipw_rx_mgt. */
					if ((priv->status & STATUS_AUTH) &&
					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
					     == IEEE80211_STYPE_ASSOC_RESP)) {
						if ((sizeof
						     (struct
						      libipw_assoc_response)
						     <= size)
						    && (size <= 2314)) {
							struct
							libipw_rx_stats
							    stats = {
								.len = size - 1,
							};

							IPW_DEBUG_QOS
							    ("QoS Associate "
							     "size %d\n", size);
							libipw_rx_mgt(priv->
								      ieee,
								      (struct
								       libipw_hdr_4addr
								       *)
								      &notif->u.raw, &stats);
						}
					}
#endif

					schedule_work(&priv->link_up);

					break;
				}

			case CMAS_AUTHENTICATED:{
					/* Transition back to AUTHENTICATED
					 * while associated/authed means the
					 * AP deauthenticated us. */
					if (priv->
					    status & (STATUS_ASSOCIATED |
						      STATUS_AUTH)) {
						struct notif_authenticate *auth
						    = &notif->u.auth;
						IPW_DEBUG(IPW_DL_NOTIF |
							  IPW_DL_STATE |
							  IPW_DL_ASSOC,
							  "deauthenticated: '%s' "
							  "%pM"
							  ": (0x%04X) - %s \n",
							  print_ssid(ssid,
								     priv->
								     essid,
								     priv->
								     essid_len),
							  priv->bssid,
							  le16_to_cpu(auth->status),
							  ipw_get_status_code
							  (le16_to_cpu
							   (auth->status)));

						priv->status &=
						    ~(STATUS_ASSOCIATING |
						      STATUS_AUTH |
						      STATUS_ASSOCIATED);

						schedule_work(&priv->link_down);
						break;
					}

					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "authenticated: '%s' %pM\n",
						  print_ssid(ssid, priv->essid,
							     priv->essid_len),
						  priv->bssid);
					break;
				}

			case CMAS_INIT:{
					/* Firmware dropped back to INIT:
					 * association failed or we were
					 * disassociated. */
					if (priv->status & STATUS_AUTH) {
						struct
						    libipw_assoc_response
						*resp;
						resp =
						    (struct
						     libipw_assoc_response
						     *)&notif->u.raw;
						IPW_DEBUG(IPW_DL_NOTIF |
							  IPW_DL_STATE |
							  IPW_DL_ASSOC,
							  "association failed (0x%04X): %s\n",
							  le16_to_cpu(resp->status),
							  ipw_get_status_code
							  (le16_to_cpu
							   (resp->status)));
					}

					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "disassociated: '%s' %pM \n",
						  print_ssid(ssid, priv->essid,
							     priv->essid_len),
						  priv->bssid);

					priv->status &=
					    ~(STATUS_DISASSOCIATING |
					      STATUS_ASSOCIATING |
					      STATUS_ASSOCIATED | STATUS_AUTH);
					/* IBSS networks have no AP to keep
					 * them alive; drop ours from the
					 * network list on disassociation. */
					if (priv->assoc_network
					    && (priv->assoc_network->
						capability &
						WLAN_CAPABILITY_IBSS))
						ipw_remove_current_network
						    (priv);

					schedule_work(&priv->link_down);

					break;
				}

			case CMAS_RX_ASSOC_RESP:
				break;

			default:
				IPW_ERROR("assoc: unknown (%d)\n",
					  assoc->state);
				break;
			}

			break;
		}

	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
			struct notif_authenticate *auth = &notif->u.auth;
			switch (auth->state) {
			case CMAS_AUTHENTICATED:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
					  "authenticated: '%s' %pM \n",
					  print_ssid(ssid, priv->essid,
						     priv->essid_len),
					  priv->bssid);
				priv->status |= STATUS_AUTH;
				break;

			case CMAS_INIT:
				/* Back to INIT: authentication failed (if we
				 * had STATUS_AUTH) or we were deauthed. */
				if (priv->status & STATUS_AUTH) {
					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "authentication failed (0x%04X): %s\n",
						  le16_to_cpu(auth->status),
						  ipw_get_status_code(le16_to_cpu
								      (auth->
								       status)));
				}
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC,
					  "deauthenticated: '%s' %pM\n",
					  print_ssid(ssid, priv->essid,
						     priv->essid_len),
					  priv->bssid);

				priv->status &= ~(STATUS_ASSOCIATING |
						  STATUS_AUTH |
						  STATUS_ASSOCIATED);

				schedule_work(&priv->link_down);
				break;

			/* The remaining states only trace the 802.11
			 * shared-key auth handshake progression. */
			case CMAS_TX_AUTH_SEQ_1:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
				break;
			case CMAS_RX_AUTH_SEQ_2:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
				break;
			case CMAS_AUTH_SEQ_1_PASS:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
				break;
			case CMAS_AUTH_SEQ_1_FAIL:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
				break;
			case CMAS_TX_AUTH_SEQ_3:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
				break;
			case CMAS_RX_AUTH_SEQ_4:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
				break;
			case CMAS_AUTH_SEQ_2_PASS:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
				break;
			case CMAS_AUTH_SEQ_2_FAIL:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
				break;
			case CMAS_TX_ASSOC:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "TX_ASSOC\n");
				break;
			case CMAS_RX_ASSOC_RESP:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");

				break;
			case CMAS_ASSOCIATED:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "ASSOCIATED\n");
				break;
			default:
				IPW_DEBUG_NOTIF("auth: failure - %d\n",
						auth->state);
				break;
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
			struct notif_channel_result *x =
			    &notif->u.channel_result;

			if (size == sizeof(*x)) {
				IPW_DEBUG_SCAN("Scan result for channel %d\n",
					       x->channel_num);
			} else {
				IPW_DEBUG_SCAN("Scan result of wrong size %d "
					       "(should be %zd)\n",
					       size, sizeof(*x));
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
			struct notif_scan_complete *x = &notif->u.scan_complete;
			if (size == sizeof(*x)) {
				IPW_DEBUG_SCAN
				    ("Scan completed: type %d, %d channels, "
				     "%d status\n", x->scan_type,
				     x->num_channels, x->status);
			} else {
				IPW_ERROR("Scan completed of wrong size %d "
					  "(should be %zd)\n",
					  size, sizeof(*x));
			}

			/* Clear scan state and wake anyone blocked waiting
			 * for scan completion. */
			priv->status &=
			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);

			wake_up_interruptible(&priv->wait_state);
			cancel_delayed_work(&priv->scan_check);

			if (priv->status & STATUS_EXIT_PENDING)
				break;

			priv->ieee->scans++;

#ifdef CONFIG_IPW2200_MONITOR
			/* Monitor mode just keeps scanning forever. */
			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
				priv->status |= STATUS_SCAN_FORCED;
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan, 0);
				break;
			}
			priv->status &= ~STATUS_SCAN_FORCED;
#endif				/* CONFIG_IPW2200_MONITOR */

			/* Do queued direct scans first */
			if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
				queue_delayed_work(priv->workqueue,
						   &priv->request_direct_scan, 0);
			}

			/* Decide what follows the scan: associate, continue
			 * a roam, run a queued scan, or background-scan. */
			if (!(priv->status & (STATUS_ASSOCIATED |
					      STATUS_ASSOCIATING |
					      STATUS_ROAMING |
					      STATUS_DISASSOCIATING)))
				queue_work(priv->workqueue, &priv->associate);
			else if (priv->status & STATUS_ROAMING) {
				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
					/* If a scan completed and we are in roam mode, then
					 * the scan that completed was the one requested as a
					 * result of entering roam... so, schedule the
					 * roam work */
					queue_work(priv->workqueue,
						   &priv->roam);
				else
					/* Don't schedule if we aborted the scan */
					priv->status &= ~STATUS_ROAMING;
			} else if (priv->status & STATUS_SCAN_PENDING)
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan, 0);
			else if (priv->config & CFG_BACKGROUND_SCAN
				 && priv->status & STATUS_ASSOCIATED)
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan,
						   round_jiffies_relative(HZ));

			/* Send an empty event to user space.
			 * We don't send the received data on the event because
			 * it would require us to do complex transcoding, and
			 * we want to minimise the work done in the irq handler
			 * Use a request to extract the data.
			 * Also, we generate this even for any scan, regardless
			 * on how the scan was initiated. User space can just
			 * sync on periodic scan to get fresh data...
			 * Jean II */
			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
				handle_scan_event(priv);
			break;
		}

	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
			struct notif_frag_length *x = &notif->u.frag_len;

			if (size == sizeof(*x))
				IPW_ERROR("Frag length: %d\n",
					  le16_to_cpu(x->frag_length));
			else
				IPW_ERROR("Frag length of wrong size %d "
					  "(should be %zd)\n",
					  size, sizeof(*x));
			break;
		}

	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
			struct notif_link_deterioration *x =
			    &notif->u.link_deterioration;

			if (size == sizeof(*x)) {
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
					"link deterioration: type %d, cnt %d\n",
					x->silence_notification_type,
					x->silence_count);
				/* Keep a copy for later inspection. */
				memcpy(&priv->last_link_deterioration, x,
				       sizeof(*x));
			} else {
				IPW_ERROR("Link Deterioration of wrong size %d "
					  "(should be %zd)\n",
					  size, sizeof(*x));
			}
			break;
		}

	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
			IPW_ERROR("Dino config\n");
			if (priv->hcmd
			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");

			break;
		}

	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
			struct notif_beacon_state *x = &notif->u.beacon_state;
			if (size != sizeof(*x)) {
				IPW_ERROR
				    ("Beacon state of wrong size %d (should "
				     "be %zd)\n", size, sizeof(*x));
				break;
			}

			/* Forward consecutive missed-beacon counts to the
			 * roam/disassociate state machine. */
			if (le32_to_cpu(x->state) ==
			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
				ipw_handle_missed_beacon(priv,
							 le32_to_cpu(x->
								     number));

			break;
		}

	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
			if (size == sizeof(*x)) {
				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
					  "0x%02x station %d\n",
					  x->key_state, x->security_type,
					  x->station_index);
				break;
			}

			IPW_ERROR
			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
			     size, sizeof(*x));
			break;
		}

	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
			struct notif_calibration *x = &notif->u.calibration;

			if (size == sizeof(*x)) {
				memcpy(&priv->calib, x, sizeof(*x));
				IPW_DEBUG_INFO("TODO: Calibration\n");
				break;
			}

			IPW_ERROR
			    ("Calibration of wrong size %d (should be %zd)\n",
			     size, sizeof(*x));
			break;
		}

	case HOST_NOTIFICATION_NOISE_STATS:{
			/* Fold the low byte of the reported noise into the
			 * running exponential average. */
			if (size == sizeof(u32)) {
				priv->exp_avg_noise =
				    exponential_average(priv->exp_avg_noise,
							(u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
							DEPTH_NOISE);
				break;
			}

			IPW_ERROR
			    ("Noise stat is wrong size %d (should be %zd)\n",
			     size, sizeof(u32));
			break;
		}

	default:
		IPW_DEBUG_NOTIF("Unknown notification: "
				"subtype=%d,flags=0x%2x,size=%d\n",
				notif->subtype, notif->flags, size);
	}
}
4919
4920 /**
4921 * Destroys all DMA structures and initialise them again
4922 *
4923 * @param priv
4924 * @return error code
4925 */
4926 static int ipw_queue_reset(struct ipw_priv *priv)
4927 {
4928 int rc = 0;
4929 /** @todo customize queue sizes */
4930 int nTx = 64, nTxCmd = 8;
4931 ipw_tx_queue_free(priv);
4932 /* Tx CMD queue */
4933 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4934 IPW_TX_CMD_QUEUE_READ_INDEX,
4935 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4936 IPW_TX_CMD_QUEUE_BD_BASE,
4937 IPW_TX_CMD_QUEUE_BD_SIZE);
4938 if (rc) {
4939 IPW_ERROR("Tx Cmd queue init failed\n");
4940 goto error;
4941 }
4942 /* Tx queue(s) */
4943 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4944 IPW_TX_QUEUE_0_READ_INDEX,
4945 IPW_TX_QUEUE_0_WRITE_INDEX,
4946 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4947 if (rc) {
4948 IPW_ERROR("Tx 0 queue init failed\n");
4949 goto error;
4950 }
4951 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4952 IPW_TX_QUEUE_1_READ_INDEX,
4953 IPW_TX_QUEUE_1_WRITE_INDEX,
4954 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4955 if (rc) {
4956 IPW_ERROR("Tx 1 queue init failed\n");
4957 goto error;
4958 }
4959 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4960 IPW_TX_QUEUE_2_READ_INDEX,
4961 IPW_TX_QUEUE_2_WRITE_INDEX,
4962 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4963 if (rc) {
4964 IPW_ERROR("Tx 2 queue init failed\n");
4965 goto error;
4966 }
4967 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4968 IPW_TX_QUEUE_3_READ_INDEX,
4969 IPW_TX_QUEUE_3_WRITE_INDEX,
4970 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4971 if (rc) {
4972 IPW_ERROR("Tx 3 queue init failed\n");
4973 goto error;
4974 }
4975 /* statistics */
4976 priv->rx_bufs_min = 0;
4977 priv->rx_pend_max = 0;
4978 return rc;
4979
4980 error:
4981 ipw_tx_queue_free(priv);
4982 return rc;
4983 }
4984
4985 /**
4986 * Reclaim Tx queue entries no more used by NIC.
4987 *
4988 * When FW advances 'R' index, all entries between old and
4989 * new 'R' index need to be reclaimed. As result, some free space
4990 * forms. If there is enough free space (> low mark), wake Tx queue.
4991 *
4992 * @note Need to protect against garbage in 'R' index
4993 * @param priv
4994 * @param txq
4995 * @param qindex
4996 * @return Number of used entries remains in the queue
4997 */
4998 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4999 struct clx2_tx_queue *txq, int qindex)
5000 {
5001 u32 hw_tail;
5002 int used;
5003 struct clx2_queue *q = &txq->q;
5004
5005 hw_tail = ipw_read32(priv, q->reg_r);
5006 if (hw_tail >= q->n_bd) {
5007 IPW_ERROR
5008 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5009 hw_tail, q->n_bd);
5010 goto done;
5011 }
5012 for (; q->last_used != hw_tail;
5013 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5014 ipw_queue_tx_free_tfd(priv, txq);
5015 priv->tx_packets++;
5016 }
5017 done:
5018 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5019 (qindex >= 0))
5020 netif_wake_queue(priv->net_dev);
5021 used = q->first_empty - q->last_used;
5022 if (used < 0)
5023 used += q->n_bd;
5024
5025 return used;
5026 }
5027
5028 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5029 int len, int sync)
5030 {
5031 struct clx2_tx_queue *txq = &priv->txq_cmd;
5032 struct clx2_queue *q = &txq->q;
5033 struct tfd_frame *tfd;
5034
5035 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5036 IPW_ERROR("No space for Tx\n");
5037 return -EBUSY;
5038 }
5039
5040 tfd = &txq->bd[q->first_empty];
5041 txq->txb[q->first_empty] = NULL;
5042
5043 memset(tfd, 0, sizeof(*tfd));
5044 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5045 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5046 priv->hcmd_seq++;
5047 tfd->u.cmd.index = hcmd;
5048 tfd->u.cmd.length = len;
5049 memcpy(tfd->u.cmd.payload, buf, len);
5050 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5051 ipw_write32(priv, q->reg_w, q->first_empty);
5052 _ipw_read32(priv, 0x90);
5053
5054 return 0;
5055 }
5056
5057 /*
5058 * Rx theory of operation
5059 *
5060 * The host allocates 32 DMA target addresses and passes the host address
5061 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5062 * 0 to 31
5063 *
5064 * Rx Queue Indexes
5065 * The host/firmware share two index registers for managing the Rx buffers.
5066 *
5067 * The READ index maps to the first position that the firmware may be writing
5068 * to -- the driver can read up to (but not including) this position and get
5069 * good data.
5070 * The READ index is managed by the firmware once the card is enabled.
5071 *
5072 * The WRITE index maps to the last position the driver has read from -- the
5073 * position preceding WRITE is the last slot the firmware can place a packet.
5074 *
5075 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5076 * WRITE = READ.
5077 *
5078 * During initialization the host sets up the READ queue position to the first
5079 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5080 *
5081 * When the firmware places a packet in a buffer it will advance the READ index
5082 * and fire the RX interrupt. The driver can then query the READ index and
5083 * process as many packets as possible, moving the WRITE index forward as it
5084 * resets the Rx queue buffers with new memory.
5085 *
5086 * The management in the driver is as follows:
5087 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5088 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 * to replenish the ipw->rxq->rx_free.
5090 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5091 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5092 * 'processed' and 'read' driver indexes as well)
5093 * + A received packet is processed and handed to the kernel network stack,
5094 * detached from the ipw->rxq. The driver 'processed' index is updated.
5095 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5096 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5097 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5098 * were enough free buffers and RX_STALLED is set it is cleared.
5099 *
5100 *
5101 * Driver sequence:
5102 *
5103 * ipw_rx_queue_alloc() Allocates rx_free
5104 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5105 * ipw_rx_queue_restock
5106 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5107 * queue, updates firmware pointers, and updates
5108 * the WRITE index. If insufficient rx_free buffers
5109 * are available, schedules ipw_rx_queue_replenish
5110 *
5111 * -- enable interrupts --
5112 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5113 * READ INDEX, detaching the SKB from the pool.
5114 * Moves the packet buffer from queue to rx_used.
5115 * Calls ipw_rx_queue_restock to refill any empty
5116 * slots.
5117 * ...
5118 *
5119 */
5120
5121 /*
5122 * If there are slots in the RX queue that need to be restocked,
5123 * and we have free pre-allocated buffers, fill the ranks as much
5124 * as we can pulling from rx_free.
5125 *
5126 * This moves the 'write' index forward to catch up with 'processed', and
5127 * also updates the memory address in the firmware to reference the new
5128 * target buffer.
5129 */
5130 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5131 {
5132 struct ipw_rx_queue *rxq = priv->rxq;
5133 struct list_head *element;
5134 struct ipw_rx_mem_buffer *rxb;
5135 unsigned long flags;
5136 int write;
5137
5138 spin_lock_irqsave(&rxq->lock, flags);
5139 write = rxq->write;
5140 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5141 element = rxq->rx_free.next;
5142 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5143 list_del(element);
5144
5145 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5146 rxb->dma_addr);
5147 rxq->queue[rxq->write] = rxb;
5148 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5149 rxq->free_count--;
5150 }
5151 spin_unlock_irqrestore(&rxq->lock, flags);
5152
5153 /* If the pre-allocated buffer pool is dropping low, schedule to
5154 * refill it */
5155 if (rxq->free_count <= RX_LOW_WATERMARK)
5156 queue_work(priv->workqueue, &priv->rx_replenish);
5157
5158 /* If we've added more space for the firmware to place data, tell it */
5159 if (write != rxq->write)
5160 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5161 }
5162
5163 /*
5164 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5165 * Also restock the Rx queue via ipw_rx_queue_restock.
5166 *
 * This is called as a scheduled work item (except for during initialization)
5168 */
5169 static void ipw_rx_queue_replenish(void *data)
5170 {
5171 struct ipw_priv *priv = data;
5172 struct ipw_rx_queue *rxq = priv->rxq;
5173 struct list_head *element;
5174 struct ipw_rx_mem_buffer *rxb;
5175 unsigned long flags;
5176
5177 spin_lock_irqsave(&rxq->lock, flags);
5178 while (!list_empty(&rxq->rx_used)) {
5179 element = rxq->rx_used.next;
5180 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5181 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5182 if (!rxb->skb) {
5183 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5184 priv->net_dev->name);
5185 /* We don't reschedule replenish work here -- we will
5186 * call the restock method and if it still needs
5187 * more buffers it will schedule replenish */
5188 break;
5189 }
5190 list_del(element);
5191
5192 rxb->dma_addr =
5193 pci_map_single(priv->pci_dev, rxb->skb->data,
5194 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5195
5196 list_add_tail(&rxb->list, &rxq->rx_free);
5197 rxq->free_count++;
5198 }
5199 spin_unlock_irqrestore(&rxq->lock, flags);
5200
5201 ipw_rx_queue_restock(priv);
5202 }
5203
5204 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5205 {
5206 struct ipw_priv *priv =
5207 container_of(work, struct ipw_priv, rx_replenish);
5208 mutex_lock(&priv->mutex);
5209 ipw_rx_queue_replenish(priv);
5210 mutex_unlock(&priv->mutex);
5211 }
5212
5213 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5214 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5215 * This free routine walks the list of POOL entries and if SKB is set to
5216 * non NULL it is unmapped and freed
5217 */
5218 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5219 {
5220 int i;
5221
5222 if (!rxq)
5223 return;
5224
5225 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5226 if (rxq->pool[i].skb != NULL) {
5227 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5228 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5229 dev_kfree_skb(rxq->pool[i].skb);
5230 }
5231 }
5232
5233 kfree(rxq);
5234 }
5235
5236 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5237 {
5238 struct ipw_rx_queue *rxq;
5239 int i;
5240
5241 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5242 if (unlikely(!rxq)) {
5243 IPW_ERROR("memory allocation failed\n");
5244 return NULL;
5245 }
5246 spin_lock_init(&rxq->lock);
5247 INIT_LIST_HEAD(&rxq->rx_free);
5248 INIT_LIST_HEAD(&rxq->rx_used);
5249
5250 /* Fill the rx_used queue with _all_ of the Rx buffers */
5251 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5252 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5253
5254 /* Set us so that we have processed and used all buffers, but have
5255 * not restocked the Rx queue with fresh buffers */
5256 rxq->read = rxq->write = 0;
5257 rxq->free_count = 0;
5258
5259 return rxq;
5260 }
5261
5262 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5263 {
5264 rate &= ~LIBIPW_BASIC_RATE_MASK;
5265 if (ieee_mode == IEEE_A) {
5266 switch (rate) {
5267 case LIBIPW_OFDM_RATE_6MB:
5268 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5269 1 : 0;
5270 case LIBIPW_OFDM_RATE_9MB:
5271 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5272 1 : 0;
5273 case LIBIPW_OFDM_RATE_12MB:
5274 return priv->
5275 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5276 case LIBIPW_OFDM_RATE_18MB:
5277 return priv->
5278 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5279 case LIBIPW_OFDM_RATE_24MB:
5280 return priv->
5281 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5282 case LIBIPW_OFDM_RATE_36MB:
5283 return priv->
5284 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5285 case LIBIPW_OFDM_RATE_48MB:
5286 return priv->
5287 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5288 case LIBIPW_OFDM_RATE_54MB:
5289 return priv->
5290 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5291 default:
5292 return 0;
5293 }
5294 }
5295
5296 /* B and G mixed */
5297 switch (rate) {
5298 case LIBIPW_CCK_RATE_1MB:
5299 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5300 case LIBIPW_CCK_RATE_2MB:
5301 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5302 case LIBIPW_CCK_RATE_5MB:
5303 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5304 case LIBIPW_CCK_RATE_11MB:
5305 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5306 }
5307
5308 /* If we are limited to B modulations, bail at this point */
5309 if (ieee_mode == IEEE_B)
5310 return 0;
5311
5312 /* G */
5313 switch (rate) {
5314 case LIBIPW_OFDM_RATE_6MB:
5315 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5316 case LIBIPW_OFDM_RATE_9MB:
5317 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5318 case LIBIPW_OFDM_RATE_12MB:
5319 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5320 case LIBIPW_OFDM_RATE_18MB:
5321 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5322 case LIBIPW_OFDM_RATE_24MB:
5323 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5324 case LIBIPW_OFDM_RATE_36MB:
5325 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5326 case LIBIPW_OFDM_RATE_48MB:
5327 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5328 case LIBIPW_OFDM_RATE_54MB:
5329 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5330 }
5331
5332 return 0;
5333 }
5334
5335 static int ipw_compatible_rates(struct ipw_priv *priv,
5336 const struct libipw_network *network,
5337 struct ipw_supported_rates *rates)
5338 {
5339 int num_rates, i;
5340
5341 memset(rates, 0, sizeof(*rates));
5342 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5343 rates->num_rates = 0;
5344 for (i = 0; i < num_rates; i++) {
5345 if (!ipw_is_rate_in_mask(priv, network->mode,
5346 network->rates[i])) {
5347
5348 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5349 IPW_DEBUG_SCAN("Adding masked mandatory "
5350 "rate %02X\n",
5351 network->rates[i]);
5352 rates->supported_rates[rates->num_rates++] =
5353 network->rates[i];
5354 continue;
5355 }
5356
5357 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5358 network->rates[i], priv->rates_mask);
5359 continue;
5360 }
5361
5362 rates->supported_rates[rates->num_rates++] = network->rates[i];
5363 }
5364
5365 num_rates = min(network->rates_ex_len,
5366 (u8) (IPW_MAX_RATES - num_rates));
5367 for (i = 0; i < num_rates; i++) {
5368 if (!ipw_is_rate_in_mask(priv, network->mode,
5369 network->rates_ex[i])) {
5370 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5371 IPW_DEBUG_SCAN("Adding masked mandatory "
5372 "rate %02X\n",
5373 network->rates_ex[i]);
5374 rates->supported_rates[rates->num_rates++] =
5375 network->rates[i];
5376 continue;
5377 }
5378
5379 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5380 network->rates_ex[i], priv->rates_mask);
5381 continue;
5382 }
5383
5384 rates->supported_rates[rates->num_rates++] =
5385 network->rates_ex[i];
5386 }
5387
5388 return 1;
5389 }
5390
5391 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5392 const struct ipw_supported_rates *src)
5393 {
5394 u8 i;
5395 for (i = 0; i < src->num_rates; i++)
5396 dest->supported_rates[i] = src->supported_rates[i];
5397 dest->num_rates = src->num_rates;
5398 }
5399
5400 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5401 * mask should ever be used -- right now all callers to add the scan rates are
5402 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5403 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5404 u8 modulation, u32 rate_mask)
5405 {
5406 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5407 LIBIPW_BASIC_RATE_MASK : 0;
5408
5409 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5410 rates->supported_rates[rates->num_rates++] =
5411 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5412
5413 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5414 rates->supported_rates[rates->num_rates++] =
5415 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5416
5417 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5418 rates->supported_rates[rates->num_rates++] = basic_mask |
5419 LIBIPW_CCK_RATE_5MB;
5420
5421 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5422 rates->supported_rates[rates->num_rates++] = basic_mask |
5423 LIBIPW_CCK_RATE_11MB;
5424 }
5425
5426 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5427 u8 modulation, u32 rate_mask)
5428 {
5429 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5430 LIBIPW_BASIC_RATE_MASK : 0;
5431
5432 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5433 rates->supported_rates[rates->num_rates++] = basic_mask |
5434 LIBIPW_OFDM_RATE_6MB;
5435
5436 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5437 rates->supported_rates[rates->num_rates++] =
5438 LIBIPW_OFDM_RATE_9MB;
5439
5440 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5441 rates->supported_rates[rates->num_rates++] = basic_mask |
5442 LIBIPW_OFDM_RATE_12MB;
5443
5444 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5445 rates->supported_rates[rates->num_rates++] =
5446 LIBIPW_OFDM_RATE_18MB;
5447
5448 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5449 rates->supported_rates[rates->num_rates++] = basic_mask |
5450 LIBIPW_OFDM_RATE_24MB;
5451
5452 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5453 rates->supported_rates[rates->num_rates++] =
5454 LIBIPW_OFDM_RATE_36MB;
5455
5456 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5457 rates->supported_rates[rates->num_rates++] =
5458 LIBIPW_OFDM_RATE_48MB;
5459
5460 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5461 rates->supported_rates[rates->num_rates++] =
5462 LIBIPW_OFDM_RATE_54MB;
5463 }
5464
/* Result of a network-selection pass: the best candidate seen so far
 * plus the subset of its advertised rates that we can actually use
 * (filled in by ipw_compatible_rates()). */
struct ipw_network_match {
	struct libipw_network *network;
	struct ipw_supported_rates rates;
};
5469
/*
 * Evaluate @network as an ad-hoc (IBSS) merge candidate.
 *
 * Runs a chain of exclusion tests (capability, ESSID, TSF age, scan
 * staleness, channel, privacy, own BSSID, band/mode, rates).  If the
 * network survives them all, it replaces the candidate in @match and
 * 1 is returned; otherwise 0.
 *
 * @roaming: non-zero selects the roam comparison (candidate must carry
 *           the same ESSID as the currently matched network) instead of
 *           the static-configuration ESSID check.
 */
static int ipw_find_adhoc_network(struct ipw_priv *priv,
				  struct ipw_network_match *match,
				  struct libipw_network *network,
				  int roaming)
{
	struct ipw_supported_rates rates;
	DECLARE_SSID_BUF(ssid);

	/* Verify that this network's capability is compatible with the
	 * current mode (AdHoc or Infrastructure) */
	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
				"capability mismatch.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	if (unlikely(roaming)) {
		/* If we are roaming, then ensure check if this is a valid
		 * network to try and roam to */
		if ((network->ssid_len != match->network->ssid_len) ||
		    memcmp(network->ssid, match->network->ssid,
			   network->ssid_len)) {
			IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
					"because of non-network ESSID.\n",
					print_ssid(ssid, network->ssid,
						   network->ssid_len),
					network->bssid);
			return 0;
		}
	} else {
		/* If an ESSID has been configured then compare the broadcast
		 * ESSID to ours */
		if ((priv->config & CFG_STATIC_ESSID) &&
		    ((network->ssid_len != priv->essid_len) ||
		     memcmp(network->ssid, priv->essid,
			    min(network->ssid_len, priv->essid_len)))) {
			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];

			/* NOTE(review): strncpy may leave 'escaped'
			 * unterminated if the printed SSID fills the
			 * buffer — print_ssid output appears bounded,
			 * but confirm. */
			strncpy(escaped,
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				sizeof(escaped));
			IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
					"because of ESSID mismatch: '%s'.\n",
					escaped, network->bssid,
					print_ssid(ssid, priv->essid,
						   priv->essid_len));
			return 0;
		}
	}

	/* Prefer the IBSS that has been up longest: a candidate whose TSF
	 * timestamp is smaller than the current match's started later
	 * ("newer") and is rejected.  time_stamp[0]/[1] are the two 32-bit
	 * halves of the beacon TSF. */

	if (network->time_stamp[0] < match->network->time_stamp[0]) {
		IPW_DEBUG_MERGE("Network '%s excluded because newer than "
				"current network.\n",
				print_ssid(ssid, match->network->ssid,
					   match->network->ssid_len));
		return 0;
	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
		IPW_DEBUG_MERGE("Network '%s excluded because newer than "
				"current network.\n",
				print_ssid(ssid, match->network->ssid,
					   match->network->ssid_len));
		return 0;
	}

	/* Now go through and see if the requested network is valid... */
	if (priv->ieee->scan_age != 0 &&
	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
				"because of age: %ums.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid,
				jiffies_to_msecs(jiffies -
						 network->last_scanned));
		return 0;
	}

	if ((priv->config & CFG_STATIC_CHANNEL) &&
	    (network->channel != priv->channel)) {
		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
				"because of channel mismatch: %d != %d.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid,
				network->channel, priv->channel);
		return 0;
	}

	/* Verify privacy compatability */
	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
				"because of privacy mismatch: %s != %s.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid,
				priv->
				capability & CAP_PRIVACY_ON ? "on" : "off",
				network->
				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
				"off");
		return 0;
	}

	/* Never merge to the IBSS we are already part of. */
	if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
				"because of the same BSSID match: %pM"
				".\n", print_ssid(ssid, network->ssid,
						  network->ssid_len),
				network->bssid,
				priv->bssid);
		return 0;
	}

	/* Filter out any incompatible freq / mode combinations */
	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
				"because of invalid frequency/mode "
				"combination.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	/* Ensure that the rates supported by the driver are compatible with
	 * this AP, including verification of basic rates (mandatory) */
	if (!ipw_compatible_rates(priv, network, &rates)) {
		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
				"because configured rate mask excludes "
				"AP mandatory rate.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	if (rates.num_rates == 0) {
		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
				"because of no compatible rates.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	/* TODO: Perform any further minimal comparititive tests.  We do not
	 * want to put too much policy logic here; intelligent scan selection
	 * should occur within a generic IEEE 802.11 user space tool.  */

	/* Set up 'new' AP to this network */
	ipw_copy_rates(&match->rates, &rates);
	match->network = network;
	IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
			print_ssid(ssid, network->ssid, network->ssid_len),
			network->bssid);

	return 1;
}
5637
/*
 * Work handler (priv->merge_networks): while associated in ad-hoc mode,
 * scan the known network list for a better IBSS carrying our ESSID and,
 * if one is found, disassociate so the association state machine merges
 * into it.
 */
static void ipw_merge_adhoc_network(struct work_struct *work)
{
	DECLARE_SSID_BUF(ssid);
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, merge_networks);
	struct libipw_network *network = NULL;
	/* Seed the match with the current network; candidates must beat it. */
	struct ipw_network_match match = {
		.network = priv->assoc_network
	};

	if ((priv->status & STATUS_ASSOCIATED) &&
	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
		/* First pass through ROAM process -- look for a better
		 * network */
		unsigned long flags;

		/* network_list is shared with the RX path: walk it under
		 * the ieee lock with IRQs off. */
		spin_lock_irqsave(&priv->ieee->lock, flags);
		list_for_each_entry(network, &priv->ieee->network_list, list) {
			if (network != priv->assoc_network)
				ipw_find_adhoc_network(priv, &match, network,
						       1);
		}
		spin_unlock_irqrestore(&priv->ieee->lock, flags);

		if (match.network == priv->assoc_network) {
			IPW_DEBUG_MERGE("No better ADHOC in this network to "
					"merge to.\n");
			return;
		}

		mutex_lock(&priv->mutex);
		if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
			IPW_DEBUG_MERGE("remove network %s\n",
					print_ssid(ssid, priv->essid,
						   priv->essid_len));
			ipw_remove_current_network(priv);
		}

		/* Drop the current association; reassociation will then
		 * target the better network recorded here. */
		ipw_disassociate(priv);
		priv->assoc_network = match.network;
		mutex_unlock(&priv->mutex);
		return;
	}
}
5682
/*
 * Evaluate @network as an association candidate and record it in @match
 * if it beats the current best.
 *
 * Exclusion chain: capability vs. iw_mode, ESSID (roam or static config),
 * weaker RSSI than the current match, association storming (< 3 s since
 * the last attempt), stale scan data, static channel/BSSID mismatch,
 * privacy mismatch, invalid mode or channel for the current GEO, and
 * rate incompatibility.  Returns 1 if @network becomes the new match,
 * 0 if it was rejected.
 *
 * @roaming: non-zero restricts candidates to the current match's ESSID.
 */
static int ipw_best_network(struct ipw_priv *priv,
			    struct ipw_network_match *match,
			    struct libipw_network *network, int roaming)
{
	struct ipw_supported_rates rates;
	DECLARE_SSID_BUF(ssid);

	/* Verify that this network's capability is compatible with the
	 * current mode (AdHoc or Infrastructure) */
	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
				"capability mismatch.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	if (unlikely(roaming)) {
		/* If we are roaming, then ensure check if this is a valid
		 * network to try and roam to */
		if ((network->ssid_len != match->network->ssid_len) ||
		    memcmp(network->ssid, match->network->ssid,
			   network->ssid_len)) {
			IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
					"because of non-network ESSID.\n",
					print_ssid(ssid, network->ssid,
						   network->ssid_len),
					network->bssid);
			return 0;
		}
	} else {
		/* If an ESSID has been configured then compare the broadcast
		 * ESSID to ours */
		if ((priv->config & CFG_STATIC_ESSID) &&
		    ((network->ssid_len != priv->essid_len) ||
		     memcmp(network->ssid, priv->essid,
			    min(network->ssid_len, priv->essid_len)))) {
			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
			strncpy(escaped,
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				sizeof(escaped));
			IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
					"because of ESSID mismatch: '%s'.\n",
					escaped, network->bssid,
					print_ssid(ssid, priv->essid,
						   priv->essid_len));
			return 0;
		}
	}

	/* If the old network rate is better than this one, don't bother
	 * testing everything else. */
	if (match->network && match->network->stats.rssi > network->stats.rssi) {
		char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
		strncpy(escaped,
			print_ssid(ssid, network->ssid, network->ssid_len),
			sizeof(escaped));
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
				"'%s (%pM)' has a stronger signal.\n",
				escaped, network->bssid,
				print_ssid(ssid, match->network->ssid,
					   match->network->ssid_len),
				match->network->bssid);
		return 0;
	}

	/* If this network has already had an association attempt within the
	 * last 3 seconds, do not try and associate again... */
	if (network->last_associate &&
	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because of storming (%ums since last "
				"assoc attempt).\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid,
				jiffies_to_msecs(jiffies -
						 network->last_associate));
		return 0;
	}

	/* Now go through and see if the requested network is valid... */
	if (priv->ieee->scan_age != 0 &&
	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because of age: %ums.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid,
				jiffies_to_msecs(jiffies -
						 network->last_scanned));
		return 0;
	}

	if ((priv->config & CFG_STATIC_CHANNEL) &&
	    (network->channel != priv->channel)) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because of channel mismatch: %d != %d.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid,
				network->channel, priv->channel);
		return 0;
	}

	/* Verify privacy compatability */
	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because of privacy mismatch: %s != %s.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid,
				priv->capability & CAP_PRIVACY_ON ? "on" :
				"off",
				network->capability &
				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
		return 0;
	}

	if ((priv->config & CFG_STATIC_BSSID) &&
	    memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because of BSSID mismatch: %pM.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid, priv->bssid);
		return 0;
	}

	/* Filter out any incompatible freq / mode combinations */
	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because of invalid frequency/mode "
				"combination.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	/* Filter out invalid channel in current GEO */
	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because of invalid channel in current GEO\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	/* Ensure that the rates supported by the driver are compatible with
	 * this AP, including verification of basic rates (mandatory) */
	if (!ipw_compatible_rates(priv, network, &rates)) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because configured rate mask excludes "
				"AP mandatory rate.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	if (rates.num_rates == 0) {
		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
				"because of no compatible rates.\n",
				print_ssid(ssid, network->ssid,
					   network->ssid_len),
				network->bssid);
		return 0;
	}

	/* TODO: Perform any further minimal comparititive tests.  We do not
	 * want to put too much policy logic here; intelligent scan selection
	 * should occur within a generic IEEE 802.11 user space tool.  */

	/* Set up 'new' AP to this network */
	ipw_copy_rates(&match->rates, &rates);
	match->network = network;

	IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
			print_ssid(ssid, network->ssid, network->ssid_len),
			network->bssid);

	return 1;
}
5874
/*
 * Populate @network with the parameters of a brand-new IBSS we are about
 * to create: pick a band/mode valid for priv->channel (correcting the
 * channel if it is invalid or passive-only for the chosen band), generate
 * a BSSID, and fill in ESSID, capabilities, rates, and default beacon
 * timing fields.
 */
static void ipw_adhoc_create(struct ipw_priv *priv,
			     struct libipw_network *network)
{
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	int i;

	/*
	 * For the purposes of scanning, we can set our wireless mode
	 * to trigger scans across combinations of bands, but when it
	 * comes to creating a new ad-hoc network, we have tell the FW
	 * exactly which band to use.
	 *
	 * We also have the possibility of an invalid channel for the
	 * chossen band.  Attempting to create a new ad-hoc network
	 * with an invalid channel for wireless mode will trigger a
	 * FW fatal error.
	 *
	 */
	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
	case LIBIPW_52GHZ_BAND:
		network->mode = IEEE_A;
		i = libipw_channel_to_index(priv->ieee, priv->channel);
		/* channel was just validated, so an index must exist */
		BUG_ON(i == -1);
		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->a[0].channel;
		}
		break;

	case LIBIPW_24GHZ_BAND:
		/* prefer G when the hardware supports it */
		if (priv->ieee->mode & IEEE_G)
			network->mode = IEEE_G;
		else
			network->mode = IEEE_B;
		i = libipw_channel_to_index(priv->ieee, priv->channel);
		BUG_ON(i == -1);
		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->bg[0].channel;
		}
		break;

	default:
		/* invalid channel: fall back to the first channel of the
		 * best band the hardware supports (A > G > B) */
		IPW_WARNING("Overriding invalid channel\n");
		if (priv->ieee->mode & IEEE_A) {
			network->mode = IEEE_A;
			priv->channel = geo->a[0].channel;
		} else if (priv->ieee->mode & IEEE_G) {
			network->mode = IEEE_G;
			priv->channel = geo->bg[0].channel;
		} else {
			network->mode = IEEE_B;
			priv->channel = geo->bg[0].channel;
		}
		break;
	}

	network->channel = priv->channel;
	/* keep this self-created IBSS alive even without peers */
	priv->config |= CFG_ADHOC_PERSIST;
	ipw_create_bssid(priv, network->bssid);
	network->ssid_len = priv->essid_len;
	memcpy(network->ssid, priv->essid, priv->essid_len);
	memset(&network->stats, 0, sizeof(network->stats));
	network->capability = WLAN_CAPABILITY_IBSS;
	if (!(priv->config & CFG_PREAMBLE_LONG))
		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
	if (priv->capability & CAP_PRIVACY_ON)
		network->capability |= WLAN_CAPABILITY_PRIVACY;
	/* split our rate list into supported + extended-supported parts */
	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
	memcpy(network->rates_ex,
	       &priv->rates.supported_rates[network->rates_len],
	       network->rates_ex_len);
	network->last_scanned = 0;
	network->flags = 0;
	network->last_associate = 0;
	network->time_stamp[0] = 0;
	network->time_stamp[1] = 0;
	network->beacon_interval = 100;	/* Default */
	network->listen_interval = 10;	/* Default */
	network->atim_window = 0;	/* Default */
	network->wpa_ie_len = 0;
	network->rsn_ie_len = 0;
}
5960
5961 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5962 {
5963 struct ipw_tgi_tx_key key;
5964
5965 if (!(priv->ieee->sec.flags & (1 << index)))
5966 return;
5967
5968 key.key_id = index;
5969 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5970 key.security_type = type;
5971 key.station_index = 0; /* always 0 for BSS */
5972 key.flags = 0;
5973 /* 0 for new key; previous value of counter (after fatal error) */
5974 key.tx_counter[0] = cpu_to_le32(0);
5975 key.tx_counter[1] = cpu_to_le32(0);
5976
5977 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5978 }
5979
5980 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5981 {
5982 struct ipw_wep_key key;
5983 int i;
5984
5985 key.cmd_id = DINO_CMD_WEP_KEY;
5986 key.seq_num = 0;
5987
5988 /* Note: AES keys cannot be set for multiple times.
5989 * Only set it at the first time. */
5990 for (i = 0; i < 4; i++) {
5991 key.key_index = i | type;
5992 if (!(priv->ieee->sec.flags & (1 << i))) {
5993 key.key_size = 0;
5994 continue;
5995 }
5996
5997 key.key_size = priv->ieee->sec.key_sizes[i];
5998 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5999
6000 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6001 }
6002 }
6003
6004 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6005 {
6006 if (priv->ieee->host_encrypt)
6007 return;
6008
6009 switch (level) {
6010 case SEC_LEVEL_3:
6011 priv->sys_config.disable_unicast_decryption = 0;
6012 priv->ieee->host_decrypt = 0;
6013 break;
6014 case SEC_LEVEL_2:
6015 priv->sys_config.disable_unicast_decryption = 1;
6016 priv->ieee->host_decrypt = 1;
6017 break;
6018 case SEC_LEVEL_1:
6019 priv->sys_config.disable_unicast_decryption = 0;
6020 priv->ieee->host_decrypt = 0;
6021 break;
6022 case SEC_LEVEL_0:
6023 priv->sys_config.disable_unicast_decryption = 1;
6024 break;
6025 default:
6026 break;
6027 }
6028 }
6029
6030 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6031 {
6032 if (priv->ieee->host_encrypt)
6033 return;
6034
6035 switch (level) {
6036 case SEC_LEVEL_3:
6037 priv->sys_config.disable_multicast_decryption = 0;
6038 break;
6039 case SEC_LEVEL_2:
6040 priv->sys_config.disable_multicast_decryption = 1;
6041 break;
6042 case SEC_LEVEL_1:
6043 priv->sys_config.disable_multicast_decryption = 0;
6044 break;
6045 case SEC_LEVEL_0:
6046 priv->sys_config.disable_multicast_decryption = 1;
6047 break;
6048 default:
6049 break;
6050 }
6051 }
6052
/* Download the currently configured security keys to the firmware
 * according to the negotiated security level:
 * level 3 = CCMP, level 2 = TKIP (TX key only), level 1 = WEP. */
static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
{
	switch (priv->ieee->sec.level) {
	case SEC_LEVEL_3:
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_CCM,
					    priv->ieee->sec.active_key);

		/* group keys are only sent when HW handles MC decryption */
		if (!priv->ieee->host_mc_decrypt)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
		break;
	case SEC_LEVEL_2:
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_TKIP,
					    priv->ieee->sec.active_key);
		break;
	case SEC_LEVEL_1:
		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
		break;
	case SEC_LEVEL_0:
	default:
		break;
	}
}
6081
6082 static void ipw_adhoc_check(void *data)
6083 {
6084 struct ipw_priv *priv = data;
6085
6086 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6087 !(priv->config & CFG_ADHOC_PERSIST)) {
6088 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6089 IPW_DL_STATE | IPW_DL_ASSOC,
6090 "Missed beacon: %d - disassociate\n",
6091 priv->missed_adhoc_beacons);
6092 ipw_remove_current_network(priv);
6093 ipw_disassociate(priv);
6094 return;
6095 }
6096
6097 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6098 le16_to_cpu(priv->assoc_request.beacon_interval));
6099 }
6100
/* Work-queue wrapper: run ipw_adhoc_check() with priv->mutex held. */
static void ipw_bg_adhoc_check(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, adhoc_check.work);
	mutex_lock(&priv->mutex);
	ipw_adhoc_check(priv);
	mutex_unlock(&priv->mutex);
}
6109
/* Dump the static-configuration flags (channel/ESSID/BSSID locks,
 * privacy, rate mask) to the debug log after a scan found no match. */
static void ipw_debug_config(struct ipw_priv *priv)
{
	DECLARE_SSID_BUF(ssid);
	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
		       "[CFG 0x%08X]\n", priv->config);
	if (priv->config & CFG_STATIC_CHANNEL)
		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
	else
		IPW_DEBUG_INFO("Channel unlocked.\n");
	if (priv->config & CFG_STATIC_ESSID)
		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
			       print_ssid(ssid, priv->essid, priv->essid_len));
	else
		IPW_DEBUG_INFO("ESSID unlocked.\n");
	if (priv->config & CFG_STATIC_BSSID)
		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
	else
		IPW_DEBUG_INFO("BSSID unlocked.\n");
	if (priv->capability & CAP_PRIVACY_ON)
		IPW_DEBUG_INFO("PRIVACY on\n");
	else
		IPW_DEBUG_INFO("PRIVACY off\n");
	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
}
6134
/*
 * Translate priv->rates_mask into the firmware's fixed-TX-rate format
 * for the current band/@mode and write it through the FW fixed-rate
 * override register.  An invalid mask for the band collapses to 0
 * (no fixed rate).
 */
static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
{
	/* TODO: Verify that this works... */
	struct ipw_fixed_rate fr;
	u32 reg;
	u16 mask = 0;
	/* NOTE(review): rates_mask is u32; assigning to u16 drops the
	 * upper bits — presumably all rate bits fit in 16, but confirm. */
	u16 new_tx_rates = priv->rates_mask;

	/* Identify 'current FW band' and match it with the fixed
	 * Tx rates */

	switch (priv->ieee->freq_band) {
	case LIBIPW_52GHZ_BAND:	/* A only */
		/* IEEE_A */
		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
			/* Invalid fixed rate mask */
			IPW_DEBUG_WX
			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
			new_tx_rates = 0;
			break;
		}

		/* shift OFDM bits down to the A-band rate positions */
		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
		break;

	default:		/* 2.4Ghz or Mixed */
		/* IEEE_B */
		if (mode == IEEE_B) {
			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
				/* Invalid fixed rate mask */
				IPW_DEBUG_WX
				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
				new_tx_rates = 0;
			}
			break;
		}

		/* IEEE_G */
		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
				     LIBIPW_OFDM_RATES_MASK)) {
			/* Invalid fixed rate mask */
			IPW_DEBUG_WX
			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
			new_tx_rates = 0;
			break;
		}

		/* Repack the 6/9/12 Mbps OFDM bits one position down,
		 * matching the firmware's G-mode rate layout. */
		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
		}

		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
		}

		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
		}

		new_tx_rates |= mask;
		break;
	}

	fr.tx_rates = cpu_to_le16(new_tx_rates);

	/* The override register's address is itself stored in FW memory;
	 * read the indirection, then write the whole struct as one word.
	 * NOTE(review): the u32 type-pun assumes sizeof(fr) == 4. */
	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
	ipw_write_reg32(priv, reg, *(u32 *) & fr);
}
6206
6207 static void ipw_abort_scan(struct ipw_priv *priv)
6208 {
6209 int err;
6210
6211 if (priv->status & STATUS_SCAN_ABORTING) {
6212 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6213 return;
6214 }
6215 priv->status |= STATUS_SCAN_ABORTING;
6216
6217 err = ipw_send_scan_abort(priv);
6218 if (err)
6219 IPW_DEBUG_HC("Request to abort scan failed.\n");
6220 }
6221
/*
 * Fill @scan's channel list for both bands the hardware supports.
 *
 * Layout convention: each band section reserves one header slot first
 * (index 'start'), then appends its channels at start+1..; afterwards
 * the header slot is written as (band-id << 6) | channel-count.  That
 * is why channel_index is incremented BEFORE each channel store.
 *
 * The channel we are currently associated on is skipped, and channels
 * marked passive-only are forced to a passive scan type regardless of
 * @scan_type.
 */
static void ipw_add_scan_channels(struct ipw_priv *priv,
				  struct ipw_scan_request_ext *scan,
				  int scan_type)
{
	int channel_index = 0;
	const struct libipw_geo *geo;
	int i;

	geo = libipw_get_geo(priv->ieee);

	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
		int start = channel_index;	/* reserved for band header */
		for (i = 0; i < geo->a_channels; i++) {
			if ((priv->status & STATUS_ASSOCIATED) &&
			    geo->a[i].channel == priv->channel)
				continue;
			channel_index++;
			scan->channels_list[channel_index] = geo->a[i].channel;
			ipw_set_scan_type(scan, channel_index,
					  geo->a[i].
					  flags & LIBIPW_CH_PASSIVE_ONLY ?
					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
					  scan_type);
		}

		/* write the header only if at least one channel was added */
		if (start != channel_index) {
			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
			    (channel_index - start);
			channel_index++;
		}
	}

	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
		int start = channel_index;	/* reserved for band header */
		if (priv->config & CFG_SPEED_SCAN) {
			int index;
			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
				/* nop out the list */
				[0] = 0
			};

			u8 channel;
			/* walk priv->speed_scan (0-terminated, wrapping)
			 * until the request is full or a channel repeats */
			while (channel_index < IPW_SCAN_CHANNELS - 1) {
				channel =
				    priv->speed_scan[priv->speed_scan_pos];
				if (channel == 0) {
					priv->speed_scan_pos = 0;
					channel = priv->speed_scan[0];
				}
				if ((priv->status & STATUS_ASSOCIATED) &&
				    channel == priv->channel) {
					priv->speed_scan_pos++;
					continue;
				}

				/* If this channel has already been
				 * added in scan, break from loop
				 * and this will be the first channel
				 * in the next scan.
				 */
				if (channels[channel - 1] != 0)
					break;

				channels[channel - 1] = 1;
				priv->speed_scan_pos++;
				channel_index++;
				scan->channels_list[channel_index] = channel;
				/* NOTE(review): no -1 check on the index;
				 * presumably speed_scan only holds channels
				 * valid for the current GEO — confirm. */
				index =
				    libipw_channel_to_index(priv->ieee, channel);
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[index].
						  flags &
						  LIBIPW_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		} else {
			for (i = 0; i < geo->bg_channels; i++) {
				if ((priv->status & STATUS_ASSOCIATED) &&
				    geo->bg[i].channel == priv->channel)
					continue;
				channel_index++;
				scan->channels_list[channel_index] =
				    geo->bg[i].channel;
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[i].
						  flags &
						  LIBIPW_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		}

		if (start != channel_index) {
			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
			    (channel_index - start);
		}
	}
}
6321
6322 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6323 {
6324 /* staying on passive channels longer than the DTIM interval during a
6325 * scan, while associated, causes the firmware to cancel the scan
6326 * without notification. Hence, don't stay on passive channels longer
6327 * than the beacon interval.
6328 */
6329 if (priv->status & STATUS_ASSOCIATED
6330 && priv->assoc_network->beacon_interval > 10)
6331 return priv->assoc_network->beacon_interval - 10;
6332 else
6333 return 120;
6334 }
6335
/*
 * Build and submit a scan request to the firmware.
 *
 * @type:   IW_SCAN_TYPE_PASSIVE or active (the default path below).
 * @direct: non-zero to direct-scan the SSID previously stored in
 *          priv->direct_scan_ssid by the caller.
 *
 * Returns 0 when the scan was sent or merely queued via a pending status
 * flag, or a negative error from the firmware command helpers.  All work
 * is serialized under priv->mutex.
 */
static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
{
	struct ipw_scan_request_ext scan;
	int err = 0, scan_type;

	/* Silently drop requests while the device is uninitialized or
	 * being torn down. */
	if (!(priv->status & STATUS_INIT) ||
	    (priv->status & STATUS_EXIT_PENDING))
		return 0;

	mutex_lock(&priv->mutex);

	/* A direct scan is pointless without an SSID; clear the pending
	 * flag so it is not retried. */
	if (direct && (priv->direct_scan_ssid_len == 0)) {
		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
		goto done;
	}

	/* Only one scan can be outstanding: remember this request via a
	 * pending flag and let the completion path re-issue it. */
	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
			STATUS_SCAN_PENDING;
		goto done;
	}

	if (!(priv->status & STATUS_SCAN_FORCED) &&
	    priv->status & STATUS_SCAN_ABORTING) {
		IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
			STATUS_SCAN_PENDING;
		goto done;
	}

	/* Radio is off: queue until RF kill is released. */
	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
			STATUS_SCAN_PENDING;
		goto done;
	}

	memset(&scan, 0, sizeof(scan));
	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));

	if (type == IW_SCAN_TYPE_PASSIVE) {
		IPW_DEBUG_WX("use passive scanning\n");
		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
			cpu_to_le16(ipw_passive_dwell_time(priv));
		ipw_add_scan_channels(priv, &scan, scan_type);
		goto send_request;
	}

	/* Use active scan by default. */
	if (priv->config & CFG_SPEED_SCAN)
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
			cpu_to_le16(30);
	else
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
			cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
		cpu_to_le16(20);

	/* Passive dwell is bounded by the beacon interval when associated
	 * (see ipw_passive_dwell_time). */
	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
		cpu_to_le16(ipw_passive_dwell_time(priv));
	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		u8 channel;
		u8 band = 0;

		/* Encode band + channel count for the single configured
		 * channel; fall back to channel 9 (2.4 GHz) if the current
		 * channel is invalid. */
		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
		case LIBIPW_52GHZ_BAND:
			band = (u8) (IPW_A_MODE << 6) | 1;
			channel = priv->channel;
			break;

		case LIBIPW_24GHZ_BAND:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = priv->channel;
			break;

		default:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = 9;
			break;
		}

		scan.channels_list[0] = band;
		scan.channels_list[1] = channel;
		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);

		/* NOTE: The card will sit on this channel for this time
		 * period.  Scan aborts are timing sensitive and frequently
		 * result in firmware restarts.  As such, it is best to
		 * set a small dwell_time here and just keep re-issuing
		 * scans.  Otherwise fast channel hopping will not actually
		 * hop channels.
		 *
		 * TODO: Move SPEED SCAN support to all modes and bands */
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
			cpu_to_le16(2000);
	} else {
#endif				/* CONFIG_IPW2200_MONITOR */
		/* Honor direct scans first, otherwise if we are roaming make
		 * this a direct scan for the current network.  Finally,
		 * ensure that every other scan is a fast channel hop scan */
		if (direct) {
			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
			                    priv->direct_scan_ssid_len);
			if (err) {
				IPW_DEBUG_HC("Attempt to send SSID command "
					     "failed\n");
				goto done;
			}

			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
		} else if ((priv->status & STATUS_ROAMING)
			   || (!(priv->status & STATUS_ASSOCIATED)
			       && (priv->config & CFG_STATIC_ESSID)
			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
			if (err) {
				IPW_DEBUG_HC("Attempt to send SSID command "
					     "failed.\n");
				goto done;
			}

			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
		} else
			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;

		ipw_add_scan_channels(priv, &scan, scan_type);
#ifdef CONFIG_IPW2200_MONITOR
	}
#endif

      send_request:
	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		goto done;
	}

	/* Request accepted: clear only the pending flag that this call
	 * serviced, and arm the watchdog in case the firmware never
	 * reports completion. */
	priv->status |= STATUS_SCANNING;
	if (direct) {
		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
		priv->direct_scan_ssid_len = 0;
	} else
		priv->status &= ~STATUS_SCAN_PENDING;

	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IPW_SCAN_CHECK_WATCHDOG);
      done:
	mutex_unlock(&priv->mutex);
	return err;
}
6493
6494 static void ipw_request_passive_scan(struct work_struct *work)
6495 {
6496 struct ipw_priv *priv =
6497 container_of(work, struct ipw_priv, request_passive_scan.work);
6498 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6499 }
6500
6501 static void ipw_request_scan(struct work_struct *work)
6502 {
6503 struct ipw_priv *priv =
6504 container_of(work, struct ipw_priv, request_scan.work);
6505 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6506 }
6507
6508 static void ipw_request_direct_scan(struct work_struct *work)
6509 {
6510 struct ipw_priv *priv =
6511 container_of(work, struct ipw_priv, request_direct_scan.work);
6512 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6513 }
6514
6515 static void ipw_bg_abort_scan(struct work_struct *work)
6516 {
6517 struct ipw_priv *priv =
6518 container_of(work, struct ipw_priv, abort_scan);
6519 mutex_lock(&priv->mutex);
6520 ipw_abort_scan(priv);
6521 mutex_unlock(&priv->mutex);
6522 }
6523
6524 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6525 {
6526 /* This is called when wpa_supplicant loads and closes the driver
6527 * interface. */
6528 priv->ieee->wpa_enabled = value;
6529 return 0;
6530 }
6531
6532 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6533 {
6534 struct libipw_device *ieee = priv->ieee;
6535 struct libipw_security sec = {
6536 .flags = SEC_AUTH_MODE,
6537 };
6538 int ret = 0;
6539
6540 if (value & IW_AUTH_ALG_SHARED_KEY) {
6541 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6542 ieee->open_wep = 0;
6543 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6544 sec.auth_mode = WLAN_AUTH_OPEN;
6545 ieee->open_wep = 1;
6546 } else if (value & IW_AUTH_ALG_LEAP) {
6547 sec.auth_mode = WLAN_AUTH_LEAP;
6548 ieee->open_wep = 1;
6549 } else
6550 return -EINVAL;
6551
6552 if (ieee->set_security)
6553 ieee->set_security(ieee->dev, &sec);
6554 else
6555 ret = -EOPNOTSUPP;
6556
6557 return ret;
6558 }
6559
/*
 * Called with the (possibly empty) WPA IE whenever it changes; currently
 * only ensures the WPA-enabled flag is set before association.
 */
static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
				int wpa_ie_len)
{
	ipw_wpa_enable(priv, 1);
}
6566
6567 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6568 char *capabilities, int length)
6569 {
6570 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6571
6572 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6573 capabilities);
6574 }
6575
6576 /*
6577 * WE-18 support
6578 */
6579
6580 /* SIOCSIWGENIE */
6581 static int ipw_wx_set_genie(struct net_device *dev,
6582 struct iw_request_info *info,
6583 union iwreq_data *wrqu, char *extra)
6584 {
6585 struct ipw_priv *priv = libipw_priv(dev);
6586 struct libipw_device *ieee = priv->ieee;
6587 u8 *buf;
6588 int err = 0;
6589
6590 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6591 (wrqu->data.length && extra == NULL))
6592 return -EINVAL;
6593
6594 if (wrqu->data.length) {
6595 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6596 if (buf == NULL) {
6597 err = -ENOMEM;
6598 goto out;
6599 }
6600
6601 memcpy(buf, extra, wrqu->data.length);
6602 kfree(ieee->wpa_ie);
6603 ieee->wpa_ie = buf;
6604 ieee->wpa_ie_len = wrqu->data.length;
6605 } else {
6606 kfree(ieee->wpa_ie);
6607 ieee->wpa_ie = NULL;
6608 ieee->wpa_ie_len = 0;
6609 }
6610
6611 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6612 out:
6613 return err;
6614 }
6615
6616 /* SIOCGIWGENIE */
6617 static int ipw_wx_get_genie(struct net_device *dev,
6618 struct iw_request_info *info,
6619 union iwreq_data *wrqu, char *extra)
6620 {
6621 struct ipw_priv *priv = libipw_priv(dev);
6622 struct libipw_device *ieee = priv->ieee;
6623 int err = 0;
6624
6625 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6626 wrqu->data.length = 0;
6627 goto out;
6628 }
6629
6630 if (wrqu->data.length < ieee->wpa_ie_len) {
6631 err = -E2BIG;
6632 goto out;
6633 }
6634
6635 wrqu->data.length = ieee->wpa_ie_len;
6636 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6637
6638 out:
6639 return err;
6640 }
6641
6642 static int wext_cipher2level(int cipher)
6643 {
6644 switch (cipher) {
6645 case IW_AUTH_CIPHER_NONE:
6646 return SEC_LEVEL_0;
6647 case IW_AUTH_CIPHER_WEP40:
6648 case IW_AUTH_CIPHER_WEP104:
6649 return SEC_LEVEL_1;
6650 case IW_AUTH_CIPHER_TKIP:
6651 return SEC_LEVEL_2;
6652 case IW_AUTH_CIPHER_CCMP:
6653 return SEC_LEVEL_3;
6654 default:
6655 return -1;
6656 }
6657 }
6658
6659 /* SIOCSIWAUTH */
/*
 * SIOCSIWAUTH handler: apply one authentication parameter from
 * userspace.  Returns 0 or a helper's error; unsupported parameter
 * indices return -EOPNOTSUPP.
 */
static int ipw_wx_set_auth(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	struct libipw_device *ieee = priv->ieee;
	struct iw_param *param = &wrqu->param;
	struct lib80211_crypt_data *crypt;
	unsigned long flags;
	int ret = 0;

	switch (param->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
		/* accepted but nothing to configure */
		break;
	case IW_AUTH_CIPHER_PAIRWISE:
		ipw_set_hw_decrypt_unicast(priv,
					   wext_cipher2level(param->value));
		break;
	case IW_AUTH_CIPHER_GROUP:
		ipw_set_hw_decrypt_multicast(priv,
					     wext_cipher2level(param->value));
		break;
	case IW_AUTH_KEY_MGMT:
		/*
		 * ipw2200 does not use these parameters
		 */
		break;

	case IW_AUTH_TKIP_COUNTERMEASURES:
		/* Toggle the countermeasures flag on the current TX key's
		 * crypto ops, if that crypto backend supports flags. */
		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
			break;

		flags = crypt->ops->get_flags(crypt->priv);

		if (param->value)
			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
		else
			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;

		crypt->ops->set_flags(flags, crypt->priv);

		break;

	case IW_AUTH_DROP_UNENCRYPTED:{
			/* HACK:
			 *
			 * wpa_supplicant calls set_wpa_enabled when the driver
			 * is loaded and unloaded, regardless of if WPA is being
			 * used.  No other calls are made which can be used to
			 * determine if encryption will be used or not prior to
			 * association being expected.  If encryption is not being
			 * used, drop_unencrypted is set to false, else true -- we
			 * can use this to determine if the CAP_PRIVACY_ON bit should
			 * be set.
			 */
			struct libipw_security sec = {
				.flags = SEC_ENABLED,
				.enabled = param->value,
			};
			priv->ieee->drop_unencrypted = param->value;
			/* We only change SEC_LEVEL for open mode. Others
			 * are set by ipw_wpa_set_encryption.
			 */
			if (!param->value) {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_0;
			} else {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_1;
			}
			if (priv->ieee->set_security)
				priv->ieee->set_security(priv->ieee->dev, &sec);
			break;
		}

	case IW_AUTH_80211_AUTH_ALG:
		ret = ipw_wpa_set_auth_algs(priv, param->value);
		break;

	case IW_AUTH_WPA_ENABLED:
		/* Changing the WPA state invalidates the current
		 * association, so drop it. */
		ret = ipw_wpa_enable(priv, param->value);
		ipw_disassociate(priv);
		break;

	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		ieee->ieee802_1x = param->value;
		break;

	case IW_AUTH_PRIVACY_INVOKED:
		ieee->privacy_invoked = param->value;
		break;

	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
6758
6759 /* SIOCGIWAUTH */
6760 static int ipw_wx_get_auth(struct net_device *dev,
6761 struct iw_request_info *info,
6762 union iwreq_data *wrqu, char *extra)
6763 {
6764 struct ipw_priv *priv = libipw_priv(dev);
6765 struct libipw_device *ieee = priv->ieee;
6766 struct lib80211_crypt_data *crypt;
6767 struct iw_param *param = &wrqu->param;
6768 int ret = 0;
6769
6770 switch (param->flags & IW_AUTH_INDEX) {
6771 case IW_AUTH_WPA_VERSION:
6772 case IW_AUTH_CIPHER_PAIRWISE:
6773 case IW_AUTH_CIPHER_GROUP:
6774 case IW_AUTH_KEY_MGMT:
6775 /*
6776 * wpa_supplicant will control these internally
6777 */
6778 ret = -EOPNOTSUPP;
6779 break;
6780
6781 case IW_AUTH_TKIP_COUNTERMEASURES:
6782 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6783 if (!crypt || !crypt->ops->get_flags)
6784 break;
6785
6786 param->value = (crypt->ops->get_flags(crypt->priv) &
6787 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6788
6789 break;
6790
6791 case IW_AUTH_DROP_UNENCRYPTED:
6792 param->value = ieee->drop_unencrypted;
6793 break;
6794
6795 case IW_AUTH_80211_AUTH_ALG:
6796 param->value = ieee->sec.auth_mode;
6797 break;
6798
6799 case IW_AUTH_WPA_ENABLED:
6800 param->value = ieee->wpa_enabled;
6801 break;
6802
6803 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6804 param->value = ieee->ieee802_1x;
6805 break;
6806
6807 case IW_AUTH_ROAMING_CONTROL:
6808 case IW_AUTH_PRIVACY_INVOKED:
6809 param->value = ieee->privacy_invoked;
6810 break;
6811
6812 default:
6813 return -EOPNOTSUPP;
6814 }
6815 return 0;
6816 }
6817
6818 /* SIOCSIWENCODEEXT */
6819 static int ipw_wx_set_encodeext(struct net_device *dev,
6820 struct iw_request_info *info,
6821 union iwreq_data *wrqu, char *extra)
6822 {
6823 struct ipw_priv *priv = libipw_priv(dev);
6824 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6825
6826 if (hwcrypto) {
6827 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6828 /* IPW HW can't build TKIP MIC,
6829 host decryption still needed */
6830 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6831 priv->ieee->host_mc_decrypt = 1;
6832 else {
6833 priv->ieee->host_encrypt = 0;
6834 priv->ieee->host_encrypt_msdu = 1;
6835 priv->ieee->host_decrypt = 1;
6836 }
6837 } else {
6838 priv->ieee->host_encrypt = 0;
6839 priv->ieee->host_encrypt_msdu = 0;
6840 priv->ieee->host_decrypt = 0;
6841 priv->ieee->host_mc_decrypt = 0;
6842 }
6843 }
6844
6845 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6846 }
6847
6848 /* SIOCGIWENCODEEXT */
6849 static int ipw_wx_get_encodeext(struct net_device *dev,
6850 struct iw_request_info *info,
6851 union iwreq_data *wrqu, char *extra)
6852 {
6853 struct ipw_priv *priv = libipw_priv(dev);
6854 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6855 }
6856
6857 /* SIOCSIWMLME */
6858 static int ipw_wx_set_mlme(struct net_device *dev,
6859 struct iw_request_info *info,
6860 union iwreq_data *wrqu, char *extra)
6861 {
6862 struct ipw_priv *priv = libipw_priv(dev);
6863 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6864 __le16 reason;
6865
6866 reason = cpu_to_le16(mlme->reason_code);
6867
6868 switch (mlme->cmd) {
6869 case IW_MLME_DEAUTH:
6870 /* silently ignore */
6871 break;
6872
6873 case IW_MLME_DISASSOC:
6874 ipw_disassociate(priv);
6875 break;
6876
6877 default:
6878 return -EOPNOTSUPP;
6879 }
6880 return 0;
6881 }
6882
6883 #ifdef CONFIG_IPW2200_QOS
6884
6885 /* QoS */
6886 /*
6887 * get the modulation type of the current network or
6888 * the card current mode
6889 */
6890 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6891 {
6892 u8 mode = 0;
6893
6894 if (priv->status & STATUS_ASSOCIATED) {
6895 unsigned long flags;
6896
6897 spin_lock_irqsave(&priv->ieee->lock, flags);
6898 mode = priv->assoc_network->mode;
6899 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6900 } else {
6901 mode = priv->ieee->mode;
6902 }
6903 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6904 return mode;
6905 }
6906
6907 /*
6908 * Handle management frame beacon and probe response
6909 */
/*
 * Update a network's QoS state from a beacon / probe response.
 *
 * @active_network: non-zero when @network is the one we are associated
 *                  to.  Parameter changes on the active network schedule
 *                  qos_activate; in Ad-Hoc a matching-SSID IBSS with a
 *                  different BSSID triggers a network merge.
 */
static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
					 int active_network,
					 struct libipw_network *network)
{
	u32 size = sizeof(struct libipw_qos_parameters);

	/* For IBSS networks the active state simply follows support. */
	if (network->capability & WLAN_CAPABILITY_IBSS)
		network->qos_data.active = network->qos_data.supported;

	if (network->flags & NETWORK_HAS_QOS_MASK) {
		if (active_network &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
			network->qos_data.active = network->qos_data.supported;

		/* Re-activate QoS only when the AP bumped its parameter
		 * set count since we last applied it. */
		if ((network->qos_data.active == 1) && (active_network == 1) &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
		    (network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
			schedule_work(&priv->qos_activate);
			IPW_DEBUG_QOS("QoS parameters change call "
				      "qos_activate\n");
		}
	} else {
		/* No QoS IE: fall back to the default parameter table for
		 * the modulation in use. */
		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
			memcpy(&network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&network->qos_data.parameters,
			       &def_parameters_OFDM, size);

		/* If QoS was on for the active network, push the defaults
		 * before marking it inactive. */
		if ((network->qos_data.active == 1) && (active_network == 1)) {
			IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
			schedule_work(&priv->qos_activate);
		}

		network->qos_data.active = 0;
		network->qos_data.supported = 0;
	}
	/* Ad-Hoc: another IBSS with a different BSSID but our SSID means
	 * the cells should merge. */
	if ((priv->status & STATUS_ASSOCIATED) &&
	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
			if (network->capability & WLAN_CAPABILITY_IBSS)
				if ((network->ssid_len ==
				     priv->assoc_network->ssid_len) &&
				    !memcmp(network->ssid,
					    priv->assoc_network->ssid,
					    network->ssid_len)) {
					queue_work(priv->workqueue,
						   &priv->merge_networks);
				}
	}

	return 0;
}
6966
6967 /*
6968 * This function set up the firmware to support QoS. It sends
6969 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6970 */
static int ipw_qos_activate(struct ipw_priv *priv,
			    struct libipw_qos_data *qos_network_data)
{
	int err;
	/* One parameter set per slot: default CCK, default OFDM, and the
	 * currently active set -- all sent in a single command. */
	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
	struct libipw_qos_parameters *active_one = NULL;
	u32 size = sizeof(struct libipw_qos_parameters);
	u32 burst_duration;
	int i;
	u8 type;

	type = ipw_qos_current_mode(priv);

	/* Always start from the configured default tables. */
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);

	if (qos_network_data == NULL) {
		/* No network data: use the static defaults and apply the
		 * configured TXOP burst duration to every queue. */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
			active_one = &def_parameters_CCK;
		} else
			active_one = &def_parameters_OFDM;

		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		burst_duration = ipw_qos_get_burst_duration(priv);
		for (i = 0; i < QOS_QUEUE_NUM; i++)
			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
			    cpu_to_le16(burst_duration);
	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		/* IBSS: pick the default table matching the band, or the
		 * driver-configured one when QoS is enabled. */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
				      type);
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_CCK;
			else
				active_one = priv->qos_data.def_qos_parm_CCK;
		} else {
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_OFDM;
			else
				active_one = priv->qos_data.def_qos_parm_OFDM;
		}
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
	} else {
		unsigned long flags;
		int active;

		/* Infrastructure: snapshot the network's parameters under
		 * the ieee lock, since the RX path updates them. */
		spin_lock_irqsave(&priv->ieee->lock, flags);
		active_one = &(qos_network_data->parameters);
		qos_network_data->old_param_count =
		    qos_network_data->param_count;
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		active = qos_network_data->supported;
		spin_unlock_irqrestore(&priv->ieee->lock, flags);

		/* AP does not support QoS: still program the burst
		 * duration into every queue of the active set. */
		if (active == 0) {
			burst_duration = ipw_qos_get_burst_duration(priv);
			for (i = 0; i < QOS_QUEUE_NUM; i++)
				qos_parameters[QOS_PARAM_SET_ACTIVE].
				    tx_op_limit[i] = cpu_to_le16(burst_duration);
		}
	}

	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
	err = ipw_send_qos_params_command(priv,
					  (struct libipw_qos_parameters *)
					  &(qos_parameters[0]));
	if (err)
		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");

	return err;
}
7045
7046 /*
7047 * send IPW_CMD_WME_INFO to the firmware
7048 */
7049 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7050 {
7051 int ret = 0;
7052 struct libipw_qos_information_element qos_info;
7053
7054 if (priv == NULL)
7055 return -1;
7056
7057 qos_info.elementID = QOS_ELEMENT_ID;
7058 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7059
7060 qos_info.version = QOS_VERSION_1;
7061 qos_info.ac_info = 0;
7062
7063 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7064 qos_info.qui_type = QOS_OUI_TYPE;
7065 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7066
7067 ret = ipw_send_qos_info_command(priv, &qos_info);
7068 if (ret != 0) {
7069 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7070 }
7071 return ret;
7072 }
7073
7074 /*
7075 * Set the QoS parameter with the association request structure
7076 */
7077 static int ipw_qos_association(struct ipw_priv *priv,
7078 struct libipw_network *network)
7079 {
7080 int err = 0;
7081 struct libipw_qos_data *qos_data = NULL;
7082 struct libipw_qos_data ibss_data = {
7083 .supported = 1,
7084 .active = 1,
7085 };
7086
7087 switch (priv->ieee->iw_mode) {
7088 case IW_MODE_ADHOC:
7089 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7090
7091 qos_data = &ibss_data;
7092 break;
7093
7094 case IW_MODE_INFRA:
7095 qos_data = &network->qos_data;
7096 break;
7097
7098 default:
7099 BUG();
7100 break;
7101 }
7102
7103 err = ipw_qos_activate(priv, qos_data);
7104 if (err) {
7105 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7106 return err;
7107 }
7108
7109 if (priv->qos_data.qos_enable && qos_data->supported) {
7110 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7111 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7112 return ipw_qos_set_info_element(priv);
7113 }
7114
7115 return 0;
7116 }
7117
7118 /*
7119 * handling the beaconing responses. if we get different QoS setting
7120 * off the network from the associated setting, adjust the QoS
7121 * setting
7122 */
static int ipw_qos_association_resp(struct ipw_priv *priv,
				    struct libipw_network *network)
{
	int ret = 0;
	unsigned long flags;
	u32 size = sizeof(struct libipw_qos_parameters);
	int set_qos_param = 0;

	/* Only meaningful while associated in infrastructure mode. */
	if ((priv == NULL) || (network == NULL) ||
	    (priv->assoc_network == NULL))
		return ret;

	if (!(priv->status & STATUS_ASSOCIATED))
		return ret;

	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
		return ret;

	/* All qos_data updates happen under the ieee lock; the actual
	 * re-activation is deferred to the qos_activate work item. */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
		/* Adopt the network's parameter set; re-apply it only
		 * when the AP's parameter count changed. */
		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
		       sizeof(struct libipw_qos_data));
		priv->assoc_network->qos_data.active = 1;
		if ((network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			set_qos_param = 1;
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
		}

	} else {
		/* No QoS parameters in the response: install the defaults
		 * for the band and mark QoS inactive/unsupported. */
		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_OFDM, size);
		priv->assoc_network->qos_data.active = 0;
		priv->assoc_network->qos_data.supported = 0;
		set_qos_param = 1;
	}

	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	if (set_qos_param == 1)
		schedule_work(&priv->qos_activate);

	return ret;
}
7172
7173 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7174 {
7175 u32 ret = 0;
7176
7177 if ((priv == NULL))
7178 return 0;
7179
7180 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7181 ret = priv->qos_data.burst_duration_CCK;
7182 else
7183 ret = priv->qos_data.burst_duration_OFDM;
7184
7185 return ret;
7186 }
7187
7188 /*
7189 * Initialize the setting of QoS global
7190 */
7191 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7192 int burst_enable, u32 burst_duration_CCK,
7193 u32 burst_duration_OFDM)
7194 {
7195 priv->qos_data.qos_enable = enable;
7196
7197 if (priv->qos_data.qos_enable) {
7198 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7199 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7200 IPW_DEBUG_QOS("QoS is enabled\n");
7201 } else {
7202 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7203 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7204 IPW_DEBUG_QOS("QoS is not enabled\n");
7205 }
7206
7207 priv->qos_data.burst_enable = burst_enable;
7208
7209 if (burst_enable) {
7210 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7211 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7212 } else {
7213 priv->qos_data.burst_duration_CCK = 0;
7214 priv->qos_data.burst_duration_OFDM = 0;
7215 }
7216 }
7217
7218 /*
7219 * map the packet priority to the right TX Queue
7220 */
7221 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7222 {
7223 if (priority > 7 || !priv->qos_data.qos_enable)
7224 priority = 0;
7225
7226 return from_priority_to_tx_queue[priority] - 1;
7227 }
7228
7229 static int ipw_is_qos_active(struct net_device *dev,
7230 struct sk_buff *skb)
7231 {
7232 struct ipw_priv *priv = libipw_priv(dev);
7233 struct libipw_qos_data *qos_data = NULL;
7234 int active, supported;
7235 u8 *daddr = skb->data + ETH_ALEN;
7236 int unicast = !is_multicast_ether_addr(daddr);
7237
7238 if (!(priv->status & STATUS_ASSOCIATED))
7239 return 0;
7240
7241 qos_data = &priv->assoc_network->qos_data;
7242
7243 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7244 if (unicast == 0)
7245 qos_data->active = 0;
7246 else
7247 qos_data->active = qos_data->supported;
7248 }
7249 active = qos_data->active;
7250 supported = qos_data->supported;
7251 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7252 "unicast %d\n",
7253 priv->qos_data.qos_enable, active, supported, unicast);
7254 if (active && priv->qos_data.qos_enable)
7255 return 1;
7256
7257 return 0;
7258
7259 }
7260 /*
7261 * add QoS parameter to the TX command
7262 */
7263 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7264 u16 priority,
7265 struct tfd_data *tfd)
7266 {
7267 int tx_queue_id = 0;
7268
7269
7270 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7271 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7272
7273 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7274 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7275 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7276 }
7277 return 0;
7278 }
7279
7280 /*
7281 * background support to run QoS activate functionality
7282 */
7283 static void ipw_bg_qos_activate(struct work_struct *work)
7284 {
7285 struct ipw_priv *priv =
7286 container_of(work, struct ipw_priv, qos_activate);
7287
7288 mutex_lock(&priv->mutex);
7289
7290 if (priv->status & STATUS_ASSOCIATED)
7291 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7292
7293 mutex_unlock(&priv->mutex);
7294 }
7295
7296 static int ipw_handle_probe_response(struct net_device *dev,
7297 struct libipw_probe_response *resp,
7298 struct libipw_network *network)
7299 {
7300 struct ipw_priv *priv = libipw_priv(dev);
7301 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7302 (network == priv->assoc_network));
7303
7304 ipw_qos_handle_probe_response(priv, active_network, network);
7305
7306 return 0;
7307 }
7308
7309 static int ipw_handle_beacon(struct net_device *dev,
7310 struct libipw_beacon *resp,
7311 struct libipw_network *network)
7312 {
7313 struct ipw_priv *priv = libipw_priv(dev);
7314 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7315 (network == priv->assoc_network));
7316
7317 ipw_qos_handle_probe_response(priv, active_network, network);
7318
7319 return 0;
7320 }
7321
/* libipw callback: (re)association response received; refresh the QoS
 * parameters stored for the associated network. */
static int ipw_handle_assoc_response(struct net_device *dev,
				     struct libipw_assoc_response *resp,
				     struct libipw_network *network)
{
	ipw_qos_association_resp(libipw_priv(dev), network);
	return 0;
}
7330
7331 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7332 *qos_param)
7333 {
7334 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7335 sizeof(*qos_param) * 3, qos_param);
7336 }
7337
7338 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7339 *qos_param)
7340 {
7341 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7342 qos_param);
7343 }
7344
7345 #endif /* CONFIG_IPW2200_QOS */
7346
7347 static int ipw_associate_network(struct ipw_priv *priv,
7348 struct libipw_network *network,
7349 struct ipw_supported_rates *rates, int roaming)
7350 {
7351 int err;
7352 DECLARE_SSID_BUF(ssid);
7353
7354 if (priv->config & CFG_FIXED_RATE)
7355 ipw_set_fixed_rate(priv, network->mode);
7356
7357 if (!(priv->config & CFG_STATIC_ESSID)) {
7358 priv->essid_len = min(network->ssid_len,
7359 (u8) IW_ESSID_MAX_SIZE);
7360 memcpy(priv->essid, network->ssid, priv->essid_len);
7361 }
7362
7363 network->last_associate = jiffies;
7364
7365 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7366 priv->assoc_request.channel = network->channel;
7367 priv->assoc_request.auth_key = 0;
7368
7369 if ((priv->capability & CAP_PRIVACY_ON) &&
7370 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7371 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7372 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7373
7374 if (priv->ieee->sec.level == SEC_LEVEL_1)
7375 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7376
7377 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7378 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7379 priv->assoc_request.auth_type = AUTH_LEAP;
7380 else
7381 priv->assoc_request.auth_type = AUTH_OPEN;
7382
7383 if (priv->ieee->wpa_ie_len) {
7384 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7385 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7386 priv->ieee->wpa_ie_len);
7387 }
7388
7389 /*
7390 * It is valid for our ieee device to support multiple modes, but
7391 * when it comes to associating to a given network we have to choose
7392 * just one mode.
7393 */
7394 if (network->mode & priv->ieee->mode & IEEE_A)
7395 priv->assoc_request.ieee_mode = IPW_A_MODE;
7396 else if (network->mode & priv->ieee->mode & IEEE_G)
7397 priv->assoc_request.ieee_mode = IPW_G_MODE;
7398 else if (network->mode & priv->ieee->mode & IEEE_B)
7399 priv->assoc_request.ieee_mode = IPW_B_MODE;
7400
7401 priv->assoc_request.capability = cpu_to_le16(network->capability);
7402 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7403 && !(priv->config & CFG_PREAMBLE_LONG)) {
7404 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7405 } else {
7406 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7407
7408 /* Clear the short preamble if we won't be supporting it */
7409 priv->assoc_request.capability &=
7410 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7411 }
7412
7413 /* Clear capability bits that aren't used in Ad Hoc */
7414 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7415 priv->assoc_request.capability &=
7416 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7417
7418 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7419 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7420 roaming ? "Rea" : "A",
7421 print_ssid(ssid, priv->essid, priv->essid_len),
7422 network->channel,
7423 ipw_modes[priv->assoc_request.ieee_mode],
7424 rates->num_rates,
7425 (priv->assoc_request.preamble_length ==
7426 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7427 network->capability &
7428 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7429 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7430 priv->capability & CAP_PRIVACY_ON ?
7431 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7432 "(open)") : "",
7433 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7434 priv->capability & CAP_PRIVACY_ON ?
7435 '1' + priv->ieee->sec.active_key : '.',
7436 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7437
7438 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7439 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7440 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7441 priv->assoc_request.assoc_type = HC_IBSS_START;
7442 priv->assoc_request.assoc_tsf_msw = 0;
7443 priv->assoc_request.assoc_tsf_lsw = 0;
7444 } else {
7445 if (unlikely(roaming))
7446 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7447 else
7448 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7449 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7450 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7451 }
7452
7453 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7454
7455 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7456 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7457 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7458 } else {
7459 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7460 priv->assoc_request.atim_window = 0;
7461 }
7462
7463 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7464
7465 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7466 if (err) {
7467 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7468 return err;
7469 }
7470
7471 rates->ieee_mode = priv->assoc_request.ieee_mode;
7472 rates->purpose = IPW_RATE_CONNECT;
7473 ipw_send_supported_rates(priv, rates);
7474
7475 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7476 priv->sys_config.dot11g_auto_detection = 1;
7477 else
7478 priv->sys_config.dot11g_auto_detection = 0;
7479
7480 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7481 priv->sys_config.answer_broadcast_ssid_probe = 1;
7482 else
7483 priv->sys_config.answer_broadcast_ssid_probe = 0;
7484
7485 err = ipw_send_system_config(priv);
7486 if (err) {
7487 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7488 return err;
7489 }
7490
7491 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7492 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7493 if (err) {
7494 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7495 return err;
7496 }
7497
	/*
	 * If preemption is enabled, it is possible for the association
	 * to complete before we return from ipw_send_associate. Therefore
	 * we have to be sure and update our private data first.
	 */
7503 priv->channel = network->channel;
7504 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7505 priv->status |= STATUS_ASSOCIATING;
7506 priv->status &= ~STATUS_SECURITY_UPDATED;
7507
7508 priv->assoc_network = network;
7509
7510 #ifdef CONFIG_IPW2200_QOS
7511 ipw_qos_association(priv, network);
7512 #endif
7513
7514 err = ipw_send_associate(priv, &priv->assoc_request);
7515 if (err) {
7516 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7517 return err;
7518 }
7519
7520 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
7521 print_ssid(ssid, priv->essid, priv->essid_len),
7522 priv->bssid);
7523
7524 return 0;
7525 }
7526
/*
 * Look for a better AP within the current network and roam to it.
 *
 * Runs from the 'roam' work item with priv->mutex held (see
 * ipw_bg_roam).  The function is entered twice per roam: once while
 * still associated (to pick a target and disassociate) and once after
 * the disassociation completed (to associate with the new target).
 */
static void ipw_roam(void *data)
{
	struct ipw_priv *priv = data;
	struct libipw_network *network = NULL;
	struct ipw_network_match match = {
		.network = priv->assoc_network
	};

	/* The roaming process is as follows:
	 *
	 * 1.  Missed beacon threshold triggers the roaming process by
	 *     setting the status ROAM bit and requesting a scan.
	 * 2.  When the scan completes, it schedules the ROAM work
	 * 3.  The ROAM work looks at all of the known networks for one that
	 *     is a better network than the currently associated.  If none
	 *     found, the ROAM process is over (ROAM bit cleared)
	 * 4.  If a better network is found, a disassociation request is
	 *     sent.
	 * 5.  When the disassociation completes, the roam work is again
	 *     scheduled.  The second time through, the driver is no longer
	 *     associated, and the newly selected network is sent an
	 *     association request.
	 * 6.  At this point, the roaming process is complete and the ROAM
	 *     status bit is cleared.
	 */

	/* If we are no longer associated, and the roaming bit is no longer
	 * set, then we are not actively roaming, so just return */
	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
		return;

	if (priv->status & STATUS_ASSOCIATED) {
		/* First pass through ROAM process -- look for a better
		 * network */
		unsigned long flags;
		/* Temporarily floor our own AP's RSSI so ipw_best_network()
		 * compares candidates against the worst possible signal;
		 * restored after the walk. */
		u8 rssi = priv->assoc_network->stats.rssi;
		priv->assoc_network->stats.rssi = -128;
		spin_lock_irqsave(&priv->ieee->lock, flags);
		list_for_each_entry(network, &priv->ieee->network_list, list) {
			if (network != priv->assoc_network)
				ipw_best_network(priv, &match, network, 1);
		}
		spin_unlock_irqrestore(&priv->ieee->lock, flags);
		priv->assoc_network->stats.rssi = rssi;

		if (match.network == priv->assoc_network) {
			IPW_DEBUG_ASSOC("No better APs in this network to "
					"roam to.\n");
			priv->status &= ~STATUS_ROAMING;
			ipw_debug_config(priv);
			return;
		}

		/* Kick off disassociation; the second pass below runs once
		 * it completes and the roam work is rescheduled. */
		ipw_send_disassociate(priv, 1);
		priv->assoc_network = match.network;

		return;
	}

	/* Second pass through ROAM process -- request association */
	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
	priv->status &= ~STATUS_ROAMING;
}
7591
7592 static void ipw_bg_roam(struct work_struct *work)
7593 {
7594 struct ipw_priv *priv =
7595 container_of(work, struct ipw_priv, roam);
7596 mutex_lock(&priv->mutex);
7597 ipw_roam(priv);
7598 mutex_unlock(&priv->mutex);
7599 }
7600
/*
 * Pick the best matching network from the scan results and start an
 * association attempt with it.
 *
 * Runs from the 'associate' work item with priv->mutex held (see
 * ipw_bg_associate).  Returns 1 if an association attempt was started,
 * 0 if association was skipped or no suitable network was found.
 */
static int ipw_associate(void *data)
{
	struct ipw_priv *priv = data;

	struct libipw_network *network = NULL;
	struct ipw_network_match match = {
		.network = NULL
	};
	struct ipw_supported_rates *rates;
	struct list_head *element;
	unsigned long flags;
	DECLARE_SSID_BUF(ssid);

	/* Monitor mode never associates. */
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
		return 0;
	}

	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
		IPW_DEBUG_ASSOC("Not attempting association (already in "
				"progress)\n");
		return 0;
	}

	/* A disassociation is still in flight -- requeue ourselves so the
	 * attempt is retried once it completes. */
	if (priv->status & STATUS_DISASSOCIATING) {
		IPW_DEBUG_ASSOC("Not attempting association (in "
				"disassociating)\n ");
		queue_work(priv->workqueue, &priv->associate);
		return 0;
	}

	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
				"initialized)\n");
		return 0;
	}

	/* Honor the user's "do not auto-associate" setting unless a static
	 * ESSID or BSSID was configured explicitly. */
	if (!(priv->config & CFG_ASSOCIATE) &&
	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
		return 0;
	}

	/* Protect our use of the network_list */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	list_for_each_entry(network, &priv->ieee->network_list, list)
		ipw_best_network(priv, &match, network, 0);

	network = match.network;
	rates = &match.rates;

	/* No match found: in Ad-Hoc creator mode with a static ESSID and
	 * channel, create our own IBSS network entry instead. */
	if (network == NULL &&
	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    priv->config & CFG_ADHOC_CREATE &&
	    priv->config & CFG_STATIC_ESSID &&
	    priv->config & CFG_STATIC_CHANNEL) {
		/* Use oldest network if the free list is empty */
		if (list_empty(&priv->ieee->network_free_list)) {
			struct libipw_network *oldest = NULL;
			struct libipw_network *target;

			list_for_each_entry(target, &priv->ieee->network_list, list) {
				if ((oldest == NULL) ||
				    (target->last_scanned < oldest->last_scanned))
					oldest = target;
			}

			/* If there are no more slots, expire the oldest */
			list_del(&oldest->list);
			target = oldest;
			IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
					"network list.\n",
					print_ssid(ssid, target->ssid,
						   target->ssid_len),
					target->bssid);
			list_add_tail(&target->list,
				      &priv->ieee->network_free_list);
		}

		element = priv->ieee->network_free_list.next;
		network = list_entry(element, struct libipw_network, list);
		ipw_adhoc_create(priv, network);
		rates = &priv->rates;
		list_del(element);
		list_add_tail(&network->list, &priv->ieee->network_list);
	}
	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	/* If we reached the end of the list, then we don't have any valid
	 * matching APs */
	if (!network) {
		ipw_debug_config(priv);

		/* Schedule another scan so we keep looking for candidates. */
		if (!(priv->status & STATUS_SCANNING)) {
			if (!(priv->config & CFG_SPEED_SCAN))
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan,
						   SCAN_INTERVAL);
			else
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan, 0);
		}

		return 0;
	}

	ipw_associate_network(priv, network, rates, 0);

	return 1;
}
7711
7712 static void ipw_bg_associate(struct work_struct *work)
7713 {
7714 struct ipw_priv *priv =
7715 container_of(work, struct ipw_priv, associate);
7716 mutex_lock(&priv->mutex);
7717 ipw_associate(priv);
7718 mutex_unlock(&priv->mutex);
7719 }
7720
7721 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7722 struct sk_buff *skb)
7723 {
7724 struct ieee80211_hdr *hdr;
7725 u16 fc;
7726
7727 hdr = (struct ieee80211_hdr *)skb->data;
7728 fc = le16_to_cpu(hdr->frame_control);
7729 if (!(fc & IEEE80211_FCTL_PROTECTED))
7730 return;
7731
7732 fc &= ~IEEE80211_FCTL_PROTECTED;
7733 hdr->frame_control = cpu_to_le16(fc);
7734 switch (priv->ieee->sec.level) {
7735 case SEC_LEVEL_3:
7736 /* Remove CCMP HDR */
7737 memmove(skb->data + LIBIPW_3ADDR_LEN,
7738 skb->data + LIBIPW_3ADDR_LEN + 8,
7739 skb->len - LIBIPW_3ADDR_LEN - 8);
7740 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7741 break;
7742 case SEC_LEVEL_2:
7743 break;
7744 case SEC_LEVEL_1:
7745 /* Remove IV */
7746 memmove(skb->data + LIBIPW_3ADDR_LEN,
7747 skb->data + LIBIPW_3ADDR_LEN + 4,
7748 skb->len - LIBIPW_3ADDR_LEN - 4);
7749 skb_trim(skb, skb->len - 8); /* IV + ICV */
7750 break;
7751 case SEC_LEVEL_0:
7752 break;
7753 default:
7754 printk(KERN_ERR "Unknow security level %d\n",
7755 priv->ieee->sec.level);
7756 break;
7757 }
7758 }
7759
/*
 * Hand a received 802.11 data frame up to the libipw stack.
 *
 * The hardware delivers frames wrapped in a struct ipw_rx_packet; this
 * strips that wrapper, optionally removes HW-decrypt residue, and passes
 * the skb to libipw_rx().  On success libipw owns the skb and rxb->skb
 * is set to NULL so the caller (ipw_rx) will not free it.
 */
static void ipw_handle_data_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct libipw_rx_stats *stats)
{
	struct net_device *dev = priv->net_dev;
	struct libipw_hdr_4addr *hdr;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;

	/* We received data from the HW, so stop the watchdog */
	dev->trans_start = jiffies;

	/* We only process data packets if the
	 * interface is open */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		/* Reported frame length would overrun the Rx buffer. */
		dev->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		dev->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Advance skb->data to the start of the actual payload */
	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
	    (is_multicast_ether_addr(hdr->addr1) ?
	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
		ipw_rebuild_decrypted_skb(priv, rxb->skb);

	if (!libipw_rx(priv->ieee, rxb->skb, stats))
		dev->stats.rx_errors++;
	else {			/* libipw_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		__ipw_led_activity_on(priv);
	}
}
7808
7809 #ifdef CONFIG_IPW2200_RADIOTAP
/*
 * Monitor-mode Rx path: prepend a radiotap header to the received frame
 * and hand it to libipw_rx().
 *
 * The radiotap header is built in place in rxb->skb (the firmware's
 * ipw_rx_frame header is overwritten by memmove'ing the 802.11 frame up
 * behind a struct ipw_rt_hdr).  On success libipw owns the skb and
 * rxb->skb is cleared so the caller will not free it.
 */
static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
					   struct ipw_rx_mem_buffer *rxb,
					   struct libipw_rx_stats *stats)
{
	struct net_device *dev = priv->net_dev;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
	struct ipw_rx_frame *frame = &pkt->u.frame;

	/* initial pull of some data */
	u16 received_channel = frame->received_channel;
	u8 antennaAndPhy = frame->antennaAndPhy;
	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
	u16 pktrate = frame->rate;

	/* Magic struct that slots into the radiotap header -- no reason
	 * to build this manually element by element, we can write it much
	 * more efficiently than we can parse it. ORDER MATTERS HERE */
	struct ipw_rt_hdr *ipw_rt;

	short len = le16_to_cpu(pkt->u.frame.length);

	/* We received data from the HW, so stop the watchdog */
	dev->trans_start = jiffies;

	/* We only process data packets if the
	 * interface is open */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		dev->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		dev->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
	 * that now */
	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
		/* FIXME: Should alloc bigger skb instead */
		dev->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
		return;
	}

	/* copy the frame itself */
	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
		rxb->skb->data + IPW_RX_FRAME_SIZE, len);

	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;

	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */

	/* Big bitfield of all the fields we provide in radiotap */
	ipw_rt->rt_hdr.it_present = cpu_to_le32(
	     (1 << IEEE80211_RADIOTAP_TSFT) |
	     (1 << IEEE80211_RADIOTAP_FLAGS) |
	     (1 << IEEE80211_RADIOTAP_RATE) |
	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
	     (1 << IEEE80211_RADIOTAP_ANTENNA));

	/* Zero the flags, we'll add to them as we go */
	ipw_rt->rt_flags = 0;
	/* Assemble the 32-bit parent TSF from its little-endian byte array. */
	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
			       frame->parent_tsf[2] << 16 |
			       frame->parent_tsf[1] << 8  |
			       frame->parent_tsf[0]);

	/* Convert signal to DBM */
	ipw_rt->rt_dbmsignal = antsignal;
	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);

	/* Convert the channel data and set the flags */
	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
	if (received_channel > 14) {	/* 802.11a */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
	} else if (antennaAndPhy & 32) {	/* 802.11b */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
	} else {		/* 802.11g */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
	}

	/* set the rate in multiples of 500k/s */
	switch (pktrate) {
	case IPW_TX_RATE_1MB:
		ipw_rt->rt_rate = 2;
		break;
	case IPW_TX_RATE_2MB:
		ipw_rt->rt_rate = 4;
		break;
	case IPW_TX_RATE_5MB:
		ipw_rt->rt_rate = 10;
		break;
	case IPW_TX_RATE_6MB:
		ipw_rt->rt_rate = 12;
		break;
	case IPW_TX_RATE_9MB:
		ipw_rt->rt_rate = 18;
		break;
	case IPW_TX_RATE_11MB:
		ipw_rt->rt_rate = 22;
		break;
	case IPW_TX_RATE_12MB:
		ipw_rt->rt_rate = 24;
		break;
	case IPW_TX_RATE_18MB:
		ipw_rt->rt_rate = 36;
		break;
	case IPW_TX_RATE_24MB:
		ipw_rt->rt_rate = 48;
		break;
	case IPW_TX_RATE_36MB:
		ipw_rt->rt_rate = 72;
		break;
	case IPW_TX_RATE_48MB:
		ipw_rt->rt_rate = 96;
		break;
	case IPW_TX_RATE_54MB:
		ipw_rt->rt_rate = 108;
		break;
	default:
		ipw_rt->rt_rate = 0;
		break;
	}

	/* antenna number */
	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */

	/* set the preamble flag if we have it */
	if ((antennaAndPhy & 64))
		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	if (!libipw_rx(priv->ieee, rxb->skb, stats))
		dev->stats.rx_errors++;
	else {			/* libipw_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		/* no LED during capture */
	}
}
7965 #endif
7966
7967 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* Frame-type classification helpers for the promiscuous Rx filter.
 * Each takes a host-order 802.11 frame_control value. */
#define libipw_is_probe_response(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )

#define libipw_is_management(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)

#define libipw_is_control(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)

#define libipw_is_data(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)

#define libipw_is_assoc_request(fc) \
   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)

#define libipw_is_reassoc_request(fc) \
   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7986
7987 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7988 struct ipw_rx_mem_buffer *rxb,
7989 struct libipw_rx_stats *stats)
7990 {
7991 struct net_device *dev = priv->prom_net_dev;
7992 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7993 struct ipw_rx_frame *frame = &pkt->u.frame;
7994 struct ipw_rt_hdr *ipw_rt;
7995
7996 /* First cache any information we need before we overwrite
7997 * the information provided in the skb from the hardware */
7998 struct ieee80211_hdr *hdr;
7999 u16 channel = frame->received_channel;
8000 u8 phy_flags = frame->antennaAndPhy;
8001 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8002 s8 noise = (s8) le16_to_cpu(frame->noise);
8003 u8 rate = frame->rate;
8004 short len = le16_to_cpu(pkt->u.frame.length);
8005 struct sk_buff *skb;
8006 int hdr_only = 0;
8007 u16 filter = priv->prom_priv->filter;
8008
8009 /* If the filter is set to not include Rx frames then return */
8010 if (filter & IPW_PROM_NO_RX)
8011 return;
8012
8013 /* We received data from the HW, so stop the watchdog */
8014 dev->trans_start = jiffies;
8015
8016 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8017 dev->stats.rx_errors++;
8018 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8019 return;
8020 }
8021
8022 /* We only process data packets if the interface is open */
8023 if (unlikely(!netif_running(dev))) {
8024 dev->stats.rx_dropped++;
8025 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8026 return;
8027 }
8028
8029 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8030 * that now */
8031 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8032 /* FIXME: Should alloc bigger skb instead */
8033 dev->stats.rx_dropped++;
8034 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8035 return;
8036 }
8037
8038 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8039 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8040 if (filter & IPW_PROM_NO_MGMT)
8041 return;
8042 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8043 hdr_only = 1;
8044 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8045 if (filter & IPW_PROM_NO_CTL)
8046 return;
8047 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8048 hdr_only = 1;
8049 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8050 if (filter & IPW_PROM_NO_DATA)
8051 return;
8052 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8053 hdr_only = 1;
8054 }
8055
8056 /* Copy the SKB since this is for the promiscuous side */
8057 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8058 if (skb == NULL) {
8059 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
8060 return;
8061 }
8062
8063 /* copy the frame data to write after where the radiotap header goes */
8064 ipw_rt = (void *)skb->data;
8065
8066 if (hdr_only)
8067 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8068
8069 memcpy(ipw_rt->payload, hdr, len);
8070
8071 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8072 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8073 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8074
8075 /* Set the size of the skb to the size of the frame */
8076 skb_put(skb, sizeof(*ipw_rt) + len);
8077
8078 /* Big bitfield of all the fields we provide in radiotap */
8079 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8080 (1 << IEEE80211_RADIOTAP_TSFT) |
8081 (1 << IEEE80211_RADIOTAP_FLAGS) |
8082 (1 << IEEE80211_RADIOTAP_RATE) |
8083 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8084 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8085 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8086 (1 << IEEE80211_RADIOTAP_ANTENNA));
8087
8088 /* Zero the flags, we'll add to them as we go */
8089 ipw_rt->rt_flags = 0;
8090 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8091 frame->parent_tsf[2] << 16 |
8092 frame->parent_tsf[1] << 8 |
8093 frame->parent_tsf[0]);
8094
8095 /* Convert to DBM */
8096 ipw_rt->rt_dbmsignal = signal;
8097 ipw_rt->rt_dbmnoise = noise;
8098
8099 /* Convert the channel data and set the flags */
8100 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8101 if (channel > 14) { /* 802.11a */
8102 ipw_rt->rt_chbitmask =
8103 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8104 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8105 ipw_rt->rt_chbitmask =
8106 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8107 } else { /* 802.11g */
8108 ipw_rt->rt_chbitmask =
8109 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8110 }
8111
8112 /* set the rate in multiples of 500k/s */
8113 switch (rate) {
8114 case IPW_TX_RATE_1MB:
8115 ipw_rt->rt_rate = 2;
8116 break;
8117 case IPW_TX_RATE_2MB:
8118 ipw_rt->rt_rate = 4;
8119 break;
8120 case IPW_TX_RATE_5MB:
8121 ipw_rt->rt_rate = 10;
8122 break;
8123 case IPW_TX_RATE_6MB:
8124 ipw_rt->rt_rate = 12;
8125 break;
8126 case IPW_TX_RATE_9MB:
8127 ipw_rt->rt_rate = 18;
8128 break;
8129 case IPW_TX_RATE_11MB:
8130 ipw_rt->rt_rate = 22;
8131 break;
8132 case IPW_TX_RATE_12MB:
8133 ipw_rt->rt_rate = 24;
8134 break;
8135 case IPW_TX_RATE_18MB:
8136 ipw_rt->rt_rate = 36;
8137 break;
8138 case IPW_TX_RATE_24MB:
8139 ipw_rt->rt_rate = 48;
8140 break;
8141 case IPW_TX_RATE_36MB:
8142 ipw_rt->rt_rate = 72;
8143 break;
8144 case IPW_TX_RATE_48MB:
8145 ipw_rt->rt_rate = 96;
8146 break;
8147 case IPW_TX_RATE_54MB:
8148 ipw_rt->rt_rate = 108;
8149 break;
8150 default:
8151 ipw_rt->rt_rate = 0;
8152 break;
8153 }
8154
8155 /* antenna number */
8156 ipw_rt->rt_antenna = (phy_flags & 3);
8157
8158 /* set the preamble flag if we have it */
8159 if (phy_flags & (1 << 6))
8160 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8161
8162 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8163
8164 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8165 dev->stats.rx_errors++;
8166 dev_kfree_skb_any(skb);
8167 }
8168 }
8169 #endif
8170
8171 static int is_network_packet(struct ipw_priv *priv,
8172 struct libipw_hdr_4addr *header)
8173 {
8174 /* Filter incoming packets to determine if they are targetted toward
8175 * this network, discarding packets coming from ourselves */
8176 switch (priv->ieee->iw_mode) {
8177 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8178 /* packets from our adapter are dropped (echo) */
8179 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8180 return 0;
8181
8182 /* {broad,multi}cast packets to our BSSID go through */
8183 if (is_multicast_ether_addr(header->addr1))
8184 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8185
8186 /* packets to our adapter go through */
8187 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8188 ETH_ALEN);
8189
8190 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8191 /* packets from our adapter are dropped (echo) */
8192 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8193 return 0;
8194
8195 /* {broad,multi}cast packets to our BSS go through */
8196 if (is_multicast_ether_addr(header->addr1))
8197 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8198
8199 /* packets to our adapter go through */
8200 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8201 ETH_ALEN);
8202 }
8203
8204 return 1;
8205 }
8206
/* Window within which a repeated sequence number counts as a retry. */
#define IPW_PACKET_RETRY_TIME HZ

/*
 * Detect 802.11 retransmission duplicates by comparing the frame's
 * sequence-control field against the last frame seen from the same
 * source.
 *
 * Returns 1 if the frame is a duplicate (caller should drop it),
 * 0 otherwise.
 */
static int is_duplicate_packet(struct ipw_priv *priv,
			       struct libipw_hdr_4addr *header)
{
	u16 sc = le16_to_cpu(header->seq_ctl);
	u16 seq = WLAN_GET_SEQ_SEQ(sc);
	u16 frag = WLAN_GET_SEQ_FRAG(sc);
	u16 *last_seq, *last_frag;
	unsigned long *last_time;

	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		{
			/* In IBSS mode frames arrive from many peers, so
			 * track sequence state per source MAC, hashed on
			 * the last address byte. */
			struct list_head *p;
			struct ipw_ibss_seq *entry = NULL;
			u8 *mac = header->addr2;
			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;

			__list_for_each(p, &priv->ibss_mac_hash[index]) {
				entry =
				    list_entry(p, struct ipw_ibss_seq, list);
				if (!memcmp(entry->mac, mac, ETH_ALEN))
					break;
			}
			if (p == &priv->ibss_mac_hash[index]) {
				/* First frame from this peer -- record its
				 * state and treat the frame as new. */
				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
				if (!entry) {
					IPW_ERROR
					    ("Cannot malloc new mac entry\n");
					return 0;
				}
				memcpy(entry->mac, mac, ETH_ALEN);
				entry->seq_num = seq;
				entry->frag_num = frag;
				entry->packet_time = jiffies;
				list_add(&entry->list,
					 &priv->ibss_mac_hash[index]);
				return 0;
			}
			last_seq = &entry->seq_num;
			last_frag = &entry->frag_num;
			last_time = &entry->packet_time;
			break;
		}
	case IW_MODE_INFRA:
		/* In BSS mode all frames come from the AP, so a single
		 * set of counters suffices. */
		last_seq = &priv->last_seq_num;
		last_frag = &priv->last_frag_num;
		last_time = &priv->last_packet_time;
		break;
	default:
		return 0;
	}
	if ((*last_seq == seq) &&
	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
		if (*last_frag == frag)
			goto drop;	/* exact retransmission */
		if (*last_frag + 1 != frag)
			/* out-of-order fragment */
			goto drop;
	} else
		*last_seq = seq;

	*last_frag = frag;
	*last_time = jiffies;
	return 0;

      drop:
	/* Comment this line now since we observed the card receives
	 * duplicate packets but the FCTL_RETRY bit is not set in the
	 * IBSS mode with fragmentation enabled.
	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
	return 1;
}
8281
/*
 * Process a received 802.11 management frame: feed it to libipw's
 * management parser, learn Ad-Hoc stations from beacons/probe
 * responses, and optionally forward the raw frame (prefixed with its
 * Rx stats) to user space when CFG_NET_STATS is enabled.
 *
 * When the frame is forwarded, netif_rx() takes ownership of the skb
 * and rxb->skb is cleared so the caller will not free it.
 */
static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct libipw_rx_stats *stats)
{
	struct sk_buff *skb = rxb->skb;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
	    (skb->data + IPW_RX_FRAME_SIZE);

	libipw_rx_mgt(priv->ieee, header, stats);

	/* In Ad-Hoc mode, add the sender of a beacon/probe response for
	 * our BSSID to our station table. */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_PROBE_RESP) ||
	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_BEACON))) {
		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
			ipw_add_station(priv, header->addr2);
	}

	if (priv->config & CFG_NET_STATS) {
		IPW_DEBUG_HC("sending stat packet\n");

		/* Set the size of the skb to the size of the full
		 * ipw header and 802.11 frame */
		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
			IPW_RX_FRAME_SIZE);

		/* Advance past the ipw packet header to the 802.11 frame */
		skb_pull(skb, IPW_RX_FRAME_SIZE);

		/* Push the libipw_rx_stats before the 802.11 frame */
		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));

		skb->dev = priv->ieee->dev;

		/* Point raw at the libipw_stats */
		skb_reset_mac_header(skb);

		skb->pkt_type = PACKET_OTHERHOST;
		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
		memset(skb->cb, 0, sizeof(rxb->skb->cb));
		netif_rx(skb);
		rxb->skb = NULL;	/* netif_rx() now owns the skb */
	}
}
8328
/*
 * Main entry function for receiving a packet with 80211 headers.  This
 * should be called whenever the FW has notified us that there is a new
 * skb in the receive queue.
 */
8334 static void ipw_rx(struct ipw_priv *priv)
8335 {
8336 struct ipw_rx_mem_buffer *rxb;
8337 struct ipw_rx_packet *pkt;
8338 struct libipw_hdr_4addr *header;
8339 u32 r, w, i;
8340 u8 network_packet;
8341 u8 fill_rx = 0;
8342
8343 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8344 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8345 i = priv->rxq->read;
8346
8347 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8348 fill_rx = 1;
8349
8350 while (i != r) {
8351 rxb = priv->rxq->queue[i];
8352 if (unlikely(rxb == NULL)) {
8353 printk(KERN_CRIT "Queue not allocated!\n");
8354 break;
8355 }
8356 priv->rxq->queue[i] = NULL;
8357
8358 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8359 IPW_RX_BUF_SIZE,
8360 PCI_DMA_FROMDEVICE);
8361
8362 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8363 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8364 pkt->header.message_type,
8365 pkt->header.rx_seq_num, pkt->header.control_bits);
8366
8367 switch (pkt->header.message_type) {
8368 case RX_FRAME_TYPE: /* 802.11 frame */ {
8369 struct libipw_rx_stats stats = {
8370 .rssi = pkt->u.frame.rssi_dbm -
8371 IPW_RSSI_TO_DBM,
8372 .signal =
8373 pkt->u.frame.rssi_dbm -
8374 IPW_RSSI_TO_DBM + 0x100,
8375 .noise =
8376 le16_to_cpu(pkt->u.frame.noise),
8377 .rate = pkt->u.frame.rate,
8378 .mac_time = jiffies,
8379 .received_channel =
8380 pkt->u.frame.received_channel,
8381 .freq =
8382 (pkt->u.frame.
8383 control & (1 << 0)) ?
8384 LIBIPW_24GHZ_BAND :
8385 LIBIPW_52GHZ_BAND,
8386 .len = le16_to_cpu(pkt->u.frame.length),
8387 };
8388
8389 if (stats.rssi != 0)
8390 stats.mask |= LIBIPW_STATMASK_RSSI;
8391 if (stats.signal != 0)
8392 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8393 if (stats.noise != 0)
8394 stats.mask |= LIBIPW_STATMASK_NOISE;
8395 if (stats.rate != 0)
8396 stats.mask |= LIBIPW_STATMASK_RATE;
8397
8398 priv->rx_packets++;
8399
8400 #ifdef CONFIG_IPW2200_PROMISCUOUS
8401 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8402 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8403 #endif
8404
8405 #ifdef CONFIG_IPW2200_MONITOR
8406 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8407 #ifdef CONFIG_IPW2200_RADIOTAP
8408
8409 ipw_handle_data_packet_monitor(priv,
8410 rxb,
8411 &stats);
8412 #else
8413 ipw_handle_data_packet(priv, rxb,
8414 &stats);
8415 #endif
8416 break;
8417 }
8418 #endif
8419
8420 header =
8421 (struct libipw_hdr_4addr *)(rxb->skb->
8422 data +
8423 IPW_RX_FRAME_SIZE);
8424 /* TODO: Check Ad-Hoc dest/source and make sure
8425 * that we are actually parsing these packets
8426 * correctly -- we should probably use the
8427 * frame control of the packet and disregard
8428 * the current iw_mode */
8429
8430 network_packet =
8431 is_network_packet(priv, header);
8432 if (network_packet && priv->assoc_network) {
8433 priv->assoc_network->stats.rssi =
8434 stats.rssi;
8435 priv->exp_avg_rssi =
8436 exponential_average(priv->exp_avg_rssi,
8437 stats.rssi, DEPTH_RSSI);
8438 }
8439
8440 IPW_DEBUG_RX("Frame: len=%u\n",
8441 le16_to_cpu(pkt->u.frame.length));
8442
8443 if (le16_to_cpu(pkt->u.frame.length) <
8444 libipw_get_hdrlen(le16_to_cpu(
8445 header->frame_ctl))) {
8446 IPW_DEBUG_DROP
8447 ("Received packet is too small. "
8448 "Dropping.\n");
8449 priv->net_dev->stats.rx_errors++;
8450 priv->wstats.discard.misc++;
8451 break;
8452 }
8453
8454 switch (WLAN_FC_GET_TYPE
8455 (le16_to_cpu(header->frame_ctl))) {
8456
8457 case IEEE80211_FTYPE_MGMT:
8458 ipw_handle_mgmt_packet(priv, rxb,
8459 &stats);
8460 break;
8461
8462 case IEEE80211_FTYPE_CTL:
8463 break;
8464
8465 case IEEE80211_FTYPE_DATA:
8466 if (unlikely(!network_packet ||
8467 is_duplicate_packet(priv,
8468 header)))
8469 {
8470 IPW_DEBUG_DROP("Dropping: "
8471 "%pM, "
8472 "%pM, "
8473 "%pM\n",
8474 header->addr1,
8475 header->addr2,
8476 header->addr3);
8477 break;
8478 }
8479
8480 ipw_handle_data_packet(priv, rxb,
8481 &stats);
8482
8483 break;
8484 }
8485 break;
8486 }
8487
8488 case RX_HOST_NOTIFICATION_TYPE:{
8489 IPW_DEBUG_RX
8490 ("Notification: subtype=%02X flags=%02X size=%d\n",
8491 pkt->u.notification.subtype,
8492 pkt->u.notification.flags,
8493 le16_to_cpu(pkt->u.notification.size));
8494 ipw_rx_notification(priv, &pkt->u.notification);
8495 break;
8496 }
8497
8498 default:
8499 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8500 pkt->header.message_type);
8501 break;
8502 }
8503
8504 /* For now we just don't re-use anything. We can tweak this
8505 * later to try and re-use notification packets and SKBs that
8506 * fail to Rx correctly */
8507 if (rxb->skb != NULL) {
8508 dev_kfree_skb_any(rxb->skb);
8509 rxb->skb = NULL;
8510 }
8511
8512 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8513 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8514 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8515
8516 i = (i + 1) % RX_QUEUE_SIZE;
8517
		/* If there are a lot of unused frames, restock the Rx queue
		 * so the ucode won't assert */
8520 if (fill_rx) {
8521 priv->rxq->read = i;
8522 ipw_rx_queue_replenish(priv);
8523 }
8524 }
8525
8526 /* Backtrack one entry */
8527 priv->rxq->read = i;
8528 ipw_rx_queue_restock(priv);
8529 }
8530
/* Defaults and limits used by the Wireless Extension handlers below. */
#define DEFAULT_RTS_THRESHOLD 2304U
#define MIN_RTS_THRESHOLD 1U
#define MAX_RTS_THRESHOLD 2304U
#define DEFAULT_BEACON_INTERVAL 100U
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U

/**
 * ipw_sw_reset
 * @option: options to control different reset behaviour
 *	    0 = reset everything except the 'disable' module_param
 *	    1 = reset everything and print out driver info (for probe only)
 *	    2 = reset everything
 *
 * Re-initializes the driver's soft state (config flags, operating mode,
 * thresholds, supported band/modulation) from the module parameters and
 * the detected PCI device ID.
 *
 * Returns non-zero when the iw_mode is unchanged by the reset, zero when
 * the mode was switched.
 */
static int ipw_sw_reset(struct ipw_priv *priv, int option)
{
	int band, modulation;
	int old_mode = priv->ieee->iw_mode;

	/* Initialize module parameter values here */
	priv->config = 0;

	/* We default to disabling the LED code as right now it causes
	 * too many systems to lock up... */
	if (!led_support)
		priv->config |= CFG_NO_LED;

	if (associate)
		priv->config |= CFG_ASSOCIATE;
	else
		IPW_DEBUG_INFO("Auto associate disabled.\n");

	if (auto_create)
		priv->config |= CFG_ADHOC_CREATE;
	else
		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");

	/* Any statically configured ESSID is cleared by a reset. */
	priv->config &= ~CFG_STATIC_ESSID;
	priv->essid_len = 0;
	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);

	/* The 'disable' module param only takes effect for option != 0
	 * (i.e. it is preserved across a plain option-0 reset). */
	if (disable && option) {
		priv->status |= STATUS_RF_KILL_SW;
		IPW_DEBUG_INFO("Radio disabled.\n");
	}

	if (default_channel != 0) {
		priv->config |= CFG_STATIC_CHANNEL;
		priv->channel = default_channel;
		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
		/* TODO: Validate that provided channel is in range */
	}
#ifdef CONFIG_IPW2200_QOS
	ipw_qos_init(priv, qos_enable, qos_burst_enable,
		     burst_duration_CCK, burst_duration_OFDM);
#endif				/* CONFIG_IPW2200_QOS */

	/* Map the 'mode' module param onto iw_mode and the netdev ARP
	 * hardware type (monitor mode uses an 802.11/radiotap type). */
	switch (network_mode) {
	case 1:
		priv->ieee->iw_mode = IW_MODE_ADHOC;
		priv->net_dev->type = ARPHRD_ETHER;

		break;
#ifdef CONFIG_IPW2200_MONITOR
	case 2:
		priv->ieee->iw_mode = IW_MODE_MONITOR;
#ifdef CONFIG_IPW2200_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
		break;
#endif
	default:
	case 0:
		priv->net_dev->type = ARPHRD_ETHER;
		priv->ieee->iw_mode = IW_MODE_INFRA;
		break;
	}

	/* hwcrypto: push all encrypt/decrypt work down to the firmware. */
	if (hwcrypto) {
		priv->ieee->host_encrypt = 0;
		priv->ieee->host_encrypt_msdu = 0;
		priv->ieee->host_decrypt = 0;
		priv->ieee->host_mc_decrypt = 0;
	}
	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");

	/* IPW2200/2915 is able to do hardware fragmentation. */
	priv->ieee->host_open_frag = 0;

	/* PCI device IDs 0x4223/0x4224 are the dual-band 2915ABG parts;
	 * everything else handled here is the 2.4GHz-only 2200BG. */
	if ((priv->pci_dev->device == 0x4223) ||
	    (priv->pci_dev->device == 0x4224)) {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2915ABG Network "
			       "Connection\n");
		priv->ieee->abg_true = 1;
		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
		modulation = LIBIPW_OFDM_MODULATION |
		    LIBIPW_CCK_MODULATION;
		priv->adapter = IPW_2915ABG;
		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
	} else {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2200BG Network "
			       "Connection\n");

		priv->ieee->abg_true = 0;
		band = LIBIPW_24GHZ_BAND;
		modulation = LIBIPW_OFDM_MODULATION |
		    LIBIPW_CCK_MODULATION;
		priv->adapter = IPW_2200BG;
		priv->ieee->mode = IEEE_G | IEEE_B;
	}

	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;

	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;

	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;

	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;

	/* If power management is turned on, default to AC mode */
	priv->power_mode = IPW_POWER_AC;
	priv->tx_power = IPW_TX_POWER_DEFAULT;

	return old_mode == priv->ieee->iw_mode;
}
8666
/*
 * This file defines the Wireless Extension handlers.  It does not
 * define any methods of hardware manipulation and relies on the
 * functions defined in ipw_main to provide the HW interaction.
 *
 * The exception to this is the use of the ipw_get_ordinal()
 * function used to poll the hardware vs. making unnecessary calls.
 *
 */
8676
8677 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8678 {
8679 if (channel == 0) {
8680 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8681 priv->config &= ~CFG_STATIC_CHANNEL;
8682 IPW_DEBUG_ASSOC("Attempting to associate with new "
8683 "parameters.\n");
8684 ipw_associate(priv);
8685 return 0;
8686 }
8687
8688 priv->config |= CFG_STATIC_CHANNEL;
8689
8690 if (priv->channel == channel) {
8691 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8692 channel);
8693 return 0;
8694 }
8695
8696 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8697 priv->channel = channel;
8698
8699 #ifdef CONFIG_IPW2200_MONITOR
8700 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8701 int i;
8702 if (priv->status & STATUS_SCANNING) {
8703 IPW_DEBUG_SCAN("Scan abort triggered due to "
8704 "channel change.\n");
8705 ipw_abort_scan(priv);
8706 }
8707
8708 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8709 udelay(10);
8710
8711 if (priv->status & STATUS_SCANNING)
8712 IPW_DEBUG_SCAN("Still scanning...\n");
8713 else
8714 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8715 1000 - i);
8716
8717 return 0;
8718 }
8719 #endif /* CONFIG_IPW2200_MONITOR */
8720
8721 /* Network configuration changed -- force [re]association */
8722 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8723 if (!ipw_disassociate(priv))
8724 ipw_associate(priv);
8725
8726 return 0;
8727 }
8728
8729 static int ipw_wx_set_freq(struct net_device *dev,
8730 struct iw_request_info *info,
8731 union iwreq_data *wrqu, char *extra)
8732 {
8733 struct ipw_priv *priv = libipw_priv(dev);
8734 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8735 struct iw_freq *fwrq = &wrqu->freq;
8736 int ret = 0, i;
8737 u8 channel, flags;
8738 int band;
8739
8740 if (fwrq->m == 0) {
8741 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8742 mutex_lock(&priv->mutex);
8743 ret = ipw_set_channel(priv, 0);
8744 mutex_unlock(&priv->mutex);
8745 return ret;
8746 }
8747 /* if setting by freq convert to channel */
8748 if (fwrq->e == 1) {
8749 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8750 if (channel == 0)
8751 return -EINVAL;
8752 } else
8753 channel = fwrq->m;
8754
8755 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8756 return -EINVAL;
8757
8758 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8759 i = libipw_channel_to_index(priv->ieee, channel);
8760 if (i == -1)
8761 return -EINVAL;
8762
8763 flags = (band == LIBIPW_24GHZ_BAND) ?
8764 geo->bg[i].flags : geo->a[i].flags;
8765 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8766 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8767 return -EINVAL;
8768 }
8769 }
8770
8771 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8772 mutex_lock(&priv->mutex);
8773 ret = ipw_set_channel(priv, channel);
8774 mutex_unlock(&priv->mutex);
8775 return ret;
8776 }
8777
8778 static int ipw_wx_get_freq(struct net_device *dev,
8779 struct iw_request_info *info,
8780 union iwreq_data *wrqu, char *extra)
8781 {
8782 struct ipw_priv *priv = libipw_priv(dev);
8783
8784 wrqu->freq.e = 0;
8785
8786 /* If we are associated, trying to associate, or have a statically
8787 * configured CHANNEL then return that; otherwise return ANY */
8788 mutex_lock(&priv->mutex);
8789 if (priv->config & CFG_STATIC_CHANNEL ||
8790 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8791 int i;
8792
8793 i = libipw_channel_to_index(priv->ieee, priv->channel);
8794 BUG_ON(i == -1);
8795 wrqu->freq.e = 1;
8796
8797 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8798 case LIBIPW_52GHZ_BAND:
8799 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8800 break;
8801
8802 case LIBIPW_24GHZ_BAND:
8803 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8804 break;
8805
8806 default:
8807 BUG();
8808 }
8809 } else
8810 wrqu->freq.m = 0;
8811
8812 mutex_unlock(&priv->mutex);
8813 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8814 return 0;
8815 }
8816
/* SIOCSIWMODE handler: validate the requested iw_mode, soft-reset the
 * driver state, adjust the netdev ARP type for monitor mode, and queue
 * an adapter restart so the matching firmware gets loaded. */
static int ipw_wx_set_mode(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int err = 0;

	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);

	switch (wrqu->mode) {
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
#endif
	case IW_MODE_ADHOC:
	case IW_MODE_INFRA:
		break;
	case IW_MODE_AUTO:
		/* AUTO is treated as infrastructure. */
		wrqu->mode = IW_MODE_INFRA;
		break;
	default:
		return -EINVAL;
	}
	/* No-op if already in the requested mode. */
	if (wrqu->mode == priv->ieee->iw_mode)
		return 0;

	mutex_lock(&priv->mutex);

	ipw_sw_reset(priv, 0);

#ifdef CONFIG_IPW2200_MONITOR
	/* Leaving monitor mode: restore the Ethernet ARP type. */
	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
		priv->net_dev->type = ARPHRD_ETHER;

	/* Entering monitor mode: the body of this 'if' is the single
	 * #ifdef-selected assignment below. */
	if (wrqu->mode == IW_MODE_MONITOR)
#ifdef CONFIG_IPW2200_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
#endif				/* CONFIG_IPW2200_MONITOR */

	/* Free the existing firmware and reset the fw_loaded
	 * flag so ipw_load() will bring in the new firmware */
	free_firmware();

	priv->ieee->iw_mode = wrqu->mode;

	/* Restart asynchronously; the work item reloads firmware. */
	queue_work(priv->workqueue, &priv->adapter_restart);
	mutex_unlock(&priv->mutex);
	return err;
}
8868
8869 static int ipw_wx_get_mode(struct net_device *dev,
8870 struct iw_request_info *info,
8871 union iwreq_data *wrqu, char *extra)
8872 {
8873 struct ipw_priv *priv = libipw_priv(dev);
8874 mutex_lock(&priv->mutex);
8875 wrqu->mode = priv->ieee->iw_mode;
8876 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8877 mutex_unlock(&priv->mutex);
8878 return 0;
8879 }
8880
/* Values are in microsecond */
/* Per-level power-save timing tables; presumably indexed by power-save
 * level and consumed by the power-mode setup code elsewhere in this
 * file -- TODO confirm against the callers. */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};

static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};
8897
/* SIOCGIWRANGE handler: fill in the iw_range capability structure
 * (quality scales, bitrates, thresholds, WEP sizes, supported
 * frequencies and WPA/scan capabilities) for userspace tools. */
static int ipw_wx_get_range(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	struct iw_range *range = (struct iw_range *)extra;
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	int i = 0, j;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* 54Mbs == ~27 Mb/s real (802.11g) */
	range->throughput = 27 * 1000 * 1000;

	range->max_qual.qual = 100;
	/* TODO: Find real max RSSI and stick here */
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.updated = 7;	/* Updated all three */

	range->avg_qual.qual = 70;
	/* TODO: Find real 'good' to 'bad' threshol value for RSSI */
	range->avg_qual.level = 0;	/* FIXME to real average level */
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7;	/* Updated all three */
	mutex_lock(&priv->mutex);
	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);

	/* supported_rates entries are in 500kb/s units; bit 0x80 is the
	 * "basic rate" flag and is masked off here. */
	for (i = 0; i < range->num_bitrates; i++)
		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
		    500000;

	/* NOTE(review): min_rts is left at 0 from the memset even though
	 * MIN_RTS_THRESHOLD is 1 -- confirm intended. */
	range->max_rts = DEFAULT_RTS_THRESHOLD;
	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	/* WEP: 40-bit (5 byte) and 104-bit (13 byte) key sizes. */
	range->encoding_size[0] = 5;
	range->encoding_size[1] = 13;
	range->num_encoding_sizes = 2;
	range->max_encoding_tokens = WEP_KEYS;

	/* Set the Wireless Extension versions */
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 18;

	i = 0;
	/* 2.4GHz channels first; Ad-Hoc skips passive-only channels. */
	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->bg[j].channel;
			range->freq[i].m = geo->bg[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	/* Then 5GHz channels, same passive-only filtering for Ad-Hoc. */
	if (priv->ieee->mode & IEEE_A) {
		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->a[j].channel;
			range->freq[i].m = geo->a[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	range->num_channels = i;
	range->num_frequency = i;

	mutex_unlock(&priv->mutex);

	/* Event capability (kernel + driver) */
	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
	range->event_capa[1] = IW_EVENT_CAPA_K_1;

	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;

	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;

	IPW_DEBUG_WX("GET Range\n");
	return 0;
}
8991
8992 static int ipw_wx_set_wap(struct net_device *dev,
8993 struct iw_request_info *info,
8994 union iwreq_data *wrqu, char *extra)
8995 {
8996 struct ipw_priv *priv = libipw_priv(dev);
8997
8998 static const unsigned char any[] = {
8999 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9000 };
9001 static const unsigned char off[] = {
9002 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9003 };
9004
9005 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9006 return -EINVAL;
9007 mutex_lock(&priv->mutex);
9008 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9009 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9010 /* we disable mandatory BSSID association */
9011 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9012 priv->config &= ~CFG_STATIC_BSSID;
9013 IPW_DEBUG_ASSOC("Attempting to associate with new "
9014 "parameters.\n");
9015 ipw_associate(priv);
9016 mutex_unlock(&priv->mutex);
9017 return 0;
9018 }
9019
9020 priv->config |= CFG_STATIC_BSSID;
9021 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9022 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9023 mutex_unlock(&priv->mutex);
9024 return 0;
9025 }
9026
9027 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9028 wrqu->ap_addr.sa_data);
9029
9030 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9031
9032 /* Network configuration changed -- force [re]association */
9033 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9034 if (!ipw_disassociate(priv))
9035 ipw_associate(priv);
9036
9037 mutex_unlock(&priv->mutex);
9038 return 0;
9039 }
9040
9041 static int ipw_wx_get_wap(struct net_device *dev,
9042 struct iw_request_info *info,
9043 union iwreq_data *wrqu, char *extra)
9044 {
9045 struct ipw_priv *priv = libipw_priv(dev);
9046
9047 /* If we are associated, trying to associate, or have a statically
9048 * configured BSSID then return that; otherwise return ANY */
9049 mutex_lock(&priv->mutex);
9050 if (priv->config & CFG_STATIC_BSSID ||
9051 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9052 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9053 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9054 } else
9055 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9056
9057 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9058 wrqu->ap_addr.sa_data);
9059 mutex_unlock(&priv->mutex);
9060 return 0;
9061 }
9062
9063 static int ipw_wx_set_essid(struct net_device *dev,
9064 struct iw_request_info *info,
9065 union iwreq_data *wrqu, char *extra)
9066 {
9067 struct ipw_priv *priv = libipw_priv(dev);
9068 int length;
9069 DECLARE_SSID_BUF(ssid);
9070
9071 mutex_lock(&priv->mutex);
9072
9073 if (!wrqu->essid.flags)
9074 {
9075 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9076 ipw_disassociate(priv);
9077 priv->config &= ~CFG_STATIC_ESSID;
9078 ipw_associate(priv);
9079 mutex_unlock(&priv->mutex);
9080 return 0;
9081 }
9082
9083 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9084
9085 priv->config |= CFG_STATIC_ESSID;
9086
9087 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9088 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9089 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9090 mutex_unlock(&priv->mutex);
9091 return 0;
9092 }
9093
9094 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9095 print_ssid(ssid, extra, length), length);
9096
9097 priv->essid_len = length;
9098 memcpy(priv->essid, extra, priv->essid_len);
9099
9100 /* Network configuration changed -- force [re]association */
9101 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9102 if (!ipw_disassociate(priv))
9103 ipw_associate(priv);
9104
9105 mutex_unlock(&priv->mutex);
9106 return 0;
9107 }
9108
9109 static int ipw_wx_get_essid(struct net_device *dev,
9110 struct iw_request_info *info,
9111 union iwreq_data *wrqu, char *extra)
9112 {
9113 struct ipw_priv *priv = libipw_priv(dev);
9114 DECLARE_SSID_BUF(ssid);
9115
9116 /* If we are associated, trying to associate, or have a statically
9117 * configured ESSID then return that; otherwise return ANY */
9118 mutex_lock(&priv->mutex);
9119 if (priv->config & CFG_STATIC_ESSID ||
9120 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9121 IPW_DEBUG_WX("Getting essid: '%s'\n",
9122 print_ssid(ssid, priv->essid, priv->essid_len));
9123 memcpy(extra, priv->essid, priv->essid_len);
9124 wrqu->essid.length = priv->essid_len;
9125 wrqu->essid.flags = 1; /* active */
9126 } else {
9127 IPW_DEBUG_WX("Getting essid: ANY\n");
9128 wrqu->essid.length = 0;
9129 wrqu->essid.flags = 0; /* active */
9130 }
9131 mutex_unlock(&priv->mutex);
9132 return 0;
9133 }
9134
9135 static int ipw_wx_set_nick(struct net_device *dev,
9136 struct iw_request_info *info,
9137 union iwreq_data *wrqu, char *extra)
9138 {
9139 struct ipw_priv *priv = libipw_priv(dev);
9140
9141 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9142 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9143 return -E2BIG;
9144 mutex_lock(&priv->mutex);
9145 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9146 memset(priv->nick, 0, sizeof(priv->nick));
9147 memcpy(priv->nick, extra, wrqu->data.length);
9148 IPW_DEBUG_TRACE("<<\n");
9149 mutex_unlock(&priv->mutex);
9150 return 0;
9151
9152 }
9153
9154 static int ipw_wx_get_nick(struct net_device *dev,
9155 struct iw_request_info *info,
9156 union iwreq_data *wrqu, char *extra)
9157 {
9158 struct ipw_priv *priv = libipw_priv(dev);
9159 IPW_DEBUG_WX("Getting nick\n");
9160 mutex_lock(&priv->mutex);
9161 wrqu->data.length = strlen(priv->nick);
9162 memcpy(extra, priv->nick, wrqu->data.length);
9163 wrqu->data.flags = 1; /* active */
9164 mutex_unlock(&priv->mutex);
9165 return 0;
9166 }
9167
9168 static int ipw_wx_set_sens(struct net_device *dev,
9169 struct iw_request_info *info,
9170 union iwreq_data *wrqu, char *extra)
9171 {
9172 struct ipw_priv *priv = libipw_priv(dev);
9173 int err = 0;
9174
9175 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9176 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9177 mutex_lock(&priv->mutex);
9178
9179 if (wrqu->sens.fixed == 0)
9180 {
9181 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9182 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9183 goto out;
9184 }
9185 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9186 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9187 err = -EINVAL;
9188 goto out;
9189 }
9190
9191 priv->roaming_threshold = wrqu->sens.value;
9192 priv->disassociate_threshold = 3*wrqu->sens.value;
9193 out:
9194 mutex_unlock(&priv->mutex);
9195 return err;
9196 }
9197
9198 static int ipw_wx_get_sens(struct net_device *dev,
9199 struct iw_request_info *info,
9200 union iwreq_data *wrqu, char *extra)
9201 {
9202 struct ipw_priv *priv = libipw_priv(dev);
9203 mutex_lock(&priv->mutex);
9204 wrqu->sens.fixed = 1;
9205 wrqu->sens.value = priv->roaming_threshold;
9206 mutex_unlock(&priv->mutex);
9207
9208 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9209 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9210
9211 return 0;
9212 }
9213
9214 static int ipw_wx_set_rate(struct net_device *dev,
9215 struct iw_request_info *info,
9216 union iwreq_data *wrqu, char *extra)
9217 {
9218 /* TODO: We should use semaphores or locks for access to priv */
9219 struct ipw_priv *priv = libipw_priv(dev);
9220 u32 target_rate = wrqu->bitrate.value;
9221 u32 fixed, mask;
9222
9223 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9224 /* value = X, fixed = 1 means only rate X */
9225 /* value = X, fixed = 0 means all rates lower equal X */
9226
9227 if (target_rate == -1) {
9228 fixed = 0;
9229 mask = LIBIPW_DEFAULT_RATES_MASK;
9230 /* Now we should reassociate */
9231 goto apply;
9232 }
9233
9234 mask = 0;
9235 fixed = wrqu->bitrate.fixed;
9236
9237 if (target_rate == 1000000 || !fixed)
9238 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9239 if (target_rate == 1000000)
9240 goto apply;
9241
9242 if (target_rate == 2000000 || !fixed)
9243 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9244 if (target_rate == 2000000)
9245 goto apply;
9246
9247 if (target_rate == 5500000 || !fixed)
9248 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9249 if (target_rate == 5500000)
9250 goto apply;
9251
9252 if (target_rate == 6000000 || !fixed)
9253 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9254 if (target_rate == 6000000)
9255 goto apply;
9256
9257 if (target_rate == 9000000 || !fixed)
9258 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9259 if (target_rate == 9000000)
9260 goto apply;
9261
9262 if (target_rate == 11000000 || !fixed)
9263 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9264 if (target_rate == 11000000)
9265 goto apply;
9266
9267 if (target_rate == 12000000 || !fixed)
9268 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9269 if (target_rate == 12000000)
9270 goto apply;
9271
9272 if (target_rate == 18000000 || !fixed)
9273 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9274 if (target_rate == 18000000)
9275 goto apply;
9276
9277 if (target_rate == 24000000 || !fixed)
9278 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9279 if (target_rate == 24000000)
9280 goto apply;
9281
9282 if (target_rate == 36000000 || !fixed)
9283 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9284 if (target_rate == 36000000)
9285 goto apply;
9286
9287 if (target_rate == 48000000 || !fixed)
9288 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9289 if (target_rate == 48000000)
9290 goto apply;
9291
9292 if (target_rate == 54000000 || !fixed)
9293 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9294 if (target_rate == 54000000)
9295 goto apply;
9296
9297 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9298 return -EINVAL;
9299
9300 apply:
9301 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9302 mask, fixed ? "fixed" : "sub-rates");
9303 mutex_lock(&priv->mutex);
9304 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9305 priv->config &= ~CFG_FIXED_RATE;
9306 ipw_set_fixed_rate(priv, priv->ieee->mode);
9307 } else
9308 priv->config |= CFG_FIXED_RATE;
9309
9310 if (priv->rates_mask == mask) {
9311 IPW_DEBUG_WX("Mask set to current mask.\n");
9312 mutex_unlock(&priv->mutex);
9313 return 0;
9314 }
9315
9316 priv->rates_mask = mask;
9317
9318 /* Network configuration changed -- force [re]association */
9319 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9320 if (!ipw_disassociate(priv))
9321 ipw_associate(priv);
9322
9323 mutex_unlock(&priv->mutex);
9324 return 0;
9325 }
9326
9327 static int ipw_wx_get_rate(struct net_device *dev,
9328 struct iw_request_info *info,
9329 union iwreq_data *wrqu, char *extra)
9330 {
9331 struct ipw_priv *priv = libipw_priv(dev);
9332 mutex_lock(&priv->mutex);
9333 wrqu->bitrate.value = priv->last_rate;
9334 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9335 mutex_unlock(&priv->mutex);
9336 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9337 return 0;
9338 }
9339
9340 static int ipw_wx_set_rts(struct net_device *dev,
9341 struct iw_request_info *info,
9342 union iwreq_data *wrqu, char *extra)
9343 {
9344 struct ipw_priv *priv = libipw_priv(dev);
9345 mutex_lock(&priv->mutex);
9346 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9347 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9348 else {
9349 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9350 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9351 mutex_unlock(&priv->mutex);
9352 return -EINVAL;
9353 }
9354 priv->rts_threshold = wrqu->rts.value;
9355 }
9356
9357 ipw_send_rts_threshold(priv, priv->rts_threshold);
9358 mutex_unlock(&priv->mutex);
9359 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9360 return 0;
9361 }
9362
9363 static int ipw_wx_get_rts(struct net_device *dev,
9364 struct iw_request_info *info,
9365 union iwreq_data *wrqu, char *extra)
9366 {
9367 struct ipw_priv *priv = libipw_priv(dev);
9368 mutex_lock(&priv->mutex);
9369 wrqu->rts.value = priv->rts_threshold;
9370 wrqu->rts.fixed = 0; /* no auto select */
9371 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9372 mutex_unlock(&priv->mutex);
9373 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9374 return 0;
9375 }
9376
9377 static int ipw_wx_set_txpow(struct net_device *dev,
9378 struct iw_request_info *info,
9379 union iwreq_data *wrqu, char *extra)
9380 {
9381 struct ipw_priv *priv = libipw_priv(dev);
9382 int err = 0;
9383
9384 mutex_lock(&priv->mutex);
9385 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9386 err = -EINPROGRESS;
9387 goto out;
9388 }
9389
9390 if (!wrqu->power.fixed)
9391 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9392
9393 if (wrqu->power.flags != IW_TXPOW_DBM) {
9394 err = -EINVAL;
9395 goto out;
9396 }
9397
9398 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9399 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9400 err = -EINVAL;
9401 goto out;
9402 }
9403
9404 priv->tx_power = wrqu->power.value;
9405 err = ipw_set_tx_power(priv);
9406 out:
9407 mutex_unlock(&priv->mutex);
9408 return err;
9409 }
9410
9411 static int ipw_wx_get_txpow(struct net_device *dev,
9412 struct iw_request_info *info,
9413 union iwreq_data *wrqu, char *extra)
9414 {
9415 struct ipw_priv *priv = libipw_priv(dev);
9416 mutex_lock(&priv->mutex);
9417 wrqu->power.value = priv->tx_power;
9418 wrqu->power.fixed = 1;
9419 wrqu->power.flags = IW_TXPOW_DBM;
9420 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9421 mutex_unlock(&priv->mutex);
9422
9423 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9424 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9425
9426 return 0;
9427 }
9428
/* SIOCSIWFRAG handler: validate and store the fragmentation threshold
 * and send it to the firmware. */
static int ipw_wx_set_frag(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	mutex_lock(&priv->mutex);
	if (wrqu->frag.disabled || !wrqu->frag.fixed)
		priv->ieee->fts = DEFAULT_FTS;
	else {
		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		/* Threshold is forced even by masking off bit 0. */
		priv->ieee->fts = wrqu->frag.value & ~0x1;
	}

	/* NOTE(review): this sends wrqu->frag.value rather than the
	 * priv->ieee->fts just computed -- in the disabled/auto path the
	 * raw user value (not DEFAULT_FTS) reaches the firmware, and in
	 * the fixed path the un-masked value is sent.  Confirm intended. */
	ipw_send_frag_threshold(priv, wrqu->frag.value);
	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
	return 0;
}
9452
9453 static int ipw_wx_get_frag(struct net_device *dev,
9454 struct iw_request_info *info,
9455 union iwreq_data *wrqu, char *extra)
9456 {
9457 struct ipw_priv *priv = libipw_priv(dev);
9458 mutex_lock(&priv->mutex);
9459 wrqu->frag.value = priv->ieee->fts;
9460 wrqu->frag.fixed = 0; /* no auto select */
9461 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9462 mutex_unlock(&priv->mutex);
9463 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9464
9465 return 0;
9466 }
9467
9468 static int ipw_wx_set_retry(struct net_device *dev,
9469 struct iw_request_info *info,
9470 union iwreq_data *wrqu, char *extra)
9471 {
9472 struct ipw_priv *priv = libipw_priv(dev);
9473
9474 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9475 return -EINVAL;
9476
9477 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9478 return 0;
9479
9480 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9481 return -EINVAL;
9482
9483 mutex_lock(&priv->mutex);
9484 if (wrqu->retry.flags & IW_RETRY_SHORT)
9485 priv->short_retry_limit = (u8) wrqu->retry.value;
9486 else if (wrqu->retry.flags & IW_RETRY_LONG)
9487 priv->long_retry_limit = (u8) wrqu->retry.value;
9488 else {
9489 priv->short_retry_limit = (u8) wrqu->retry.value;
9490 priv->long_retry_limit = (u8) wrqu->retry.value;
9491 }
9492
9493 ipw_send_retry_limit(priv, priv->short_retry_limit,
9494 priv->long_retry_limit);
9495 mutex_unlock(&priv->mutex);
9496 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9497 priv->short_retry_limit, priv->long_retry_limit);
9498 return 0;
9499 }
9500
9501 static int ipw_wx_get_retry(struct net_device *dev,
9502 struct iw_request_info *info,
9503 union iwreq_data *wrqu, char *extra)
9504 {
9505 struct ipw_priv *priv = libipw_priv(dev);
9506
9507 mutex_lock(&priv->mutex);
9508 wrqu->retry.disabled = 0;
9509
9510 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9511 mutex_unlock(&priv->mutex);
9512 return -EINVAL;
9513 }
9514
9515 if (wrqu->retry.flags & IW_RETRY_LONG) {
9516 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9517 wrqu->retry.value = priv->long_retry_limit;
9518 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9519 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9520 wrqu->retry.value = priv->short_retry_limit;
9521 } else {
9522 wrqu->retry.flags = IW_RETRY_LIMIT;
9523 wrqu->retry.value = priv->short_retry_limit;
9524 }
9525 mutex_unlock(&priv->mutex);
9526
9527 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9528
9529 return 0;
9530 }
9531
9532 static int ipw_wx_set_scan(struct net_device *dev,
9533 struct iw_request_info *info,
9534 union iwreq_data *wrqu, char *extra)
9535 {
9536 struct ipw_priv *priv = libipw_priv(dev);
9537 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9538 struct delayed_work *work = NULL;
9539
9540 mutex_lock(&priv->mutex);
9541
9542 priv->user_requested_scan = 1;
9543
9544 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9545 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9546 int len = min((int)req->essid_len,
9547 (int)sizeof(priv->direct_scan_ssid));
9548 memcpy(priv->direct_scan_ssid, req->essid, len);
9549 priv->direct_scan_ssid_len = len;
9550 work = &priv->request_direct_scan;
9551 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9552 work = &priv->request_passive_scan;
9553 }
9554 } else {
9555 /* Normal active broadcast scan */
9556 work = &priv->request_scan;
9557 }
9558
9559 mutex_unlock(&priv->mutex);
9560
9561 IPW_DEBUG_WX("Start scan\n");
9562
9563 queue_delayed_work(priv->workqueue, work, 0);
9564
9565 return 0;
9566 }
9567
/* SIOCGIWSCAN handler: return scan results via the libipw helper. */
static int ipw_wx_get_scan(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
}
9575
9576 static int ipw_wx_set_encode(struct net_device *dev,
9577 struct iw_request_info *info,
9578 union iwreq_data *wrqu, char *key)
9579 {
9580 struct ipw_priv *priv = libipw_priv(dev);
9581 int ret;
9582 u32 cap = priv->capability;
9583
9584 mutex_lock(&priv->mutex);
9585 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9586
9587 /* In IBSS mode, we need to notify the firmware to update
9588 * the beacon info after we changed the capability. */
9589 if (cap != priv->capability &&
9590 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9591 priv->status & STATUS_ASSOCIATED)
9592 ipw_disassociate(priv);
9593
9594 mutex_unlock(&priv->mutex);
9595 return ret;
9596 }
9597
/* SIOCGIWENCODE handler: report WEP key state via the libipw helper. */
static int ipw_wx_get_encode(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *key)
{
	struct ipw_priv *priv = libipw_priv(dev);
	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
}
9605
/*
 * SIOCSIWPOWER handler: enable or disable 802.11 power management.
 *
 * Disabling puts the firmware in CAM (constantly-awake) mode.  Only the
 * "receive all" variants of the power flags are accepted; anything else
 * returns -EOPNOTSUPP.
 */
static int ipw_wx_set_power(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int err;
	mutex_lock(&priv->mutex);
	if (wrqu->power.disabled) {
		/* Strip the ENABLED bit but keep the level so a later
		 * re-enable restores the previous setting. */
		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
		if (err) {
			IPW_DEBUG_WX("failed setting power mode.\n");
			mutex_unlock(&priv->mutex);
			return err;
		}
		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	switch (wrqu->power.flags & IW_POWER_MODE) {
	case IW_POWER_ON:	/* If not specified */
	case IW_POWER_MODE:	/* If set all mask */
	case IW_POWER_ALL_R:	/* If explicitly state all */
		break;
	default:		/* Otherwise we don't support it */
		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
			     wrqu->power.flags);
		mutex_unlock(&priv->mutex);
		return -EOPNOTSUPP;
	}

	/* If the user hasn't specified a power management mode yet, default
	 * to BATTERY */
	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
	else
		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;

	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
	if (err) {
		IPW_DEBUG_WX("failed setting power mode.\n");
		mutex_unlock(&priv->mutex);
		return err;
	}

	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
	mutex_unlock(&priv->mutex);
	return 0;
}
9656
9657 static int ipw_wx_get_power(struct net_device *dev,
9658 struct iw_request_info *info,
9659 union iwreq_data *wrqu, char *extra)
9660 {
9661 struct ipw_priv *priv = libipw_priv(dev);
9662 mutex_lock(&priv->mutex);
9663 if (!(priv->power_mode & IPW_POWER_ENABLED))
9664 wrqu->power.disabled = 1;
9665 else
9666 wrqu->power.disabled = 0;
9667
9668 mutex_unlock(&priv->mutex);
9669 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9670
9671 return 0;
9672 }
9673
9674 static int ipw_wx_set_powermode(struct net_device *dev,
9675 struct iw_request_info *info,
9676 union iwreq_data *wrqu, char *extra)
9677 {
9678 struct ipw_priv *priv = libipw_priv(dev);
9679 int mode = *(int *)extra;
9680 int err;
9681
9682 mutex_lock(&priv->mutex);
9683 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9684 mode = IPW_POWER_AC;
9685
9686 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9687 err = ipw_send_power_mode(priv, mode);
9688 if (err) {
9689 IPW_DEBUG_WX("failed setting power mode.\n");
9690 mutex_unlock(&priv->mutex);
9691 return err;
9692 }
9693 priv->power_mode = IPW_POWER_ENABLED | mode;
9694 }
9695 mutex_unlock(&priv->mutex);
9696 return 0;
9697 }
9698
9699 #define MAX_WX_STRING 80
/*
 * Private "get_power" handler: format the current power-save level as a
 * human-readable string in 'extra', e.g. "Power save level: 0 (AC)".
 */
static int ipw_wx_get_powermode(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int level = IPW_POWER_LEVEL(priv->power_mode);
	char *p = extra;

	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);

	switch (level) {
	case IPW_POWER_AC:
		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
		break;
	case IPW_POWER_BATTERY:
		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
		break;
	default:
		/* Fixed levels: report the firmware timeout/period tables
		 * (level is 1-based, the tables are 0-based). */
		p += snprintf(p, MAX_WX_STRING - (p - extra),
			      "(Timeout %dms, Period %dms)",
			      timeout_duration[level - 1] / 1000,
			      period_duration[level - 1] / 1000);
	}

	/* Flag when power management is currently switched off entirely. */
	if (!(priv->power_mode & IPW_POWER_ENABLED))
		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");

	/* Length includes the terminating NUL. */
	wrqu->data.length = p - extra + 1;

	return 0;
}
9731
/*
 * Private "set_mode" handler: select the 802.11 band/modulation mix as a
 * bitmask of IEEE_A/IEEE_B/IEEE_G.  Changing the mix forces a
 * [re]association and updates the band LEDs.
 */
static int ipw_wx_set_wireless_mode(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int mode = *(int *)extra;
	u8 band = 0, modulation = 0;

	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
		return -EINVAL;
	}
	mutex_lock(&priv->mutex);
	if (priv->adapter == IPW_2915ABG) {
		/* Only the 2915ABG adapter accepts 802.11a (5.2GHz). */
		priv->ieee->abg_true = 1;
		if (mode & IEEE_A) {
			band |= LIBIPW_52GHZ_BAND;
			modulation |= LIBIPW_OFDM_MODULATION;
		} else
			priv->ieee->abg_true = 0;
	} else {
		if (mode & IEEE_A) {
			IPW_WARNING("Attempt to set 2200BG into "
				    "802.11a mode\n");
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		priv->ieee->abg_true = 0;
	}

	if (mode & IEEE_B) {
		band |= LIBIPW_24GHZ_BAND;
		modulation |= LIBIPW_CCK_MODULATION;
	} else
		/* Missing any of a/b/g means this is not full "abg". */
		priv->ieee->abg_true = 0;

	if (mode & IEEE_G) {
		band |= LIBIPW_24GHZ_BAND;
		modulation |= LIBIPW_OFDM_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	priv->ieee->mode = mode;
	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;
	init_supported_rates(priv, &priv->rates);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
	if (!ipw_disassociate(priv)) {
		ipw_send_supported_rates(priv, &priv->rates);
		ipw_associate(priv);
	}

	/* Update the band LEDs */
	ipw_led_band_on(priv);

	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
		     mode & IEEE_A ? 'a' : '.',
		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
	mutex_unlock(&priv->mutex);
	return 0;
}
9796
9797 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9798 struct iw_request_info *info,
9799 union iwreq_data *wrqu, char *extra)
9800 {
9801 struct ipw_priv *priv = libipw_priv(dev);
9802 mutex_lock(&priv->mutex);
9803 switch (priv->ieee->mode) {
9804 case IEEE_A:
9805 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9806 break;
9807 case IEEE_B:
9808 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9809 break;
9810 case IEEE_A | IEEE_B:
9811 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9812 break;
9813 case IEEE_G:
9814 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9815 break;
9816 case IEEE_A | IEEE_G:
9817 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9818 break;
9819 case IEEE_B | IEEE_G:
9820 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9821 break;
9822 case IEEE_A | IEEE_B | IEEE_G:
9823 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9824 break;
9825 default:
9826 strncpy(extra, "unknown", MAX_WX_STRING);
9827 break;
9828 }
9829
9830 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9831
9832 wrqu->data.length = strlen(extra) + 1;
9833 mutex_unlock(&priv->mutex);
9834
9835 return 0;
9836 }
9837
9838 static int ipw_wx_set_preamble(struct net_device *dev,
9839 struct iw_request_info *info,
9840 union iwreq_data *wrqu, char *extra)
9841 {
9842 struct ipw_priv *priv = libipw_priv(dev);
9843 int mode = *(int *)extra;
9844 mutex_lock(&priv->mutex);
9845 /* Switching from SHORT -> LONG requires a disassociation */
9846 if (mode == 1) {
9847 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9848 priv->config |= CFG_PREAMBLE_LONG;
9849
9850 /* Network configuration changed -- force [re]association */
9851 IPW_DEBUG_ASSOC
9852 ("[re]association triggered due to preamble change.\n");
9853 if (!ipw_disassociate(priv))
9854 ipw_associate(priv);
9855 }
9856 goto done;
9857 }
9858
9859 if (mode == 0) {
9860 priv->config &= ~CFG_PREAMBLE_LONG;
9861 goto done;
9862 }
9863 mutex_unlock(&priv->mutex);
9864 return -EINVAL;
9865
9866 done:
9867 mutex_unlock(&priv->mutex);
9868 return 0;
9869 }
9870
/* Private "get_preamble" handler: report "long (1)" or "auto (0)". */
static int ipw_wx_get_preamble(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	mutex_lock(&priv->mutex);
	if (priv->config & CFG_PREAMBLE_LONG)
		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
	else
		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
	mutex_unlock(&priv->mutex);
	return 0;
}
9884
9885 #ifdef CONFIG_IPW2200_MONITOR
/*
 * Private "monitor" handler: parms[0] != 0 enables monitor mode on the
 * channel given in parms[1]; 0 switches back to a normal interface.
 * Changing the link-layer type requires a full adapter restart, which
 * is queued on the driver workqueue.
 */
static int ipw_wx_set_monitor(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);
	mutex_lock(&priv->mutex);
	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
	if (enable) {
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			/* Pick the link type matching the configured capture
			 * format (radiotap vs plain 802.11). */
#ifdef CONFIG_IPW2200_RADIOTAP
			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
			priv->net_dev->type = ARPHRD_IEEE80211;
#endif
			queue_work(priv->workqueue, &priv->adapter_restart);
		}

		ipw_set_channel(priv, parms[1]);
	} else {
		/* Already a normal interface -- nothing to undo. */
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			mutex_unlock(&priv->mutex);
			return 0;
		}
		priv->net_dev->type = ARPHRD_ETHER;
		queue_work(priv->workqueue, &priv->adapter_restart);
	}
	mutex_unlock(&priv->mutex);
	return 0;
}
9917
9918 #endif /* CONFIG_IPW2200_MONITOR */
9919
/* Private "reset" handler: queue a full adapter restart. */
static int ipw_wx_reset(struct net_device *dev,
			struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	IPW_DEBUG_WX("RESET\n");
	queue_work(priv->workqueue, &priv->adapter_restart);
	return 0;
}
9929
/*
 * Private "sw_reset" handler: reset driver configuration to defaults,
 * clear any encryption keys, restart the adapter and, if the radio is
 * on, force a [re]association.
 */
static int ipw_wx_sw_reset(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	/* Request used to disable all encryption below. */
	union iwreq_data wrqu_sec = {
		.encoding = {
			     .flags = IW_ENCODE_DISABLED,
			     },
	};
	int ret;

	IPW_DEBUG_WX("SW_RESET\n");

	mutex_lock(&priv->mutex);

	ret = ipw_sw_reset(priv, 2);
	if (!ret) {
		free_firmware();
		ipw_adapter_restart(priv);
	}

	/* The SW reset bit might have been toggled on by the 'disable'
	 * module parameter, so take appropriate action */
	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);

	/* NOTE(review): the mutex is deliberately dropped around the call
	 * into libipw -- presumably to avoid deadlock with libipw's own
	 * locking; confirm before changing. */
	mutex_unlock(&priv->mutex);
	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
	mutex_lock(&priv->mutex);

	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		/* Configuration likely changed -- force [re]association */
		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
				"reset.\n");
		if (!ipw_disassociate(priv))
			ipw_associate(priv);
	}

	mutex_unlock(&priv->mutex);

	return 0;
}
9972
/* Rebase the WE IOCTLs to zero for the handler array */
#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
/* Standard Wireless Extension handler table, indexed by the rebased
 * ioctl number.  Gaps (unsupported ioctls) are left NULL. */
static iw_handler ipw_wx_handlers[] = {
	IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
	IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
	IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
	IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
	IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
	IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
	IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
	IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
	IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
	IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
	IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
	IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
	IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
	IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
	IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
	IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
	IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
	IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
	IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
	IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
	IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
	IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
	IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
	IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
	IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
	IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
	IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
	IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
	IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
	IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
	IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
	IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
	IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
	IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
	IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
	IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
};
10018
/* Private WEXT ioctl numbers, starting at SIOCIWFIRSTPRIV.  Keep this
 * list in the same order as ipw_priv_handler[]. */
enum {
	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
	IPW_PRIV_GET_POWER,
	IPW_PRIV_SET_MODE,
	IPW_PRIV_GET_MODE,
	IPW_PRIV_SET_PREAMBLE,
	IPW_PRIV_GET_PREAMBLE,
	IPW_PRIV_RESET,
	IPW_PRIV_SW_RESET,
#ifdef CONFIG_IPW2200_MONITOR
	IPW_PRIV_SET_MONITOR,
#endif
};
10032
/* Argument descriptions for the private ioctls, consumed by iwpriv to
 * type-check and name each command. */
static struct iw_priv_args ipw_priv_args[] = {
	{
	 .cmd = IPW_PRIV_SET_POWER,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_power"},
	{
	 .cmd = IPW_PRIV_GET_POWER,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
	 .name = "get_power"},
	{
	 .cmd = IPW_PRIV_SET_MODE,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_mode"},
	{
	 .cmd = IPW_PRIV_GET_MODE,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
	 .name = "get_mode"},
	{
	 .cmd = IPW_PRIV_SET_PREAMBLE,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_preamble"},
	{
	 .cmd = IPW_PRIV_GET_PREAMBLE,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
	 .name = "get_preamble"},
	{
	 IPW_PRIV_RESET,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
	{
	 IPW_PRIV_SW_RESET,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
#ifdef CONFIG_IPW2200_MONITOR
	{
	 IPW_PRIV_SET_MONITOR,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
#endif				/* CONFIG_IPW2200_MONITOR */
};
10070
/* Private ioctl handlers; entry order mirrors the IPW_PRIV_* enum. */
static iw_handler ipw_priv_handler[] = {
	ipw_wx_set_powermode,
	ipw_wx_get_powermode,
	ipw_wx_set_wireless_mode,
	ipw_wx_get_wireless_mode,
	ipw_wx_set_preamble,
	ipw_wx_get_preamble,
	ipw_wx_reset,
	ipw_wx_sw_reset,
#ifdef CONFIG_IPW2200_MONITOR
	ipw_wx_set_monitor,
#endif
};
10084
/* Wireless Extension registration: standard handlers, private handlers
 * with their argument descriptions, and the statistics callback. */
static struct iw_handler_def ipw_wx_handler_def = {
	.standard = ipw_wx_handlers,
	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
	.num_private = ARRAY_SIZE(ipw_priv_handler),
	.num_private_args = ARRAY_SIZE(ipw_priv_args),
	.private = ipw_priv_handler,
	.private_args = ipw_priv_args,
	.get_wireless_stats = ipw_get_wireless_stats,
};
10094
10095 /*
10096 * Get wireless statistics.
10097 * Called by /proc/net/wireless
10098 * Also called by SIOCGIWSTATS
10099 */
static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
{
	struct ipw_priv *priv = libipw_priv(dev);
	struct iw_statistics *wstats;

	wstats = &priv->wstats;

	/* if hw is disabled, then ipw_get_ordinal() can't be called.
	 * netdev->get_wireless_stats seems to be called before fw is
	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
	 * and associated; if not associcated, the values are all meaningless
	 * anyway, so set them all to NULL and INVALID */
	if (!(priv->status & STATUS_ASSOCIATED)) {
		wstats->miss.beacon = 0;
		wstats->discard.retries = 0;
		wstats->qual.qual = 0;
		wstats->qual.level = 0;
		wstats->qual.noise = 0;
		/* NOTE(review): magic 7 -- presumably the three "updated"
		 * bits for qual/level/noise; confirm against wireless.h. */
		wstats->qual.updated = 7;
		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
		return wstats;
	}

	wstats->qual.qual = priv->quality;
	wstats->qual.level = priv->exp_avg_rssi;
	wstats->qual.noise = priv->exp_avg_noise;
	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;

	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
	wstats->discard.retries = priv->last_tx_failures;
	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;

/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
	goto fail_get_ordinal;
	wstats->discard.retries += tx_retry; */

	return wstats;
}
10140
10141 /* net device stuff */
10142
10143 static void init_sys_config(struct ipw_sys_config *sys_config)
10144 {
10145 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10146 sys_config->bt_coexistence = 0;
10147 sys_config->answer_broadcast_ssid_probe = 0;
10148 sys_config->accept_all_data_frames = 0;
10149 sys_config->accept_non_directed_frames = 1;
10150 sys_config->exclude_unicast_unencrypted = 0;
10151 sys_config->disable_unicast_decryption = 1;
10152 sys_config->exclude_multicast_unencrypted = 0;
10153 sys_config->disable_multicast_decryption = 1;
10154 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10155 antenna = CFG_SYS_ANTENNA_BOTH;
10156 sys_config->antenna_diversity = antenna;
10157 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10158 sys_config->dot11g_auto_detection = 0;
10159 sys_config->enable_cts_to_self = 0;
10160 sys_config->bt_coexist_collision_thr = 0;
10161 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10162 sys_config->silence_threshold = 0x1e;
10163 }
10164
/* dev->open: let the network stack start handing us packets. */
static int ipw_net_open(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->open\n");
	netif_start_queue(dev);
	return 0;
}
10171
/* dev->close: stop the TX queue. */
static int ipw_net_stop(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->close\n");
	netif_stop_queue(dev);
	return 0;
}
10178
10179 /*
10180 todo:
10181
10182 modify to send one tfd per fragment instead of using chunking. otherwise
10183 we need to heavily modify the libipw_skb_to_txb.
10184 */
10185
/*
 * Build and submit one TX frame descriptor (TFD) for the fragment chain
 * in 'txb' on the queue selected by priority 'pri'.
 *
 * Called with priv->lock held (see ipw_net_hard_start_xmit).  Frames are
 * silently dropped when not associated or when no station entry can be
 * found/created for an IBSS destination; both cases still return
 * NETDEV_TX_OK after freeing the txb.
 */
static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
		      int pri)
{
	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
	    txb->fragments[0]->data;
	int i = 0;
	struct tfd_frame *tfd;
#ifdef CONFIG_IPW2200_QOS
	int tx_id = ipw_get_tx_queue_number(priv, pri);
	struct clx2_tx_queue *txq = &priv->txq[tx_id];
#else
	struct clx2_tx_queue *txq = &priv->txq[0];
#endif
	struct clx2_queue *q = &txq->q;
	u8 id, hdr_len, unicast;
	int fc;

	if (!(priv->status & STATUS_ASSOCIATED))
		goto drop;

	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		/* In IBSS every peer needs a firmware station entry;
		 * create one on first use. */
		unicast = !is_multicast_ether_addr(hdr->addr1);
		id = ipw_find_station(priv, hdr->addr1);
		if (id == IPW_INVALID_STATION) {
			id = ipw_add_station(priv, hdr->addr1);
			if (id == IPW_INVALID_STATION) {
				IPW_WARNING("Attempt to send data to "
					    "invalid cell: %pM\n",
					    hdr->addr1);
				goto drop;
			}
		}
		break;

	case IW_MODE_INFRA:
	default:
		/* Infrastructure traffic always uses station 0 (the AP). */
		unicast = !is_multicast_ether_addr(hdr->addr3);
		id = 0;
		break;
	}

	/* Claim the next descriptor slot and remember the txb so the TX
	 * completion path can free it. */
	tfd = &txq->bd[q->first_empty];
	txq->txb[q->first_empty] = txb;
	memset(tfd, 0, sizeof(*tfd));
	tfd->u.data.station_number = id;

	tfd->control_flags.message_type = TX_FRAME_TYPE;
	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;

	tfd->u.data.cmd_id = DINO_CMD_TX;
	tfd->u.data.len = cpu_to_le16(txb->payload_size);

	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
	else
		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;

	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;

	/* Clear MOREFRAGS in the header we hand to the firmware: all
	 * fragments are delivered through this one TFD below. */
	fc = le16_to_cpu(hdr->frame_ctl);
	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);

	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);

	if (likely(unicast))
		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;

	if (txb->encrypted && !priv->ieee->host_encrypt) {
		/* Hardware encryption: select cipher flags per sec.level. */
		switch (priv->ieee->sec.level) {
		case SEC_LEVEL_3:
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			/* XXX: ACK flag must be set for CCMP even if it
			 * is a multicast/broadcast packet, because CCMP
			 * group communication encrypted by GTK is
			 * actually done by the AP. */
			if (!unicast)
				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;

			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
			tfd->u.data.key_index = 0;
			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
			break;
		case SEC_LEVEL_2:
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
			break;
		case SEC_LEVEL_1:
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
			    40)
				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
			else
				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
			break;
		case SEC_LEVEL_0:
			break;
		default:
			printk(KERN_ERR "Unknow security level %d\n",
			       priv->ieee->sec.level);
			break;
		}
	} else
		/* No hardware encryption */
		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;

#ifdef CONFIG_IPW2200_QOS
	if (fc & IEEE80211_STYPE_QOS_DATA)
		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
#endif				/* CONFIG_IPW2200_QOS */

	/* payload: DMA-map up to NUM_TFD_CHUNKS - 2 fragments directly... */
	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
						 txb->nr_frags));
	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
			       i, le32_to_cpu(tfd->u.data.num_chunks),
			       txb->fragments[i]->len - hdr_len);
		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
			     i, tfd->u.data.num_chunks,
			     txb->fragments[i]->len - hdr_len);
		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
			   txb->fragments[i]->len - hdr_len);

		tfd->u.data.chunk_ptr[i] =
		    cpu_to_le32(pci_map_single
				(priv->pci_dev,
				 txb->fragments[i]->data + hdr_len,
				 txb->fragments[i]->len - hdr_len,
				 PCI_DMA_TODEVICE));
		tfd->u.data.chunk_len[i] =
		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
	}

	if (i != txb->nr_frags) {
		/* ...then coalesce any leftover fragments into one extra
		 * skb/chunk so the frame still fits in a single TFD.  On
		 * allocation failure the leftovers are silently omitted. */
		struct sk_buff *skb;
		u16 remaining_bytes = 0;
		int j;

		for (j = i; j < txb->nr_frags; j++)
			remaining_bytes += txb->fragments[j]->len - hdr_len;

		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
		       remaining_bytes);
		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
		if (skb != NULL) {
			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
			for (j = i; j < txb->nr_frags; j++) {
				int size = txb->fragments[j]->len - hdr_len;

				printk(KERN_INFO "Adding frag %d %d...\n",
				       j, size);
				memcpy(skb_put(skb, size),
				       txb->fragments[j]->data + hdr_len, size);
			}
			dev_kfree_skb_any(txb->fragments[i]);
			txb->fragments[i] = skb;
			tfd->u.data.chunk_ptr[i] =
			    cpu_to_le32(pci_map_single
					(priv->pci_dev, skb->data,
					 remaining_bytes,
					 PCI_DMA_TODEVICE));

			le32_add_cpu(&tfd->u.data.num_chunks, 1);
		}
	}

	/* kick DMA */
	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
	ipw_write32(priv, q->reg_w, q->first_empty);

	/* Throttle the stack before the ring fills completely. */
	if (ipw_tx_queue_space(q) < q->high_mark)
		netif_stop_queue(priv->net_dev);

	return NETDEV_TX_OK;

      drop:
	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
	libipw_txb_free(txb);
	return NETDEV_TX_OK;
}
10378
10379 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10380 {
10381 struct ipw_priv *priv = libipw_priv(dev);
10382 #ifdef CONFIG_IPW2200_QOS
10383 int tx_id = ipw_get_tx_queue_number(priv, pri);
10384 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10385 #else
10386 struct clx2_tx_queue *txq = &priv->txq[0];
10387 #endif /* CONFIG_IPW2200_QOS */
10388
10389 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10390 return 1;
10391
10392 return 0;
10393 }
10394
10395 #ifdef CONFIG_IPW2200_PROMISCUOUS
/*
 * Mirror an outgoing frame to the promiscuous (rtap) interface.
 *
 * Applies the prom_priv->filter settings (suppress TX entirely, drop or
 * truncate management/control/data frames), prepends a minimal radiotap
 * header carrying only the channel field, and feeds each fragment to
 * libipw_rx() on the rtap device.
 */
static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
				      struct libipw_txb *txb)
{
	struct libipw_rx_stats dummystats;
	struct ieee80211_hdr *hdr;
	u8 n;
	u16 filter = priv->prom_priv->filter;
	int hdr_only = 0;

	if (filter & IPW_PROM_NO_TX)
		return;

	memset(&dummystats, 0, sizeof(dummystats));

	/* Filtering of fragment chains is done agains the first fragment */
	hdr = (void *)txb->fragments[0]->data;
	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
		if (filter & IPW_PROM_NO_MGMT)
			return;
		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
			hdr_only = 1;
	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
		if (filter & IPW_PROM_NO_CTL)
			return;
		if (filter & IPW_PROM_CTL_HEADER_ONLY)
			hdr_only = 1;
	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
		if (filter & IPW_PROM_NO_DATA)
			return;
		if (filter & IPW_PROM_DATA_HEADER_ONLY)
			hdr_only = 1;
	}

	for(n=0; n<txb->nr_frags; ++n) {
		struct sk_buff *src = txb->fragments[n];
		struct sk_buff *dst;
		struct ieee80211_radiotap_header *rt_hdr;
		int len;

		/* "Header only": copy just each fragment's 802.11 header. */
		if (hdr_only) {
			hdr = (void *)src->data;
			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
		} else
			len = src->len;

		dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
		if (!dst)
			continue;

		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));

		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
		rt_hdr->it_pad = 0;
		rt_hdr->it_present = 0; /* after all, it's just an idea */
		rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);

		/* Radiotap channel field: frequency in MHz followed by the
		 * band/modulation flag word. */
		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
			ieee80211chan2mhz(priv->channel));
		if (priv->channel > 14)	/* 802.11a */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					    IEEE80211_CHAN_5GHZ);
		else if (priv->ieee->mode == IEEE_B)	/* 802.11b */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_CCK |
					    IEEE80211_CHAN_2GHZ);
		else		/* 802.11g */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					    IEEE80211_CHAN_2GHZ);

		rt_hdr->it_len = cpu_to_le16(dst->len);

		skb_copy_from_linear_data(src, skb_put(dst, len), len);

		/* libipw_rx() consumes dst on success; free it otherwise. */
		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
			dev_kfree_skb_any(dst);
	}
}
10475 #endif
10476
/*
 * TX entry point for frames already converted to a libipw fragment chain.
 * Takes priv->lock around the TX path, optionally mirrors the frame to
 * the promiscuous interface, then queues it via ipw_tx_skb() and lights
 * the activity LED on success.
 */
static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
					   struct net_device *dev, int pri)
{
	struct ipw_priv *priv = libipw_priv(dev);
	unsigned long flags;
	netdev_tx_t ret;

	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
	spin_lock_irqsave(&priv->lock, flags);

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface && netif_running(priv->prom_net_dev))
		ipw_handle_promiscuous_tx(priv, txb);
#endif

	ret = ipw_tx_skb(priv, txb, pri);
	if (ret == NETDEV_TX_OK)
		__ipw_led_activity_on(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
10499
/* Multicast-list hook.  Intentionally a no-op: this driver does not
 * reprogram any hardware multicast filter here. */
static void ipw_net_set_multicast_list(struct net_device *dev)
{

}
10504
10505 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10506 {
10507 struct ipw_priv *priv = libipw_priv(dev);
10508 struct sockaddr *addr = p;
10509
10510 if (!is_valid_ether_addr(addr->sa_data))
10511 return -EADDRNOTAVAIL;
10512 mutex_lock(&priv->mutex);
10513 priv->config |= CFG_CUSTOM_MAC;
10514 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10515 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10516 priv->net_dev->name, priv->mac_addr);
10517 queue_work(priv->workqueue, &priv->adapter_restart);
10518 mutex_unlock(&priv->mutex);
10519 return 0;
10520 }
10521
10522 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10523 struct ethtool_drvinfo *info)
10524 {
10525 struct ipw_priv *p = libipw_priv(dev);
10526 char vers[64];
10527 char date[32];
10528 u32 len;
10529
10530 strcpy(info->driver, DRV_NAME);
10531 strcpy(info->version, DRV_VERSION);
10532
10533 len = sizeof(vers);
10534 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10535 len = sizeof(date);
10536 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10537
10538 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10539 vers, date);
10540 strcpy(info->bus_info, pci_name(p->pci_dev));
10541 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10542 }
10543
10544 static u32 ipw_ethtool_get_link(struct net_device *dev)
10545 {
10546 struct ipw_priv *priv = libipw_priv(dev);
10547 return (priv->status & STATUS_ASSOCIATED) != 0;
10548 }
10549
/* ethtool get_eeprom_len hook: size of the cached EEPROM image. */
static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
{
	return IPW_EEPROM_IMAGE_SIZE;
}
10554
10555 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10556 struct ethtool_eeprom *eeprom, u8 * bytes)
10557 {
10558 struct ipw_priv *p = libipw_priv(dev);
10559
10560 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10561 return -EINVAL;
10562 mutex_lock(&p->mutex);
10563 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10564 mutex_unlock(&p->mutex);
10565 return 0;
10566 }
10567
10568 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10569 struct ethtool_eeprom *eeprom, u8 * bytes)
10570 {
10571 struct ipw_priv *p = libipw_priv(dev);
10572 int i;
10573
10574 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10575 return -EINVAL;
10576 mutex_lock(&p->mutex);
10577 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10578 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10579 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10580 mutex_unlock(&p->mutex);
10581 return 0;
10582 }
10583
/* ethtool operations exported for this net_device. */
static const struct ethtool_ops ipw_ethtool_ops = {
	.get_link = ipw_ethtool_get_link,
	.get_drvinfo = ipw_ethtool_get_drvinfo,
	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
	.get_eeprom = ipw_ethtool_get_eeprom,
	.set_eeprom = ipw_ethtool_set_eeprom,
};
10591
/* Primary interrupt handler (hard-IRQ context).  Reads and acks the
 * pending INTA bits, masks further interrupts, caches INTA in
 * priv->isr_inta and defers all real work to the irq tasklet.
 * Returns IRQ_NONE when the interrupt was not ours (shared line,
 * interrupts disabled, or the hardware has vanished). */
static irqreturn_t ipw_isr(int irq, void *data)
{
	struct ipw_priv *priv = data;
	u32 inta, inta_mask;

	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->irq_lock);

	if (!(priv->status & STATUS_INT_ENABLED)) {
		/* IRQ is disabled */
		goto none;
	}

	inta = ipw_read32(priv, IPW_INTA_RW);
	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);

	if (inta == 0xFFFFFFFF) {
		/* Hardware disappeared (all-ones read from a dead bus) */
		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
		goto none;
	}

	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
		/* Shared interrupt: none of our bits are pending */
		goto none;
	}

	/* tell the device to stop sending interrupts; the tasklet
	 * re-enables them when it has drained the events */
	__ipw_disable_interrupts(priv);

	/* ack current interrupts */
	inta &= (IPW_INTA_MASK_ALL & inta_mask);
	ipw_write32(priv, IPW_INTA_RW, inta);

	/* Cache INTA value for our tasklet */
	priv->isr_inta = inta;

	tasklet_schedule(&priv->irq_tasklet);

	spin_unlock(&priv->irq_lock);

	return IRQ_HANDLED;
      none:
	spin_unlock(&priv->irq_lock);
	return IRQ_NONE;
}
10640
/* Poll the hardware RF-kill switch.  While the switch is still active
 * we re-arm ourselves every 2 seconds; once it clears (and no software
 * rf-kill is set) the adapter restart is queued to bring the device
 * back up.  Called with priv->mutex held via ipw_bg_rf_kill. */
static void ipw_rf_kill(void *adapter)
{
	struct ipw_priv *priv = adapter;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (rf_kill_active(priv)) {
		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
		/* workqueue may already be gone during teardown */
		if (priv->workqueue)
			queue_delayed_work(priv->workqueue,
					   &priv->rf_kill, 2 * HZ);
		goto exit_unlock;
	}

	/* RF Kill is now disabled, so bring the device back up */

	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
				  "device\n");

		/* we can not do an adapter restart while inside an irq lock */
		queue_work(priv->workqueue, &priv->adapter_restart);
	} else
		IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
				  "enabled\n");

      exit_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
10671
/* Workqueue wrapper: run ipw_rf_kill under the driver mutex. */
static void ipw_bg_rf_kill(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, rf_kill.work);
	mutex_lock(&priv->mutex);
	ipw_rf_kill(priv);
	mutex_unlock(&priv->mutex);
}
10680
/* Transition to the "associated" state: reset duplicate-frame tracking,
 * mark the carrier up, cancel pending scans, refresh statistics/LEDs
 * and notify wireless extensions.  Optionally re-arms a background scan
 * one second later when CFG_BACKGROUND_SCAN is set. */
static void ipw_link_up(struct ipw_priv *priv)
{
	/* -1 sentinels: no frame seen yet for dup detection */
	priv->last_seq_num = -1;
	priv->last_frag_num = -1;
	priv->last_packet_time = 0;

	netif_carrier_on(priv->net_dev);

	/* a new association invalidates any in-flight scan requests */
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->scan_event);
	ipw_reset_stats(priv);
	/* Ensure the rate is updated immediately */
	priv->last_rate = ipw_get_current_rate(priv);
	ipw_gather_stats(priv);
	ipw_led_link_up(priv);
	notify_wx_assoc_event(priv);

	if (priv->config & CFG_BACKGROUND_SCAN)
		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
}
10703
/* Workqueue wrapper: run ipw_link_up under the driver mutex. */
static void ipw_bg_link_up(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, link_up);
	mutex_lock(&priv->mutex);
	ipw_link_up(priv);
	mutex_unlock(&priv->mutex);
}
10712
/* Transition to the "not associated" state: LEDs/carrier down, notify
 * wireless extensions, cancel periodic work, and — unless the driver is
 * shutting down — queue an immediate scan to find a new network. */
static void ipw_link_down(struct ipw_priv *priv)
{
	ipw_led_link_down(priv);
	netif_carrier_off(priv->net_dev);
	notify_wx_assoc_event(priv);

	/* Cancel any queued work ... */
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);

	ipw_reset_stats(priv);

	if (!(priv->status & STATUS_EXIT_PENDING)) {
		/* Queue up another scan... */
		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
	} else
		cancel_delayed_work(&priv->scan_event);
}
10734
/* Workqueue wrapper: run ipw_link_down under the driver mutex. */
static void ipw_bg_link_down(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, link_down);
	mutex_lock(&priv->mutex);
	ipw_link_down(priv);
	mutex_unlock(&priv->mutex);
}
10743
10744 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10745 {
10746 int ret = 0;
10747
10748 priv->workqueue = create_workqueue(DRV_NAME);
10749 init_waitqueue_head(&priv->wait_command_queue);
10750 init_waitqueue_head(&priv->wait_state);
10751
10752 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10753 INIT_WORK(&priv->associate, ipw_bg_associate);
10754 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10755 INIT_WORK(&priv->system_config, ipw_system_config);
10756 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10757 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10758 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10759 INIT_WORK(&priv->up, ipw_bg_up);
10760 INIT_WORK(&priv->down, ipw_bg_down);
10761 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10762 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10763 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10764 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10765 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10766 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10767 INIT_WORK(&priv->roam, ipw_bg_roam);
10768 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10769 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10770 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10771 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10772 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10773 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10774 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10775
10776 #ifdef CONFIG_IPW2200_QOS
10777 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10778 #endif /* CONFIG_IPW2200_QOS */
10779
10780 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10781 ipw_irq_tasklet, (unsigned long)priv);
10782
10783 return ret;
10784 }
10785
/* libipw set_security callback: mirror the requested security settings
 * (WEP keys, active key index, auth mode, privacy/encrypt flags, WEP
 * level) into priv->ieee->sec, setting STATUS_SECURITY_UPDATED on any
 * change so the association path knows to resend configuration.  Keys
 * are pushed to hardware only when host encryption is off. */
static void shim__set_security(struct net_device *dev,
			       struct libipw_security *sec)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int i;
	/* one flag bit per WEP key slot 0..3 */
	for (i = 0; i < 4; i++) {
		if (sec->flags & (1 << i)) {
			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
			if (sec->key_sizes[i] == 0)
				priv->ieee->sec.flags &= ~(1 << i);
			else {
				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
				       sec->key_sizes[i]);
				priv->ieee->sec.flags |= (1 << i);
			}
			priv->status |= STATUS_SECURITY_UPDATED;
		} else if (sec->level != SEC_LEVEL_1)
			priv->ieee->sec.flags &= ~(1 << i);
	}

	if (sec->flags & SEC_ACTIVE_KEY) {
		if (sec->active_key <= 3) {
			priv->ieee->sec.active_key = sec->active_key;
			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
		} else
			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	} else
		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;

	if ((sec->flags & SEC_AUTH_MODE) &&
	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
		priv->ieee->sec.auth_mode = sec->auth_mode;
		priv->ieee->sec.flags |= SEC_AUTH_MODE;
		/* shared-key auth implies the shared-key capability bit */
		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
			priv->capability |= CAP_SHARED_KEY;
		else
			priv->capability &= ~CAP_SHARED_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
		priv->ieee->sec.flags |= SEC_ENABLED;
		priv->ieee->sec.enabled = sec->enabled;
		priv->status |= STATUS_SECURITY_UPDATED;
		if (sec->enabled)
			priv->capability |= CAP_PRIVACY_ON;
		else
			priv->capability &= ~CAP_PRIVACY_ON;
	}

	if (sec->flags & SEC_ENCRYPT)
		priv->ieee->sec.encrypt = sec->encrypt;

	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
		priv->ieee->sec.level = sec->level;
		priv->ieee->sec.flags |= SEC_LEVEL;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
		ipw_set_hwcrypto_keys(priv);

	/* To match current functionality of ipw2100 (which works well w/
	 * various supplicants, we don't force a disassociate if the
	 * privacy capability changes ... */
#if 0
	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
	    (((priv->assoc_request.capability &
	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
	     (!(priv->assoc_request.capability &
		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
		IPW_DEBUG_ASSOC("Disassociating due to capability "
				"change.\n");
		ipw_disassociate(priv);
	}
#endif
}
10865
/* Build the supported-rates element sent to the firmware based on the
 * configured frequency band: OFDM-only for 5.2GHz (A mode), CCK plus
 * optional OFDM for 2.4GHz/mixed (G mode).  Always returns 0. */
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *rates)
{
	/* TODO: Mask out rates based on priv->rates_mask */

	memset(rates, 0, sizeof(*rates));
	/* configure supported rates */
	switch (priv->ieee->freq_band) {
	case LIBIPW_52GHZ_BAND:
		rates->ieee_mode = IPW_A_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		/* NOTE(review): LIBIPW_CCK_MODULATION is passed even for
		 * the OFDM helper here and below — presumably the
		 * modulation argument is ignored/overridden by the
		 * helper; verify against ipw_add_ofdm_scan_rates. */
		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
					LIBIPW_OFDM_DEFAULT_RATES_MASK);
		break;

	default:		/* Mixed or 2.4Ghz */
		rates->ieee_mode = IPW_G_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
				       LIBIPW_CCK_DEFAULT_RATES_MASK);
		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
						LIBIPW_OFDM_DEFAULT_RATES_MASK);
		}
		break;
	}

	return 0;
}
10895
/* Push the full runtime configuration to freshly loaded firmware: Tx
 * power, MAC address, system config (incl. Bluetooth coexistence and
 * promiscuous-mode filters), supported rates, RTS threshold, QoS,
 * random seed, then the host-complete command that moves the firmware
 * to the RUN state.  Returns 0 on success, -EIO on any command failure.
 * Called only from ipw_up() after a firmware reset/reload. */
static int ipw_config(struct ipw_priv *priv)
{
	/* This is only called from ipw_up, which resets/reloads the firmware
	   so, we don't need to first disable the card before we configure
	   it */
	if (ipw_set_tx_power(priv))
		goto error;

	/* initialize adapter address */
	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
		goto error;

	/* set basic system config settings */
	init_sys_config(&priv->sys_config);

	/* Support Bluetooth if we have BT h/w on board, and user wants to.
	 * Does not support BT priority yet (don't abort or defer our Tx) */
	if (bt_coexist) {
		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];

		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_OOB;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* while the radiotap monitor is up, accept everything */
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	if (ipw_send_system_config(priv))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}
#ifdef CONFIG_IPW2200_QOS
	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
	ipw_qos_activate(priv, NULL);
#endif				/* CONFIG_IPW2200_QOS */

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	priv->status |= STATUS_INIT;

	ipw_led_init(priv);
	ipw_led_radio_on(priv);
	priv->notif_missed_beacons = 0;

	/* Set hardware WEP key if it is configured. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
		ipw_set_hwcrypto_keys(priv);

	return 0;

      error:
	return -EIO;
}
10979
10980 /*
10981 * NOTE:
10982 *
10983 * These tables have been tested in conjunction with the
10984 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10985 *
 * Altering these values, using them on other hardware, or in geographies
 * not intended for resale of the above mentioned Intel adapters has
 * not been tested.
10989 *
10990 * Remember to update the table in README.ipw2200 when changing this
10991 * table.
10992 *
10993 */
/* Geography (regulatory) table, matched against the 3-character SKU
 * code read from the EEPROM in ipw_up().  Each entry lists the legal
 * 2.4GHz (.bg) and 5GHz (.a) channels for that region; channels marked
 * LIBIPW_CH_PASSIVE_ONLY may only be passively scanned, and
 * LIBIPW_CH_B_ONLY restricts a channel to 802.11b rates.  Entry 0
 * ("---") is the restricted fallback used for unrecognized SKUs. */
static const struct libipw_geo ipw_geos[] = {
	{			/* Restricted */
	 "---",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Custom US/Canada */
	 "ZZF",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 8,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Rest of World */
	 "ZZD",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 },

	{			/* Custom USA & Europe & High */
	 "ZZA",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149},
	       {5765, 153},
	       {5785, 157},
	       {5805, 161},
	       {5825, 165}},
	 },

	{			/* Custom NA & Europe */
	 "ZZB",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZC",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Custom */
	 "ZZM",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Europe */
	 "ZZE",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 .a_channels = 19,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZJ",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Rest of World */
	 "ZZR",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
			     LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* High Band */
	 "ZZH",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
	 .a_channels = 4,
	 .a = {{5745, 149}, {5765, 153},
	       {5785, 157}, {5805, 161}},
	 },

	{			/* Custom Europe */
	 "ZZG",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12}, {2472, 13}},
	 .a_channels = 4,
	 .a = {{5180, 36}, {5200, 40},
	       {5220, 44}, {5240, 48}},
	 },

	{			/* Europe */
	 "ZZK",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
	 .a_channels = 24,
	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Europe */
	 "ZZL",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 }
};
11239
/* Maximum firmware load/configure attempts before giving up. */
#define MAX_HW_RESTARTS 5

/* Bring the adapter up: allocate the command log (if requested via the
 * cmdlog module parameter), then up to MAX_HW_RESTARTS times load the
 * firmware, apply the MAC address and EEPROM geography, honor rf-kill,
 * and configure the device.  On success an auto-associate scan is
 * queued.  Returns 0 on success (or when rf-kill defers bring-up),
 * negative errno on failure.  Caller holds priv->mutex. */
static int ipw_up(struct ipw_priv *priv)
{
	int rc, i, j;

	/* Age scan list entries found before suspend */
	if (priv->suspend_time) {
		libipw_networks_age(priv->ieee, priv->suspend_time);
		priv->suspend_time = 0;
	}

	if (priv->status & STATUS_EXIT_PENDING)
		return -EIO;

	if (cmdlog && !priv->cmdlog) {
		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
				       GFP_KERNEL);
		if (priv->cmdlog == NULL) {
			IPW_ERROR("Error allocating %d command log entries.\n",
				  cmdlog);
			return -ENOMEM;
		} else {
			priv->cmdlog_len = cmdlog;
		}
	}

	for (i = 0; i < MAX_HW_RESTARTS; i++) {
		/* Load the microcode, firmware, and eeprom.
		 * Also start the clocks. */
		rc = ipw_load(priv);
		if (rc) {
			IPW_ERROR("Unable to load firmware: %d\n", rc);
			return rc;
		}

		ipw_init_ordinals(priv);
		/* EEPROM MAC is used unless the user set a custom one */
		if (!(priv->config & CFG_CUSTOM_MAC))
			eeprom_parse_mac(priv, priv->mac_addr);
		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

		/* match the EEPROM's 3-char SKU against the geo table */
		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
				    ipw_geos[j].name, 3))
				break;
		}
		if (j == ARRAY_SIZE(ipw_geos)) {
			IPW_WARNING("SKU [%c%c%c] not recognized.\n",
				    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
				    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
				    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
			/* fall back to the restricted "---" entry */
			j = 0;
		}
		if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
			IPW_WARNING("Could not set geography.");
			return 0;
		}

		if (priv->status & STATUS_RF_KILL_SW) {
			IPW_WARNING("Radio disabled by module parameter.\n");
			return 0;
		} else if (rf_kill_active(priv)) {
			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
				    "Kill switch must be turned off for "
				    "wireless networking to work.\n");
			/* poll the switch; ipw_rf_kill restarts us later */
			queue_delayed_work(priv->workqueue, &priv->rf_kill,
					   2 * HZ);
			return 0;
		}

		rc = ipw_config(priv);
		if (!rc) {
			IPW_DEBUG_INFO("Configured device on count %i\n", i);

			/* If configure to try and auto-associate, kick
			 * off a scan. */
			queue_delayed_work(priv->workqueue,
					   &priv->request_scan, 0);

			return 0;
		}

		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
			       i, MAX_HW_RESTARTS);

		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		ipw_down(priv);
	}

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);

	return -EIO;
}
11336
/* Workqueue wrapper: run ipw_up under the driver mutex. */
static void ipw_bg_up(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, up);
	mutex_lock(&priv->mutex);
	ipw_up(priv);
	mutex_unlock(&priv->mutex);
}
11345
/* Gracefully wind the adapter down: abort scans, disassociate, shut the
 * LEDs off, busy-wait (up to ~10ms total of udelay in 1000 steps) for
 * the state machine to settle, then send the card-disable command and
 * clear STATUS_INIT. */
static void ipw_deinit(struct ipw_priv *priv)
{
	int i;

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
		ipw_abort_scan(priv);
	}

	if (priv->status & STATUS_ASSOCIATED) {
		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
		ipw_disassociate(priv);
	}

	ipw_led_shutdown(priv);

	/* Wait up to 1s for status to change to not scanning and not
	 * associated (disassociation can take a while for a full 802.11
	 * exchange */
	for (i = 1000; i && (priv->status &
			     (STATUS_DISASSOCIATING |
			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
		udelay(10);

	if (priv->status & (STATUS_DISASSOCIATING |
			    STATUS_ASSOCIATED | STATUS_SCANNING))
		IPW_DEBUG_INFO("Still associated or scanning...\n");
	else
		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);

	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	priv->status &= ~STATUS_INIT;
}
11381
/* Take the adapter fully down: deinit (if initialized), disable
 * interrupts, clear all status bits except RF-kill (and EXIT_PENDING
 * when the module itself is unloading), stop the NIC and turn the radio
 * LED off. */
static void ipw_down(struct ipw_priv *priv)
{
	/* remember whether module exit set EXIT_PENDING before us */
	int exit_pending = priv->status & STATUS_EXIT_PENDING;

	priv->status |= STATUS_EXIT_PENDING;

	if (ipw_is_init(priv))
		ipw_deinit(priv);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		priv->status &= ~STATUS_EXIT_PENDING;

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
	netif_carrier_off(priv->net_dev);

	ipw_stop_nic(priv);

	ipw_led_radio_off(priv);
}
11407
/* Workqueue wrapper: run ipw_down under the driver mutex. */
static void ipw_bg_down(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, down);
	mutex_lock(&priv->mutex);
	ipw_down(priv);
	mutex_unlock(&priv->mutex);
}
11416
11417 /* Called by register_netdev() */
11418 static int ipw_net_init(struct net_device *dev)
11419 {
11420 int i, rc = 0;
11421 struct ipw_priv *priv = libipw_priv(dev);
11422 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11423 struct wireless_dev *wdev = &priv->ieee->wdev;
11424 mutex_lock(&priv->mutex);
11425
11426 if (ipw_up(priv)) {
11427 rc = -EIO;
11428 goto out;
11429 }
11430
11431 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11432
11433 /* fill-out priv->ieee->bg_band */
11434 if (geo->bg_channels) {
11435 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11436
11437 bg_band->band = IEEE80211_BAND_2GHZ;
11438 bg_band->n_channels = geo->bg_channels;
11439 bg_band->channels =
11440 kzalloc(geo->bg_channels *
11441 sizeof(struct ieee80211_channel), GFP_KERNEL);
11442 /* translate geo->bg to bg_band.channels */
11443 for (i = 0; i < geo->bg_channels; i++) {
11444 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11445 bg_band->channels[i].center_freq = geo->bg[i].freq;
11446 bg_band->channels[i].hw_value = geo->bg[i].channel;
11447 bg_band->channels[i].max_power = geo->bg[i].max_power;
11448 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11449 bg_band->channels[i].flags |=
11450 IEEE80211_CHAN_PASSIVE_SCAN;
11451 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11452 bg_band->channels[i].flags |=
11453 IEEE80211_CHAN_NO_IBSS;
11454 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11455 bg_band->channels[i].flags |=
11456 IEEE80211_CHAN_RADAR;
11457 /* No equivalent for LIBIPW_CH_80211H_RULES,
11458 LIBIPW_CH_UNIFORM_SPREADING, or
11459 LIBIPW_CH_B_ONLY... */
11460 }
11461 /* point at bitrate info */
11462 bg_band->bitrates = ipw2200_bg_rates;
11463 bg_band->n_bitrates = ipw2200_num_bg_rates;
11464
11465 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11466 }
11467
11468 /* fill-out priv->ieee->a_band */
11469 if (geo->a_channels) {
11470 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11471
11472 a_band->band = IEEE80211_BAND_5GHZ;
11473 a_band->n_channels = geo->a_channels;
11474 a_band->channels =
11475 kzalloc(geo->a_channels *
11476 sizeof(struct ieee80211_channel), GFP_KERNEL);
11477 /* translate geo->bg to a_band.channels */
11478 for (i = 0; i < geo->a_channels; i++) {
11479 a_band->channels[i].band = IEEE80211_BAND_2GHZ;
11480 a_band->channels[i].center_freq = geo->a[i].freq;
11481 a_band->channels[i].hw_value = geo->a[i].channel;
11482 a_band->channels[i].max_power = geo->a[i].max_power;
11483 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11484 a_band->channels[i].flags |=
11485 IEEE80211_CHAN_PASSIVE_SCAN;
11486 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11487 a_band->channels[i].flags |=
11488 IEEE80211_CHAN_NO_IBSS;
11489 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11490 a_band->channels[i].flags |=
11491 IEEE80211_CHAN_RADAR;
11492 /* No equivalent for LIBIPW_CH_80211H_RULES,
11493 LIBIPW_CH_UNIFORM_SPREADING, or
11494 LIBIPW_CH_B_ONLY... */
11495 }
11496 /* point at bitrate info */
11497 a_band->bitrates = ipw2200_a_rates;
11498 a_band->n_bitrates = ipw2200_num_a_rates;
11499
11500 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11501 }
11502
11503 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11504
11505 /* With that information in place, we can now register the wiphy... */
11506 if (wiphy_register(wdev->wiphy)) {
11507 rc = -EIO;
11508 goto out;
11509 }
11510
11511 out:
11512 mutex_unlock(&priv->mutex);
11513 return rc;
11514 }
11515
11516 /* PCI driver stuff */
/* PCI driver stuff */
/* PCI IDs this driver binds to: 2915ABG subsystem variants (vendor
 * 0x8086, device 0x1043) plus the 2200BG/2915ABG base device IDs. */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VDEVICE(INTEL, 0x104f), 0},
	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
11546
/* Device attributes exported under the PCI device's sysfs directory by
 * ipw_pci_probe() (sysfs_create_group with ipw_attribute_group). */
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_cfg.attr,
	&dev_attr_error.attr,
	&dev_attr_event_log.attr,
	&dev_attr_cmd_log.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	&dev_attr_rtc.attr,
	&dev_attr_scan_age.attr,
	&dev_attr_led.attr,
	&dev_attr_speed_scan.attr,
	&dev_attr_net_stats.attr,
	&dev_attr_channels.attr,
#ifdef CONFIG_IPW2200_PROMISCUOUS
	&dev_attr_rtap_iface.attr,
	&dev_attr_rtap_filter.attr,
#endif
	NULL
};
11574
/* Attribute group registered/removed in ipw_pci_probe()/ipw_pci_remove(). */
static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
11579
11580 #ifdef CONFIG_IPW2200_PROMISCUOUS
11581 static int ipw_prom_open(struct net_device *dev)
11582 {
11583 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11584 struct ipw_priv *priv = prom_priv->priv;
11585
11586 IPW_DEBUG_INFO("prom dev->open\n");
11587 netif_carrier_off(dev);
11588
11589 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11590 priv->sys_config.accept_all_data_frames = 1;
11591 priv->sys_config.accept_non_directed_frames = 1;
11592 priv->sys_config.accept_all_mgmt_bcpr = 1;
11593 priv->sys_config.accept_all_mgmt_frames = 1;
11594
11595 ipw_send_system_config(priv);
11596 }
11597
11598 return 0;
11599 }
11600
11601 static int ipw_prom_stop(struct net_device *dev)
11602 {
11603 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11604 struct ipw_priv *priv = prom_priv->priv;
11605
11606 IPW_DEBUG_INFO("prom dev->stop\n");
11607
11608 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11609 priv->sys_config.accept_all_data_frames = 0;
11610 priv->sys_config.accept_non_directed_frames = 0;
11611 priv->sys_config.accept_all_mgmt_bcpr = 0;
11612 priv->sys_config.accept_all_mgmt_frames = 0;
11613
11614 ipw_send_system_config(priv);
11615 }
11616
11617 return 0;
11618 }
11619
/* The rtap interface is receive-only: silently drop anything the stack
 * tries to transmit through it and report success so it is not retried. */
static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	IPW_DEBUG_INFO("prom dev->xmit\n");
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
11627
/* net_device_ops for the rtap companion interface (created on demand by
 * ipw_prom_alloc() when the rtap_iface module parameter is set). */
static const struct net_device_ops ipw_prom_netdev_ops = {
	.ndo_open = ipw_prom_open,
	.ndo_stop = ipw_prom_stop,
	.ndo_start_xmit = ipw_prom_hard_start_xmit,
	.ndo_change_mtu = libipw_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
11636
11637 static int ipw_prom_alloc(struct ipw_priv *priv)
11638 {
11639 int rc = 0;
11640
11641 if (priv->prom_net_dev)
11642 return -EPERM;
11643
11644 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
11645 if (priv->prom_net_dev == NULL)
11646 return -ENOMEM;
11647
11648 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11649 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11650 priv->prom_priv->priv = priv;
11651
11652 strcpy(priv->prom_net_dev->name, "rtap%d");
11653 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11654
11655 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11656 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11657
11658 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11659 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11660
11661 rc = register_netdev(priv->prom_net_dev);
11662 if (rc) {
11663 free_ieee80211(priv->prom_net_dev, 1);
11664 priv->prom_net_dev = NULL;
11665 return rc;
11666 }
11667
11668 return 0;
11669 }
11670
11671 static void ipw_prom_free(struct ipw_priv *priv)
11672 {
11673 if (!priv->prom_net_dev)
11674 return;
11675
11676 unregister_netdev(priv->prom_net_dev);
11677 free_ieee80211(priv->prom_net_dev, 1);
11678
11679 priv->prom_net_dev = NULL;
11680 }
11681
11682 #endif
11683
/* net_device_ops for the primary wireless interface. */
static const struct net_device_ops ipw_netdev_ops = {
	.ndo_init = ipw_net_init,
	.ndo_open = ipw_net_open,
	.ndo_stop = ipw_net_stop,
	.ndo_set_multicast_list = ipw_net_set_multicast_list,
	.ndo_set_mac_address = ipw_net_set_mac_address,
	.ndo_start_xmit = libipw_xmit,
	.ndo_change_mtu = libipw_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};
11694
/*
 * PCI probe: bring up one IPW2200/2915 adapter.
 *
 * Allocates the net_device + private state, enables and maps the PCI
 * device, sets up deferred work and the interrupt handler, installs the
 * libipw callbacks, creates the sysfs attribute group and registers the
 * network interface.  Resources are released in reverse order via the
 * goto-cleanup ladder at the bottom on any failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit ipw_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	/* Allocate net_device plus libipw and ipw_priv areas
	 * (0 = regular, non-monitor device). */
	net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = libipw_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	/* Per-bucket lists used to track IBSS peer sequence numbers. */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	/* The hardware only does 32-bit DMA addressing. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	/* Map BAR 0, the device's register window. */
	base = pci_ioremap_bar(pdev, 0);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	/* Workqueue and delayed-work items must exist before the ISR can
	 * schedule anything. */
	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW2200_QOS
	priv->ieee->is_qos_active = ipw_is_qos_active;
	/* NOTE(review): these two assignments look crossed (probe_response
	 * handler = ipw_handle_beacon and vice versa).  This matches the
	 * historical upstream code — confirm it is intentional before
	 * "fixing" it. */
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif				/* CONFIG_IPW2200_QOS */

	/* RSSI bounds used for signal-quality reporting. */
	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->netdev_ops = &ipw_netdev_ops;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	mutex_unlock(&priv->mutex);
	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_sysfs;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* Optionally create the rtap companion interface (module param). */
	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_ieee80211(priv->ieee);
			unregister_netdev(priv->net_dev);
			goto out_remove_sysfs;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

	/* Error unwind: strictly the reverse of the acquisition order. */
out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
	free_irq(pdev->irq, priv);
out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
out_iounmap:
	iounmap(priv->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
out_free_ieee80211:
	free_ieee80211(priv->net_dev, 0);
out:
	return err;
}
11858
/*
 * PCI remove: tear down one adapter in the reverse order of probe.
 *
 * Marks the device exiting, powers it down, unregisters the interfaces,
 * frees queues/logs/work items, and finally releases PCI resources.
 */
static void __devexit ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	/* Stop new work from being queued, then power the device down. */
	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_ieee80211(priv->ieee);
	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}
	/* ipw_down will ensure that there is no more pending work
	 * in the workqueue's, so we can safely remove them now. */
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->scan_event);
	cancel_delayed_work(&priv->rf_kill);
	cancel_delayed_work(&priv->scan_check);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	/* kfree(NULL) is a no-op, so no guard is needed here. */
	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev, 0);
	free_firmware();
}
11925
11926 #ifdef CONFIG_PM
/*
 * Legacy PCI suspend hook: power the adapter off, detach the netdev,
 * and put the PCI device into the requested low-power state.
 */
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	/* Record when we went down so resume can compute the duration. */
	priv->suspend_at = get_seconds();

	return 0;
}
11948
/*
 * Legacy PCI resume hook: re-enable and restore the PCI device, redo the
 * config-space quirk, reattach the netdev, and schedule the device
 * bring-up on the driver workqueue.
 *
 * Returns 0 on success or the pci_enable_device() error.
 */
static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	/* How long were we asleep (used for e.g. scan-age accounting). */
	priv->suspend_time = get_seconds() - priv->suspend_at;

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
11988 #endif
11989
/*
 * PCI shutdown hook: power the adapter down cleanly when the system is
 * halting or rebooting.
 */
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	/* Take down the device; powers it off, etc. */
	ipw_down(pci_get_drvdata(pdev));

	pci_disable_device(pdev);
}
11999
/* driver initialization stuff */
/* PCI driver descriptor; legacy .suspend/.resume PM hooks are used. */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};
12012
12013 static int __init ipw_init(void)
12014 {
12015 int ret;
12016
12017 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12018 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12019
12020 ret = pci_register_driver(&ipw_driver);
12021 if (ret) {
12022 IPW_ERROR("Unable to initialize PCI module\n");
12023 return ret;
12024 }
12025
12026 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12027 if (ret) {
12028 IPW_ERROR("Unable to create driver sysfs file\n");
12029 pci_unregister_driver(&ipw_driver);
12030 return ret;
12031 }
12032
12033 return ret;
12034 }
12035
/* Module exit: undo ipw_init() in reverse order. */
static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
12041
/* Module parameters.  All are read-only through sysfs (mode 0444); they
 * are evaluated once during driver/device initialization. */
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param_named(led, led_support, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param_named(channel, default_channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif
12064
#ifdef CONFIG_IPW2200_QOS
/* QoS tuning knobs (read-only module parameters, mode 0444). */
module_param(qos_enable, int, 0444);
/* Fixed typo in the user-visible description: "functionalitis". */
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif				/* CONFIG_IPW2200_QOS */
12081
/* The parameter is identical either way; only the help text differs
 * depending on whether monitor mode was compiled in. */
#ifdef CONFIG_IPW2200_MONITOR
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");

/* Listing module_exit before module_init is harmless; the macros only
 * record the entry points, order of appearance does not matter. */
module_exit(ipw_exit);
module_init(ipw_init);