drivers/net/wireless/ipw2x00/ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37 #include "ipw.h"
38
39
40 #ifndef KBUILD_EXTMOD
41 #define VK "k"
42 #else
43 #define VK
44 #endif
45
46 #ifdef CONFIG_IPW2200_DEBUG
47 #define VD "d"
48 #else
49 #define VD
50 #endif
51
52 #ifdef CONFIG_IPW2200_MONITOR
53 #define VM "m"
54 #else
55 #define VM
56 #endif
57
58 #ifdef CONFIG_IPW2200_PROMISCUOUS
59 #define VP "p"
60 #else
61 #define VP
62 #endif
63
64 #ifdef CONFIG_IPW2200_RADIOTAP
65 #define VR "r"
66 #else
67 #define VR
68 #endif
69
70 #ifdef CONFIG_IPW2200_QOS
71 #define VQ "q"
72 #else
73 #define VQ
74 #endif
75
76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
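/* The suffix letters record which config options the module was built with;
 * e.g. an in-tree build with CONFIG_IPW2200_DEBUG and CONFIG_IPW2200_QOS set
 * yields the version string "1.2.2kdq". */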
77 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
78 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
79 #define DRV_VERSION IPW2200_VERSION
80
81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82
83 MODULE_DESCRIPTION(DRV_DESCRIPTION);
84 MODULE_VERSION(DRV_VERSION);
85 MODULE_AUTHOR(DRV_COPYRIGHT);
86 MODULE_LICENSE("GPL");
87 MODULE_FIRMWARE("ipw2200-ibss.fw");
88 #ifdef CONFIG_IPW2200_MONITOR
89 MODULE_FIRMWARE("ipw2200-sniffer.fw");
90 #endif
91 MODULE_FIRMWARE("ipw2200-bss.fw");
92
93 static int cmdlog = 0;
94 static int debug = 0;
95 static int default_channel = 0;
96 static int network_mode = 0;
97
98 static u32 ipw_debug_level;
99 static int associate;
100 static int auto_create = 1;
101 static int led_support = 1;
102 static int disable = 0;
103 static int bt_coexist = 0;
104 static int hwcrypto = 0;
105 static int roaming = 1;
106 static const char ipw_modes[] = {
107 'a', 'b', 'g', '?'
108 };
109 static int antenna = CFG_SYS_ANTENNA_BOTH;
110
111 #ifdef CONFIG_IPW2200_PROMISCUOUS
112 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
113 #endif
114
115 static struct ieee80211_rate ipw2200_rates[] = {
116 { .bitrate = 10 },
117 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120 { .bitrate = 60 },
121 { .bitrate = 90 },
122 { .bitrate = 120 },
123 { .bitrate = 180 },
124 { .bitrate = 240 },
125 { .bitrate = 360 },
126 { .bitrate = 480 },
127 { .bitrate = 540 }
128 };
129
130 #define ipw2200_a_rates (ipw2200_rates + 4)
131 #define ipw2200_num_a_rates 8
132 #define ipw2200_bg_rates (ipw2200_rates + 0)
133 #define ipw2200_num_bg_rates 12
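/* The bitrates above are in 100 kbit/s units (10 == 1 Mbit/s, 540 == 54 Mbit/s).
 * ipw2200_bg_rates covers all twelve CCK+OFDM entries, while ipw2200_a_rates
 * skips the four CCK entries and exposes only the eight OFDM rates. */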
134
135 /* Ugly macro to convert literal channel numbers into their MHz equivalents
136  * There are certainly some conditions that will break this (like feeding it '30')
137  * but they shouldn't arise since nothing talks on channel 30. */
138 #define ieee80211chan2mhz(x) \
139 (((x) <= 14) ? \
140 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141 ((x) + 1000) * 5)
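/* Worked examples of the macro above: channel 1 -> 2412, channel 6 -> 2437,
 * channel 14 -> 2484 (the 2.4 GHz special case), and channel 36 ->
 * (36 + 1000) * 5 = 5180 MHz. */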
142
143 #ifdef CONFIG_IPW2200_QOS
144 static int qos_enable = 0;
145 static int qos_burst_enable = 0;
146 static int qos_no_ack_mask = 0;
147 static int burst_duration_CCK = 0;
148 static int burst_duration_OFDM = 0;
149
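/* Going by the macro names used below, each libipw_qos_parameters initializer
 * fills per-queue arrays in the order cw_min, cw_max, aifs, ACM flag and TXOP
 * limit, with one entry for each of the four TX queues (0-3). */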
150 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152 QOS_TX3_CW_MIN_OFDM},
153 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154 QOS_TX3_CW_MAX_OFDM},
155 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159 };
160
161 static struct libipw_qos_parameters def_qos_parameters_CCK = {
162 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163 QOS_TX3_CW_MIN_CCK},
164 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165 QOS_TX3_CW_MAX_CCK},
166 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169 QOS_TX3_TXOP_LIMIT_CCK}
170 };
171
172 static struct libipw_qos_parameters def_parameters_OFDM = {
173 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174 DEF_TX3_CW_MIN_OFDM},
175 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176 DEF_TX3_CW_MAX_OFDM},
177 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181 };
182
183 static struct libipw_qos_parameters def_parameters_CCK = {
184 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185 DEF_TX3_CW_MIN_CCK},
186 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187 DEF_TX3_CW_MAX_CCK},
188 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191 DEF_TX3_TXOP_LIMIT_CCK}
192 };
193
194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195
196 static int from_priority_to_tx_queue[] = {
197 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199 };
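/* The table above maps a priority value (presumably the 802.1d/WMM user
 * priority, 0-7) onto the four firmware TX queues: priorities 0 and 3 share
 * queue 1, 1 and 2 share queue 2, 4 and 5 share queue 3, 6 and 7 share queue 4. */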
200
201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202
203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204 *qos_param);
205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206 *qos_param);
207 #endif /* CONFIG_IPW2200_QOS */
208
209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210 static void ipw_remove_current_network(struct ipw_priv *priv);
211 static void ipw_rx(struct ipw_priv *priv);
212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213 struct clx2_tx_queue *txq, int qindex);
214 static int ipw_queue_reset(struct ipw_priv *priv);
215
216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217 int len, int sync);
218
219 static void ipw_tx_queue_free(struct ipw_priv *);
220
221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223 static void ipw_rx_queue_replenish(void *);
224 static int ipw_up(struct ipw_priv *);
225 static void ipw_bg_up(struct work_struct *work);
226 static void ipw_down(struct ipw_priv *);
227 static void ipw_bg_down(struct work_struct *work);
228 static int ipw_config(struct ipw_priv *);
229 static int init_supported_rates(struct ipw_priv *priv,
230 struct ipw_supported_rates *prates);
231 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232 static void ipw_send_wep_keys(struct ipw_priv *, int);
233
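/* snprint_line() below formats one 16-byte hex-dump line into 'buf': an 8-digit
 * offset, two groups of eight hex bytes, then the same bytes as printable ASCII
 * (non-printable characters shown as '.').  It returns the accumulated
 * snprintf() count. */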
234 static int snprint_line(char *buf, size_t count,
235 const u8 * data, u32 len, u32 ofs)
236 {
237 int out, i, j, l;
238 char c;
239
240 out = snprintf(buf, count, "%08X", ofs);
241
242 for (l = 0, i = 0; i < 2; i++) {
243 out += snprintf(buf + out, count - out, " ");
244 for (j = 0; j < 8 && l < len; j++, l++)
245 out += snprintf(buf + out, count - out, "%02X ",
246 data[(i * 8 + j)]);
247 for (; j < 8; j++)
248 out += snprintf(buf + out, count - out, " ");
249 }
250
251 out += snprintf(buf + out, count - out, " ");
252 for (l = 0, i = 0; i < 2; i++) {
253 out += snprintf(buf + out, count - out, " ");
254 for (j = 0; j < 8 && l < len; j++, l++) {
255 c = data[(i * 8 + j)];
256 if (!isascii(c) || !isprint(c))
257 c = '.';
258
259 out += snprintf(buf + out, count - out, "%c", c);
260 }
261
262 for (; j < 8; j++)
263 out += snprintf(buf + out, count - out, " ");
264 }
265
266 return out;
267 }
268
269 static void printk_buf(int level, const u8 * data, u32 len)
270 {
271 char line[81];
272 u32 ofs = 0;
273 if (!(ipw_debug_level & level))
274 return;
275
276 while (len) {
277 snprint_line(line, sizeof(line), &data[ofs],
278 min(len, 16U), ofs);
279 printk(KERN_DEBUG "%s\n", line);
280 ofs += 16;
281 len -= min(len, 16U);
282 }
283 }
284
285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286 {
287 size_t out = size;
288 u32 ofs = 0;
289 int total = 0;
290
291 while (size && len) {
292 out = snprint_line(output, size, &data[ofs],
293 min_t(size_t, len, 16U), ofs);
294
295 ofs += 16;
296 output += out;
297 size -= out;
298 len -= min_t(size_t, len, 16U);
299 total += out;
300 }
301 return total;
302 }
303
304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307
308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
309 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311
312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315 {
316 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317 __LINE__, (u32) (b), (u32) (c));
318 _ipw_write_reg8(a, b, c);
319 }
320
321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324 {
325 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326 __LINE__, (u32) (b), (u32) (c));
327 _ipw_write_reg16(a, b, c);
328 }
329
330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333 {
334 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335 __LINE__, (u32) (b), (u32) (c));
336 _ipw_write_reg32(a, b, c);
337 }
338
339 /* 8-bit direct write (low 4K) */
340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341 u8 val)
342 {
343 writeb(val, ipw->hw_base + ofs);
344 }
345
346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write8(ipw, ofs, val) do { \
348 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349 __LINE__, (u32)(ofs), (u32)(val)); \
350 _ipw_write8(ipw, ofs, val); \
351 } while (0)
352
353 /* 16-bit direct write (low 4K) */
354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355 u16 val)
356 {
357 writew(val, ipw->hw_base + ofs);
358 }
359
360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write16(ipw, ofs, val) do { \
362 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363 __LINE__, (u32)(ofs), (u32)(val)); \
364 _ipw_write16(ipw, ofs, val); \
365 } while (0)
366
367 /* 32-bit direct write (low 4K) */
368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369 u32 val)
370 {
371 writel(val, ipw->hw_base + ofs);
372 }
373
374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375 #define ipw_write32(ipw, ofs, val) do { \
376 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377 __LINE__, (u32)(ofs), (u32)(val)); \
378 _ipw_write32(ipw, ofs, val); \
379 } while (0)
380
381 /* 8-bit direct read (low 4K) */
382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383 {
384 return readb(ipw->hw_base + ofs);
385 }
386
387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388 #define ipw_read8(ipw, ofs) ({ \
389 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390 (u32)(ofs)); \
391 _ipw_read8(ipw, ofs); \
392 })
393
394 /* 16-bit direct read (low 4K) */
395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396 {
397 return readw(ipw->hw_base + ofs);
398 }
399
400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401 #define ipw_read16(ipw, ofs) ({ \
402 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403 (u32)(ofs)); \
404 _ipw_read16(ipw, ofs); \
405 })
406
407 /* 32-bit direct read (low 4K) */
408 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409 {
410 return readl(ipw->hw_base + ofs);
411 }
412
413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414 #define ipw_read32(ipw, ofs) ({ \
415 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416 (u32)(ofs)); \
417 _ipw_read32(ipw, ofs); \
418 })
419
420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422 #define ipw_read_indirect(a, b, c, d) ({ \
423 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424 __LINE__, (u32)(b), (u32)(d)); \
425 _ipw_read_indirect(a, b, c, d); \
426 })
427
428 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430 int num);
431 #define ipw_write_indirect(a, b, c, d) do { \
432 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433 __LINE__, (u32)(b), (u32)(d)); \
434 _ipw_write_indirect(a, b, c, d); \
435 } while (0)
436
437 /* 32-bit indirect write (above 4K) */
438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439 {
440 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
443 }
444
445 /* 8-bit indirect write (above 4K) */
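/* The byte is reached by latching the dword-aligned address into
 * IPW_INDIRECT_ADDR and then writing at IPW_INDIRECT_DATA plus the byte offset;
 * e.g. (assuming IPW_INDIRECT_ADDR_MASK masks to a 4-byte boundary) reg
 * 0x00300006 gives aligned_addr 0x00300004 and dif_len 2. */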
446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447 {
448 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
449 u32 dif_len = reg - aligned_addr;
450
451 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
455
456 /* 16-bit indirect write (above 4K) */
457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458 {
459 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
460 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461
462 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465 }
466
467 /* 8-bit indirect read (above 4K) */
468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469 {
470 u32 word;
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
474 return (word >> ((reg & 0x3) * 8)) & 0xff;
475 }
476
477 /* 32-bit indirect read (above 4K) */
478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479 {
480 u32 value;
481
482 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483
484 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487 return value;
488 }
489
490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
491 /* for area above 1st 4K of SRAM/reg space */
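/* The read below proceeds in three phases: any leading bytes up to the next
 * dword boundary are read one at a time through IPW_INDIRECT_DATA, the aligned
 * middle portion is read as whole dwords through the auto-incrementing
 * IPW_AUTOINC_DATA register, and any trailing bytes are again read one at a
 * time. */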
492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493 int num)
494 {
495 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
496 u32 dif_len = addr - aligned_addr;
497 u32 i;
498
499 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500
501 if (num <= 0) {
502 return;
503 }
504
505 /* Read the first dword (or portion) byte by byte */
506 if (unlikely(dif_len)) {
507 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 /* Start reading at aligned_addr + dif_len */
509 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511 aligned_addr += 4;
512 }
513
514 /* Read all of the middle dwords as dwords, with auto-increment */
515 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518
519 /* Read the last dword (or portion) byte by byte */
520 if (unlikely(num)) {
521 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522 for (i = 0; num > 0; i++, num--)
523 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524 }
525 }
526
527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
528 /* for area above 1st 4K of SRAM/reg space */
529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530 int num)
531 {
532 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
533 u32 dif_len = addr - aligned_addr;
534 u32 i;
535
536 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537
538 if (num <= 0) {
539 return;
540 }
541
542 /* Write the first dword (or portion) byte by byte */
543 if (unlikely(dif_len)) {
544 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 /* Start writing at aligned_addr + dif_len */
546 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548 aligned_addr += 4;
549 }
550
551 /* Write all of the middle dwords as dwords, with auto-increment */
552 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555
556 /* Write the last dword (or portion) byte by byte */
557 if (unlikely(num)) {
558 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559 for (i = 0; num > 0; i++, num--, buf++)
560 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561 }
562 }
563
564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
565 /* for 1st 4K of SRAM/regs space */
566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567 int num)
568 {
569 memcpy_toio((priv->hw_base + addr), buf, num);
570 }
571
572 /* Set bit(s) in low 4K of SRAM/regs */
573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574 {
575 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576 }
577
578 /* Clear bit(s) in low 4K of SRAM/regs */
579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580 {
581 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582 }
583
584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585 {
586 if (priv->status & STATUS_INT_ENABLED)
587 return;
588 priv->status |= STATUS_INT_ENABLED;
589 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590 }
591
592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 if (!(priv->status & STATUS_INT_ENABLED))
595 return;
596 priv->status &= ~STATUS_INT_ENABLED;
597 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598 }
599
600 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601 {
602 unsigned long flags;
603
604 spin_lock_irqsave(&priv->irq_lock, flags);
605 __ipw_enable_interrupts(priv);
606 spin_unlock_irqrestore(&priv->irq_lock, flags);
607 }
608
609 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610 {
611 unsigned long flags;
612
613 spin_lock_irqsave(&priv->irq_lock, flags);
614 __ipw_disable_interrupts(priv);
615 spin_unlock_irqrestore(&priv->irq_lock, flags);
616 }
617
618 static char *ipw_error_desc(u32 val)
619 {
620 switch (val) {
621 case IPW_FW_ERROR_OK:
622 return "ERROR_OK";
623 case IPW_FW_ERROR_FAIL:
624 return "ERROR_FAIL";
625 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626 return "MEMORY_UNDERFLOW";
627 case IPW_FW_ERROR_MEMORY_OVERFLOW:
628 return "MEMORY_OVERFLOW";
629 case IPW_FW_ERROR_BAD_PARAM:
630 return "BAD_PARAM";
631 case IPW_FW_ERROR_BAD_CHECKSUM:
632 return "BAD_CHECKSUM";
633 case IPW_FW_ERROR_NMI_INTERRUPT:
634 return "NMI_INTERRUPT";
635 case IPW_FW_ERROR_BAD_DATABASE:
636 return "BAD_DATABASE";
637 case IPW_FW_ERROR_ALLOC_FAIL:
638 return "ALLOC_FAIL";
639 case IPW_FW_ERROR_DMA_UNDERRUN:
640 return "DMA_UNDERRUN";
641 case IPW_FW_ERROR_DMA_STATUS:
642 return "DMA_STATUS";
643 case IPW_FW_ERROR_DINO_ERROR:
644 return "DINO_ERROR";
645 case IPW_FW_ERROR_EEPROM_ERROR:
646 return "EEPROM_ERROR";
647 case IPW_FW_ERROR_SYSASSERT:
648 return "SYSASSERT";
649 case IPW_FW_ERROR_FATAL_ERROR:
650 return "FATAL_ERROR";
651 default:
652 return "UNKNOWN_ERROR";
653 }
654 }
655
656 static void ipw_dump_error_log(struct ipw_priv *priv,
657 struct ipw_fw_error *error)
658 {
659 u32 i;
660
661 if (!error) {
662 IPW_ERROR("Error allocating and capturing error log. "
663 "Nothing to dump.\n");
664 return;
665 }
666
667 IPW_ERROR("Start IPW Error Log Dump:\n");
668 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669 error->status, error->config);
670
671 for (i = 0; i < error->elem_len; i++)
672 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
673 ipw_error_desc(error->elem[i].desc),
674 error->elem[i].time,
675 error->elem[i].blink1,
676 error->elem[i].blink2,
677 error->elem[i].link1,
678 error->elem[i].link2, error->elem[i].data);
679 for (i = 0; i < error->log_len; i++)
680 IPW_ERROR("%i\t0x%08x\t%i\n",
681 error->log[i].time,
682 error->log[i].data, error->log[i].event);
683 }
684
685 static inline int ipw_is_init(struct ipw_priv *priv)
686 {
687 return (priv->status & STATUS_INIT) ? 1 : 0;
688 }
689
690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691 {
692 u32 addr, field_info, field_len, field_count, total_len;
693
694 IPW_DEBUG_ORD("ordinal = %i\n", ord);
695
696 if (!priv || !val || !len) {
697 IPW_DEBUG_ORD("Invalid argument\n");
698 return -EINVAL;
699 }
700
701 /* verify device ordinal tables have been initialized */
702 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703 IPW_DEBUG_ORD("Access ordinals before initialization\n");
704 return -EINVAL;
705 }
706
707 switch (IPW_ORD_TABLE_ID_MASK & ord) {
708 case IPW_ORD_TABLE_0_MASK:
709 /*
710 * TABLE 0: Direct access to a table of 32 bit values
711 *
712 * This is a very simple table with the data directly
713 * read from the table
714 */
715
716 /* remove the table id from the ordinal */
717 ord &= IPW_ORD_TABLE_VALUE_MASK;
718
719 /* boundary check */
720 if (ord > priv->table0_len) {
721 IPW_DEBUG_ORD("ordinal value (%i) longer then "
722 "max (%i)\n", ord, priv->table0_len);
723 return -EINVAL;
724 }
725
726 /* verify we have enough room to store the value */
727 if (*len < sizeof(u32)) {
728 IPW_DEBUG_ORD("ordinal buffer length too small, "
729 "need %zd\n", sizeof(u32));
730 return -EINVAL;
731 }
732
733 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734 ord, priv->table0_addr + (ord << 2));
735
736 *len = sizeof(u32);
737 ord <<= 2;
738 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739 break;
740
741 case IPW_ORD_TABLE_1_MASK:
742 /*
743 * TABLE 1: Indirect access to a table of 32 bit values
744 *
745 * This is a fairly large table of u32 values each
746 * representing starting addr for the data (which is
747 * also a u32)
748 */
749
750 /* remove the table id from the ordinal */
751 ord &= IPW_ORD_TABLE_VALUE_MASK;
752
753 /* boundary check */
754 if (ord > priv->table1_len) {
755 IPW_DEBUG_ORD("ordinal value too long\n");
756 return -EINVAL;
757 }
758
759 /* verify we have enough room to store the value */
760 if (*len < sizeof(u32)) {
761 IPW_DEBUG_ORD("ordinal buffer length too small, "
762 "need %zd\n", sizeof(u32));
763 return -EINVAL;
764 }
765
766 *((u32 *) val) =
767 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768 *len = sizeof(u32);
769 break;
770
771 case IPW_ORD_TABLE_2_MASK:
772 /*
773 * TABLE 2: Indirect access to a table of variable sized values
774 *
775 * This table consists of six values, each containing
776 * - a dword containing the starting offset of the data
777 * - a dword containing the length in the first 16 bits
778 * and the count in the second 16 bits
779 */
780
781 /* remove the table id from the ordinal */
782 ord &= IPW_ORD_TABLE_VALUE_MASK;
783
784 /* boundary check */
785 if (ord > priv->table2_len) {
786 IPW_DEBUG_ORD("ordinal value too long\n");
787 return -EINVAL;
788 }
789
790 /* get the address of statistic */
791 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792
793 /* get the second DW of statistics ;
794 * two 16-bit words - first is length, second is count */
795 field_info =
796 ipw_read_reg32(priv,
797 priv->table2_addr + (ord << 3) +
798 sizeof(u32));
799
800 /* get each entry length */
801 field_len = *((u16 *) & field_info);
802
803 /* get number of entries */
804 field_count = *(((u16 *) & field_info) + 1);
805
806 /* abort if not enough memory */
807 total_len = field_len * field_count;
808 if (total_len > *len) {
809 *len = total_len;
810 return -EINVAL;
811 }
812
813 *len = total_len;
814 if (!total_len)
815 return 0;
816
817 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818 "field_info = 0x%08x\n",
819 addr, total_len, field_info);
820 ipw_read_indirect(priv, addr, val, total_len);
821 break;
822
823 default:
824 IPW_DEBUG_ORD("Invalid ordinal!\n");
825 return -EINVAL;
826
827 }
828
829 return 0;
830 }
831
832 static void ipw_init_ordinals(struct ipw_priv *priv)
833 {
834 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835 priv->table0_len = ipw_read32(priv, priv->table0_addr);
836
837 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838 priv->table0_addr, priv->table0_len);
839
840 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842
843 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844 priv->table1_addr, priv->table1_len);
845
846 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
848 priv->table2_len &= 0x0000ffff; /* use lower 16 bits only */
849
850 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851 priv->table2_addr, priv->table2_len);
852
853 }
854
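/* ipw_register_toggle() strips IPW_START_STANDBY and any set ODMA/IDMA/ADMA
 * gate bits from an IPW_EVENT_REG value before it is written back, so that the
 * LED updates below only touch the LED control bits (the stripped bits appear
 * to act as write-triggered controls rather than state). */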
855 static u32 ipw_register_toggle(u32 reg)
856 {
857 reg &= ~IPW_START_STANDBY;
858 if (reg & IPW_GATE_ODMA)
859 reg &= ~IPW_GATE_ODMA;
860 if (reg & IPW_GATE_IDMA)
861 reg &= ~IPW_GATE_IDMA;
862 if (reg & IPW_GATE_ADMA)
863 reg &= ~IPW_GATE_ADMA;
864 return reg;
865 }
866
867 /*
868 * LED behavior:
869 * - On radio ON, turn on any LEDs that need to be on during start
870 * - On initialization, start unassociated blink
871 * - On association, disable unassociated blink
872 * - On disassociation, start unassociated blink
873 * - On radio OFF, turn off any LEDs started during radio on
874 *
875 */
876 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
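/* While unassociated the link LED blinks with the periods above: on for
 * LD_TIME_LINK_ON (300 ms), then off for LD_TIME_LINK_OFF (2.7 s).  The
 * activity LED is turned off LD_TIME_ACT_ON (250 ms) after the last traffic. */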
879
880 static void ipw_led_link_on(struct ipw_priv *priv)
881 {
882 unsigned long flags;
883 u32 led;
884
885 /* If configured to not use LEDs, or nic_type is 1,
886 * then we don't toggle a LINK led */
887 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888 return;
889
890 spin_lock_irqsave(&priv->lock, flags);
891
892 if (!(priv->status & STATUS_RF_KILL_MASK) &&
893 !(priv->status & STATUS_LED_LINK_ON)) {
894 IPW_DEBUG_LED("Link LED On\n");
895 led = ipw_read_reg32(priv, IPW_EVENT_REG);
896 led |= priv->led_association_on;
897
898 led = ipw_register_toggle(led);
899
900 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901 ipw_write_reg32(priv, IPW_EVENT_REG, led);
902
903 priv->status |= STATUS_LED_LINK_ON;
904
905 /* If we aren't associated, schedule turning the LED off */
906 if (!(priv->status & STATUS_ASSOCIATED))
907 schedule_delayed_work(&priv->led_link_off,
908 LD_TIME_LINK_ON);
909 }
910
911 spin_unlock_irqrestore(&priv->lock, flags);
912 }
913
914 static void ipw_bg_led_link_on(struct work_struct *work)
915 {
916 struct ipw_priv *priv =
917 container_of(work, struct ipw_priv, led_link_on.work);
918 mutex_lock(&priv->mutex);
919 ipw_led_link_on(priv);
920 mutex_unlock(&priv->mutex);
921 }
922
923 static void ipw_led_link_off(struct ipw_priv *priv)
924 {
925 unsigned long flags;
926 u32 led;
927
928 /* If configured not to use LEDs, or nic type is 1,
929 * then we don't toggle the LINK led. */
930 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931 return;
932
933 spin_lock_irqsave(&priv->lock, flags);
934
935 if (priv->status & STATUS_LED_LINK_ON) {
936 led = ipw_read_reg32(priv, IPW_EVENT_REG);
937 led &= priv->led_association_off;
938 led = ipw_register_toggle(led);
939
940 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941 ipw_write_reg32(priv, IPW_EVENT_REG, led);
942
943 IPW_DEBUG_LED("Link LED Off\n");
944
945 priv->status &= ~STATUS_LED_LINK_ON;
946
947 /* If we aren't associated and the radio is on, schedule
948 * turning the LED on (blink while unassociated) */
949 if (!(priv->status & STATUS_RF_KILL_MASK) &&
950 !(priv->status & STATUS_ASSOCIATED))
951 schedule_delayed_work(&priv->led_link_on,
952 LD_TIME_LINK_OFF);
953
954 }
955
956 spin_unlock_irqrestore(&priv->lock, flags);
957 }
958
959 static void ipw_bg_led_link_off(struct work_struct *work)
960 {
961 struct ipw_priv *priv =
962 container_of(work, struct ipw_priv, led_link_off.work);
963 mutex_lock(&priv->mutex);
964 ipw_led_link_off(priv);
965 mutex_unlock(&priv->mutex);
966 }
967
968 static void __ipw_led_activity_on(struct ipw_priv *priv)
969 {
970 u32 led;
971
972 if (priv->config & CFG_NO_LED)
973 return;
974
975 if (priv->status & STATUS_RF_KILL_MASK)
976 return;
977
978 if (!(priv->status & STATUS_LED_ACT_ON)) {
979 led = ipw_read_reg32(priv, IPW_EVENT_REG);
980 led |= priv->led_activity_on;
981
982 led = ipw_register_toggle(led);
983
984 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985 ipw_write_reg32(priv, IPW_EVENT_REG, led);
986
987 IPW_DEBUG_LED("Activity LED On\n");
988
989 priv->status |= STATUS_LED_ACT_ON;
990
991 cancel_delayed_work(&priv->led_act_off);
992 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993 } else {
994 /* Reschedule LED off for full time period */
995 cancel_delayed_work(&priv->led_act_off);
996 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997 }
998 }
999
1000 #if 0
1001 void ipw_led_activity_on(struct ipw_priv *priv)
1002 {
1003 unsigned long flags;
1004 spin_lock_irqsave(&priv->lock, flags);
1005 __ipw_led_activity_on(priv);
1006 spin_unlock_irqrestore(&priv->lock, flags);
1007 }
1008 #endif /* 0 */
1009
1010 static void ipw_led_activity_off(struct ipw_priv *priv)
1011 {
1012 unsigned long flags;
1013 u32 led;
1014
1015 if (priv->config & CFG_NO_LED)
1016 return;
1017
1018 spin_lock_irqsave(&priv->lock, flags);
1019
1020 if (priv->status & STATUS_LED_ACT_ON) {
1021 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022 led &= priv->led_activity_off;
1023
1024 led = ipw_register_toggle(led);
1025
1026 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028
1029 IPW_DEBUG_LED("Activity LED Off\n");
1030
1031 priv->status &= ~STATUS_LED_ACT_ON;
1032 }
1033
1034 spin_unlock_irqrestore(&priv->lock, flags);
1035 }
1036
1037 static void ipw_bg_led_activity_off(struct work_struct *work)
1038 {
1039 struct ipw_priv *priv =
1040 container_of(work, struct ipw_priv, led_act_off.work);
1041 mutex_lock(&priv->mutex);
1042 ipw_led_activity_off(priv);
1043 mutex_unlock(&priv->mutex);
1044 }
1045
1046 static void ipw_led_band_on(struct ipw_priv *priv)
1047 {
1048 unsigned long flags;
1049 u32 led;
1050
1051 /* Only nic type 1 supports mode LEDs */
1052 if (priv->config & CFG_NO_LED ||
1053 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054 return;
1055
1056 spin_lock_irqsave(&priv->lock, flags);
1057
1058 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059 if (priv->assoc_network->mode == IEEE_A) {
1060 led |= priv->led_ofdm_on;
1061 led &= priv->led_association_off;
1062 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063 } else if (priv->assoc_network->mode == IEEE_G) {
1064 led |= priv->led_ofdm_on;
1065 led |= priv->led_association_on;
1066 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067 } else {
1068 led &= priv->led_ofdm_off;
1069 led |= priv->led_association_on;
1070 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071 }
1072
1073 led = ipw_register_toggle(led);
1074
1075 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077
1078 spin_unlock_irqrestore(&priv->lock, flags);
1079 }
1080
1081 static void ipw_led_band_off(struct ipw_priv *priv)
1082 {
1083 unsigned long flags;
1084 u32 led;
1085
1086 /* Only nic type 1 supports mode LEDs */
1087 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088 return;
1089
1090 spin_lock_irqsave(&priv->lock, flags);
1091
1092 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093 led &= priv->led_ofdm_off;
1094 led &= priv->led_association_off;
1095
1096 led = ipw_register_toggle(led);
1097
1098 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100
1101 spin_unlock_irqrestore(&priv->lock, flags);
1102 }
1103
1104 static void ipw_led_radio_on(struct ipw_priv *priv)
1105 {
1106 ipw_led_link_on(priv);
1107 }
1108
1109 static void ipw_led_radio_off(struct ipw_priv *priv)
1110 {
1111 ipw_led_activity_off(priv);
1112 ipw_led_link_off(priv);
1113 }
1114
1115 static void ipw_led_link_up(struct ipw_priv *priv)
1116 {
1117 /* Set the Link Led on for all nic types */
1118 ipw_led_link_on(priv);
1119 }
1120
1121 static void ipw_led_link_down(struct ipw_priv *priv)
1122 {
1123 ipw_led_activity_off(priv);
1124 ipw_led_link_off(priv);
1125
1126 if (priv->status & STATUS_RF_KILL_MASK)
1127 ipw_led_radio_off(priv);
1128 }
1129
1130 static void ipw_led_init(struct ipw_priv *priv)
1131 {
1132 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133
1134 /* Set the default PINs for the link and activity leds */
1135 priv->led_activity_on = IPW_ACTIVITY_LED;
1136 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137
1138 priv->led_association_on = IPW_ASSOCIATED_LED;
1139 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140
1141 /* Set the default PINs for the OFDM leds */
1142 priv->led_ofdm_on = IPW_OFDM_LED;
1143 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144
1145 switch (priv->nic_type) {
1146 case EEPROM_NIC_TYPE_1:
1147 /* In this NIC type, the LEDs are reversed.... */
1148 priv->led_activity_on = IPW_ASSOCIATED_LED;
1149 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150 priv->led_association_on = IPW_ACTIVITY_LED;
1151 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152
1153 if (!(priv->config & CFG_NO_LED))
1154 ipw_led_band_on(priv);
1155
1156 /* And we don't blink link LEDs for this nic, so
1157 * just return here */
1158 return;
1159
1160 case EEPROM_NIC_TYPE_3:
1161 case EEPROM_NIC_TYPE_2:
1162 case EEPROM_NIC_TYPE_4:
1163 case EEPROM_NIC_TYPE_0:
1164 break;
1165
1166 default:
1167 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168 priv->nic_type);
1169 priv->nic_type = EEPROM_NIC_TYPE_0;
1170 break;
1171 }
1172
1173 if (!(priv->config & CFG_NO_LED)) {
1174 if (priv->status & STATUS_ASSOCIATED)
1175 ipw_led_link_on(priv);
1176 else
1177 ipw_led_link_off(priv);
1178 }
1179 }
1180
1181 static void ipw_led_shutdown(struct ipw_priv *priv)
1182 {
1183 ipw_led_activity_off(priv);
1184 ipw_led_link_off(priv);
1185 ipw_led_band_off(priv);
1186 cancel_delayed_work(&priv->led_link_on);
1187 cancel_delayed_work(&priv->led_link_off);
1188 cancel_delayed_work(&priv->led_act_off);
1189 }
1190
1191 /*
1192 * The following adds a new attribute to the sysfs representation
1193 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1194 * used for controlling the debug level.
1195 *
1196 * See the level definitions in ipw for details.
1197 */
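/* Usage sketch (path as described above, value purely illustrative):
 *   # cat /sys/bus/pci/drivers/ipw/debug_level
 *   # echo 0x1 > /sys/bus/pci/drivers/ipw/debug_level
 * Values are accepted in decimal or hex, as parsed by store_debug_level(). */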
1198 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1199 {
1200 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201 }
1202
1203 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1204 size_t count)
1205 {
1206 char *p = (char *)buf;
1207 u32 val;
1208
1209 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210 p++;
1211 if (p[0] == 'x' || p[0] == 'X')
1212 p++;
1213 val = simple_strtoul(p, &p, 16);
1214 } else
1215 val = simple_strtoul(p, &p, 10);
1216 if (p == buf)
1217 printk(KERN_INFO DRV_NAME
1218 ": %s is not in hex or decimal form.\n", buf);
1219 else
1220 ipw_debug_level = val;
1221
1222 return strnlen(buf, count);
1223 }
1224
1225 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1226 show_debug_level, store_debug_level);
1227
1228 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1229 {
1230 /* length = 1st dword in log */
1231 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1232 }
1233
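/* Event log layout, as used by ipw_get_event_log_len() above and
 * ipw_capture_event_log() below: the dword at the address held in
 * IPW_EVENT_LOG is the entry count, and the entries themselves start two
 * dwords past that base (the capture below reads from base + 8). */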
1234 static void ipw_capture_event_log(struct ipw_priv *priv,
1235 u32 log_len, struct ipw_event *log)
1236 {
1237 u32 base;
1238
1239 if (log_len) {
1240 base = ipw_read32(priv, IPW_EVENT_LOG);
1241 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1242 (u8 *) log, sizeof(*log) * log_len);
1243 }
1244 }
1245
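/* ipw_alloc_error_log() captures the firmware error and event logs into one
 * atomic allocation: the ipw_fw_error header is followed by elem_len error
 * elements and then log_len event entries, with error->elem and error->log
 * pointing into that payload. */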
1246 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1247 {
1248 struct ipw_fw_error *error;
1249 u32 log_len = ipw_get_event_log_len(priv);
1250 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1251 u32 elem_len = ipw_read_reg32(priv, base);
1252
1253 error = kmalloc(sizeof(*error) +
1254 sizeof(*error->elem) * elem_len +
1255 sizeof(*error->log) * log_len, GFP_ATOMIC);
1256 if (!error) {
1257 IPW_ERROR("Memory allocation for firmware error log "
1258 "failed.\n");
1259 return NULL;
1260 }
1261 error->jiffies = jiffies;
1262 error->status = priv->status;
1263 error->config = priv->config;
1264 error->elem_len = elem_len;
1265 error->log_len = log_len;
1266 error->elem = (struct ipw_error_elem *)error->payload;
1267 error->log = (struct ipw_event *)(error->elem + elem_len);
1268
1269 ipw_capture_event_log(priv, log_len, error->log);
1270
1271 if (elem_len)
1272 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1273 sizeof(*error->elem) * elem_len);
1274
1275 return error;
1276 }
1277
1278 static ssize_t show_event_log(struct device *d,
1279 struct device_attribute *attr, char *buf)
1280 {
1281 struct ipw_priv *priv = dev_get_drvdata(d);
1282 u32 log_len = ipw_get_event_log_len(priv);
1283 u32 log_size;
1284 struct ipw_event *log;
1285 u32 len = 0, i;
1286
1287 /* not using min() because of its strict type checking */
1288 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1289 sizeof(*log) * log_len : PAGE_SIZE;
1290 log = kzalloc(log_size, GFP_KERNEL);
1291 if (!log) {
1292 IPW_ERROR("Unable to allocate memory for log\n");
1293 return 0;
1294 }
1295 log_len = log_size / sizeof(*log);
1296 ipw_capture_event_log(priv, log_len, log);
1297
1298 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1299 for (i = 0; i < log_len; i++)
1300 len += snprintf(buf + len, PAGE_SIZE - len,
1301 "\n%08X%08X%08X",
1302 log[i].time, log[i].event, log[i].data);
1303 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1304 kfree(log);
1305 return len;
1306 }
1307
1308 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1309
1310 static ssize_t show_error(struct device *d,
1311 struct device_attribute *attr, char *buf)
1312 {
1313 struct ipw_priv *priv = dev_get_drvdata(d);
1314 u32 len = 0, i;
1315 if (!priv->error)
1316 return 0;
1317 len += snprintf(buf + len, PAGE_SIZE - len,
1318 "%08lX%08X%08X%08X",
1319 priv->error->jiffies,
1320 priv->error->status,
1321 priv->error->config, priv->error->elem_len);
1322 for (i = 0; i < priv->error->elem_len; i++)
1323 len += snprintf(buf + len, PAGE_SIZE - len,
1324 "\n%08X%08X%08X%08X%08X%08X%08X",
1325 priv->error->elem[i].time,
1326 priv->error->elem[i].desc,
1327 priv->error->elem[i].blink1,
1328 priv->error->elem[i].blink2,
1329 priv->error->elem[i].link1,
1330 priv->error->elem[i].link2,
1331 priv->error->elem[i].data);
1332
1333 len += snprintf(buf + len, PAGE_SIZE - len,
1334 "\n%08X", priv->error->log_len);
1335 for (i = 0; i < priv->error->log_len; i++)
1336 len += snprintf(buf + len, PAGE_SIZE - len,
1337 "\n%08X%08X%08X",
1338 priv->error->log[i].time,
1339 priv->error->log[i].event,
1340 priv->error->log[i].data);
1341 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1342 return len;
1343 }
1344
1345 static ssize_t clear_error(struct device *d,
1346 struct device_attribute *attr,
1347 const char *buf, size_t count)
1348 {
1349 struct ipw_priv *priv = dev_get_drvdata(d);
1350
1351 kfree(priv->error);
1352 priv->error = NULL;
1353 return count;
1354 }
1355
1356 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1357
1358 static ssize_t show_cmd_log(struct device *d,
1359 struct device_attribute *attr, char *buf)
1360 {
1361 struct ipw_priv *priv = dev_get_drvdata(d);
1362 u32 len = 0, i;
1363 if (!priv->cmdlog)
1364 return 0;
1365 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1366 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1367 i = (i + 1) % priv->cmdlog_len) {
1368 len +=
1369 snprintf(buf + len, PAGE_SIZE - len,
1370 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1371 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1372 priv->cmdlog[i].cmd.len);
1373 len +=
1374 snprintk_buf(buf + len, PAGE_SIZE - len,
1375 (u8 *) priv->cmdlog[i].cmd.param,
1376 priv->cmdlog[i].cmd.len);
1377 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378 }
1379 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1380 return len;
1381 }
1382
1383 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1384
1385 #ifdef CONFIG_IPW2200_PROMISCUOUS
1386 static void ipw_prom_free(struct ipw_priv *priv);
1387 static int ipw_prom_alloc(struct ipw_priv *priv);
1388 static ssize_t store_rtap_iface(struct device *d,
1389 struct device_attribute *attr,
1390 const char *buf, size_t count)
1391 {
1392 struct ipw_priv *priv = dev_get_drvdata(d);
1393 int rc = 0;
1394
1395 if (count < 1)
1396 return -EINVAL;
1397
1398 switch (buf[0]) {
1399 case '0':
1400 if (!rtap_iface)
1401 return count;
1402
1403 if (netif_running(priv->prom_net_dev)) {
1404 IPW_WARNING("Interface is up. Cannot unregister.\n");
1405 return count;
1406 }
1407
1408 ipw_prom_free(priv);
1409 rtap_iface = 0;
1410 break;
1411
1412 case '1':
1413 if (rtap_iface)
1414 return count;
1415
1416 rc = ipw_prom_alloc(priv);
1417 if (!rc)
1418 rtap_iface = 1;
1419 break;
1420
1421 default:
1422 return -EINVAL;
1423 }
1424
1425 if (rc) {
1426 IPW_ERROR("Failed to register promiscuous network "
1427 "device (error %d).\n", rc);
1428 }
1429
1430 return count;
1431 }
1432
1433 static ssize_t show_rtap_iface(struct device *d,
1434 struct device_attribute *attr,
1435 char *buf)
1436 {
1437 struct ipw_priv *priv = dev_get_drvdata(d);
1438 if (rtap_iface)
1439 return sprintf(buf, "%s", priv->prom_net_dev->name);
1440 else {
1441 buf[0] = '-';
1442 buf[1] = '1';
1443 buf[2] = '\0';
1444 return 3;
1445 }
1446 }
1447
1448 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1449 store_rtap_iface);
1450
1451 static ssize_t store_rtap_filter(struct device *d,
1452 struct device_attribute *attr,
1453 const char *buf, size_t count)
1454 {
1455 struct ipw_priv *priv = dev_get_drvdata(d);
1456
1457 if (!priv->prom_priv) {
1458 IPW_ERROR("Attempting to set filter without "
1459 "rtap_iface enabled.\n");
1460 return -EPERM;
1461 }
1462
1463 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1464
1465 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1466 BIT_ARG16(priv->prom_priv->filter));
1467
1468 return count;
1469 }
1470
1471 static ssize_t show_rtap_filter(struct device *d,
1472 struct device_attribute *attr,
1473 char *buf)
1474 {
1475 struct ipw_priv *priv = dev_get_drvdata(d);
1476 return sprintf(buf, "0x%04X",
1477 priv->prom_priv ? priv->prom_priv->filter : 0);
1478 }
1479
1480 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1481 store_rtap_filter);
1482 #endif
1483
1484 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1485 char *buf)
1486 {
1487 struct ipw_priv *priv = dev_get_drvdata(d);
1488 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1489 }
1490
1491 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1492 const char *buf, size_t count)
1493 {
1494 struct ipw_priv *priv = dev_get_drvdata(d);
1495 struct net_device *dev = priv->net_dev;
1496 char buffer[] = "00000000";
1497 unsigned long len =
1498 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1499 unsigned long val;
1500 char *p = buffer;
1501
1502 IPW_DEBUG_INFO("enter\n");
1503
1504 strncpy(buffer, buf, len);
1505 buffer[len] = 0;
1506
1507 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1508 p++;
1509 if (p[0] == 'x' || p[0] == 'X')
1510 p++;
1511 val = simple_strtoul(p, &p, 16);
1512 } else
1513 val = simple_strtoul(p, &p, 10);
1514 if (p == buffer) {
1515 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1516 } else {
1517 priv->ieee->scan_age = val;
1518 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1519 }
1520
1521 IPW_DEBUG_INFO("exit\n");
1522 return len;
1523 }
1524
1525 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1526
1527 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1528 char *buf)
1529 {
1530 struct ipw_priv *priv = dev_get_drvdata(d);
1531 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1532 }
1533
1534 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1535 const char *buf, size_t count)
1536 {
1537 struct ipw_priv *priv = dev_get_drvdata(d);
1538
1539 IPW_DEBUG_INFO("enter\n");
1540
1541 if (count == 0)
1542 return 0;
1543
1544 if (*buf == 0) {
1545 IPW_DEBUG_LED("Disabling LED control.\n");
1546 priv->config |= CFG_NO_LED;
1547 ipw_led_shutdown(priv);
1548 } else {
1549 IPW_DEBUG_LED("Enabling LED control.\n");
1550 priv->config &= ~CFG_NO_LED;
1551 ipw_led_init(priv);
1552 }
1553
1554 IPW_DEBUG_INFO("exit\n");
1555 return count;
1556 }
1557
1558 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1559
1560 static ssize_t show_status(struct device *d,
1561 struct device_attribute *attr, char *buf)
1562 {
1563 struct ipw_priv *p = dev_get_drvdata(d);
1564 return sprintf(buf, "0x%08x\n", (int)p->status);
1565 }
1566
1567 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1568
1569 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1570 char *buf)
1571 {
1572 struct ipw_priv *p = dev_get_drvdata(d);
1573 return sprintf(buf, "0x%08x\n", (int)p->config);
1574 }
1575
1576 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1577
1578 static ssize_t show_nic_type(struct device *d,
1579 struct device_attribute *attr, char *buf)
1580 {
1581 struct ipw_priv *priv = dev_get_drvdata(d);
1582 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1583 }
1584
1585 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1586
1587 static ssize_t show_ucode_version(struct device *d,
1588 struct device_attribute *attr, char *buf)
1589 {
1590 u32 len = sizeof(u32), tmp = 0;
1591 struct ipw_priv *p = dev_get_drvdata(d);
1592
1593 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1594 return 0;
1595
1596 return sprintf(buf, "0x%08x\n", tmp);
1597 }
1598
1599 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1600
1601 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1602 char *buf)
1603 {
1604 u32 len = sizeof(u32), tmp = 0;
1605 struct ipw_priv *p = dev_get_drvdata(d);
1606
1607 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1608 return 0;
1609
1610 return sprintf(buf, "0x%08x\n", tmp);
1611 }
1612
1613 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1614
1615 /*
1616 * Add a device attribute to view/control the delay between eeprom
1617 * operations.
1618 */
1619 static ssize_t show_eeprom_delay(struct device *d,
1620 struct device_attribute *attr, char *buf)
1621 {
1622 struct ipw_priv *p = dev_get_drvdata(d);
1623 int n = p->eeprom_delay;
1624 return sprintf(buf, "%i\n", n);
1625 }
1626 static ssize_t store_eeprom_delay(struct device *d,
1627 struct device_attribute *attr,
1628 const char *buf, size_t count)
1629 {
1630 struct ipw_priv *p = dev_get_drvdata(d);
1631 sscanf(buf, "%i", &p->eeprom_delay);
1632 return strnlen(buf, count);
1633 }
1634
1635 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1636 show_eeprom_delay, store_eeprom_delay);
1637
1638 static ssize_t show_command_event_reg(struct device *d,
1639 struct device_attribute *attr, char *buf)
1640 {
1641 u32 reg = 0;
1642 struct ipw_priv *p = dev_get_drvdata(d);
1643
1644 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1645 return sprintf(buf, "0x%08x\n", reg);
1646 }
1647 static ssize_t store_command_event_reg(struct device *d,
1648 struct device_attribute *attr,
1649 const char *buf, size_t count)
1650 {
1651 u32 reg;
1652 struct ipw_priv *p = dev_get_drvdata(d);
1653
1654 sscanf(buf, "%x", &reg);
1655 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1656 return strnlen(buf, count);
1657 }
1658
1659 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1660 show_command_event_reg, store_command_event_reg);
1661
1662 static ssize_t show_mem_gpio_reg(struct device *d,
1663 struct device_attribute *attr, char *buf)
1664 {
1665 u32 reg = 0;
1666 struct ipw_priv *p = dev_get_drvdata(d);
1667
1668 reg = ipw_read_reg32(p, 0x301100);
1669 return sprintf(buf, "0x%08x\n", reg);
1670 }
1671 static ssize_t store_mem_gpio_reg(struct device *d,
1672 struct device_attribute *attr,
1673 const char *buf, size_t count)
1674 {
1675 u32 reg;
1676 struct ipw_priv *p = dev_get_drvdata(d);
1677
1678 sscanf(buf, "%x", &reg);
1679 ipw_write_reg32(p, 0x301100, reg);
1680 return strnlen(buf, count);
1681 }
1682
1683 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1684 show_mem_gpio_reg, store_mem_gpio_reg);
1685
1686 static ssize_t show_indirect_dword(struct device *d,
1687 struct device_attribute *attr, char *buf)
1688 {
1689 u32 reg = 0;
1690 struct ipw_priv *priv = dev_get_drvdata(d);
1691
1692 if (priv->status & STATUS_INDIRECT_DWORD)
1693 reg = ipw_read_reg32(priv, priv->indirect_dword);
1694 else
1695 reg = 0;
1696
1697 return sprintf(buf, "0x%08x\n", reg);
1698 }
1699 static ssize_t store_indirect_dword(struct device *d,
1700 struct device_attribute *attr,
1701 const char *buf, size_t count)
1702 {
1703 struct ipw_priv *priv = dev_get_drvdata(d);
1704
1705 sscanf(buf, "%x", &priv->indirect_dword);
1706 priv->status |= STATUS_INDIRECT_DWORD;
1707 return strnlen(buf, count);
1708 }
1709
1710 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1711 show_indirect_dword, store_indirect_dword);
1712
1713 static ssize_t show_indirect_byte(struct device *d,
1714 struct device_attribute *attr, char *buf)
1715 {
1716 u8 reg = 0;
1717 struct ipw_priv *priv = dev_get_drvdata(d);
1718
1719 if (priv->status & STATUS_INDIRECT_BYTE)
1720 reg = ipw_read_reg8(priv, priv->indirect_byte);
1721 else
1722 reg = 0;
1723
1724 return sprintf(buf, "0x%02x\n", reg);
1725 }
1726 static ssize_t store_indirect_byte(struct device *d,
1727 struct device_attribute *attr,
1728 const char *buf, size_t count)
1729 {
1730 struct ipw_priv *priv = dev_get_drvdata(d);
1731
1732 sscanf(buf, "%x", &priv->indirect_byte);
1733 priv->status |= STATUS_INDIRECT_BYTE;
1734 return strnlen(buf, count);
1735 }
1736
1737 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1738 show_indirect_byte, store_indirect_byte);
1739
1740 static ssize_t show_direct_dword(struct device *d,
1741 struct device_attribute *attr, char *buf)
1742 {
1743 u32 reg = 0;
1744 struct ipw_priv *priv = dev_get_drvdata(d);
1745
1746 if (priv->status & STATUS_DIRECT_DWORD)
1747 reg = ipw_read32(priv, priv->direct_dword);
1748 else
1749 reg = 0;
1750
1751 return sprintf(buf, "0x%08x\n", reg);
1752 }
1753 static ssize_t store_direct_dword(struct device *d,
1754 struct device_attribute *attr,
1755 const char *buf, size_t count)
1756 {
1757 struct ipw_priv *priv = dev_get_drvdata(d);
1758
1759 sscanf(buf, "%x", &priv->direct_dword);
1760 priv->status |= STATUS_DIRECT_DWORD;
1761 return strnlen(buf, count);
1762 }
1763
1764 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1765 show_direct_dword, store_direct_dword);
1766
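/* rf_kill_active() samples the hardware rf-kill line: bit 0x10000 of register
 * 0x30 reads as 0 while the switch is engaged.  It updates STATUS_RF_KILL_HW,
 * reports the state to cfg80211 via wiphy_rfkill_set_hw_state(), and returns 1
 * when the hardware kill switch is active. */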
1767 static int rf_kill_active(struct ipw_priv *priv)
1768 {
1769 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1770 priv->status |= STATUS_RF_KILL_HW;
1771 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1772 } else {
1773 priv->status &= ~STATUS_RF_KILL_HW;
1774 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1775 }
1776
1777 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1778 }
1779
1780 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1781 char *buf)
1782 {
1783 /* 0 - RF kill not enabled
1784 1 - SW based RF kill active (sysfs)
1785 2 - HW based RF kill active
1786 3 - Both HW and SW based RF kill active */
1787 struct ipw_priv *priv = dev_get_drvdata(d);
1788 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1789 (rf_kill_active(priv) ? 0x2 : 0x0);
1790 return sprintf(buf, "%i\n", val);
1791 }
1792
1793 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1794 {
1795 if ((disable_radio ? 1 : 0) ==
1796 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1797 return 0;
1798
1799 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1800 disable_radio ? "OFF" : "ON");
1801
1802 if (disable_radio) {
1803 priv->status |= STATUS_RF_KILL_SW;
1804
1805 cancel_delayed_work(&priv->request_scan);
1806 cancel_delayed_work(&priv->request_direct_scan);
1807 cancel_delayed_work(&priv->request_passive_scan);
1808 cancel_delayed_work(&priv->scan_event);
1809 schedule_work(&priv->down);
1810 } else {
1811 priv->status &= ~STATUS_RF_KILL_SW;
1812 if (rf_kill_active(priv)) {
1813 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1814 "disabled by HW switch\n");
1815 /* Make sure the RF_KILL check timer is running */
1816 cancel_delayed_work(&priv->rf_kill);
1817 schedule_delayed_work(&priv->rf_kill,
1818 round_jiffies_relative(2 * HZ));
1819 } else
1820 schedule_work(&priv->up);
1821 }
1822
1823 return 1;
1824 }
1825
1826 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1827 const char *buf, size_t count)
1828 {
1829 struct ipw_priv *priv = dev_get_drvdata(d);
1830
1831 ipw_radio_kill_sw(priv, buf[0] == '1');
1832
1833 return count;
1834 }
1835
1836 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1837
1838 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1839 char *buf)
1840 {
1841 struct ipw_priv *priv = dev_get_drvdata(d);
1842 int pos = 0, len = 0;
1843 if (priv->config & CFG_SPEED_SCAN) {
1844 while (priv->speed_scan[pos] != 0)
1845 len += sprintf(&buf[len], "%d ",
1846 priv->speed_scan[pos++]);
1847 return len + sprintf(&buf[len], "\n");
1848 }
1849
1850 return sprintf(buf, "0\n");
1851 }
1852
1853 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1854 const char *buf, size_t count)
1855 {
1856 struct ipw_priv *priv = dev_get_drvdata(d);
1857 int channel, pos = 0;
1858 const char *p = buf;
1859
1860 /* list of space separated channels to scan, optionally ending with 0 */
1861 while ((channel = simple_strtol(p, NULL, 0))) {
1862 if (pos == MAX_SPEED_SCAN - 1) {
1863 priv->speed_scan[pos] = 0;
1864 break;
1865 }
1866
1867 if (libipw_is_valid_channel(priv->ieee, channel))
1868 priv->speed_scan[pos++] = channel;
1869 else
1870 IPW_WARNING("Skipping invalid channel request: %d\n",
1871 channel);
1872 p = strchr(p, ' ');
1873 if (!p)
1874 break;
1875 while (*p == ' ' || *p == '\t')
1876 p++;
1877 }
1878
1879 if (pos == 0)
1880 priv->config &= ~CFG_SPEED_SCAN;
1881 else {
1882 priv->speed_scan_pos = 0;
1883 priv->config |= CFG_SPEED_SCAN;
1884 }
1885
1886 return count;
1887 }
1888
1889 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1890 store_speed_scan);
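
/*
 * Example usage of the speed_scan attribute above (illustrative):
 *
 *   echo "1 6 11" > speed_scan   # scan only channels 1, 6 and 11
 *   echo 0 > speed_scan          # clear the list and scan normally again
 *
 * Invalid channels are skipped with a warning; at most MAX_SPEED_SCAN - 1
 * channels are honoured.
 */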
1891
1892 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1893 char *buf)
1894 {
1895 struct ipw_priv *priv = dev_get_drvdata(d);
1896 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1897 }
1898
1899 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1900 const char *buf, size_t count)
1901 {
1902 struct ipw_priv *priv = dev_get_drvdata(d);
1903 if (buf[0] == '1')
1904 priv->config |= CFG_NET_STATS;
1905 else
1906 priv->config &= ~CFG_NET_STATS;
1907
1908 return count;
1909 }
1910
1911 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1912 show_net_stats, store_net_stats);
1913
1914 static ssize_t show_channels(struct device *d,
1915 struct device_attribute *attr,
1916 char *buf)
1917 {
1918 struct ipw_priv *priv = dev_get_drvdata(d);
1919 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1920 int len = 0, i;
1921
1922 len = sprintf(&buf[len],
1923 		      "Displaying %d channels in 2.4 GHz band "
1924 "(802.11bg):\n", geo->bg_channels);
1925
1926 for (i = 0; i < geo->bg_channels; i++) {
1927 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1928 geo->bg[i].channel,
1929 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1930 " (radar spectrum)" : "",
1931 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1932 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1933 ? "" : ", IBSS",
1934 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1935 "passive only" : "active/passive",
1936 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1937 "B" : "B/G");
1938 }
1939
1940 len += sprintf(&buf[len],
1941 		       "Displaying %d channels in 5.2 GHz band "
1942 "(802.11a):\n", geo->a_channels);
1943 for (i = 0; i < geo->a_channels; i++) {
1944 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1945 geo->a[i].channel,
1946 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1947 " (radar spectrum)" : "",
1948 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1949 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1950 ? "" : ", IBSS",
1951 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1952 "passive only" : "active/passive");
1953 }
1954
1955 return len;
1956 }
1957
1958 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1959
1960 static void notify_wx_assoc_event(struct ipw_priv *priv)
1961 {
1962 union iwreq_data wrqu;
1963 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1964 if (priv->status & STATUS_ASSOCIATED)
1965 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1966 else
1967 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1968 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1969 }
1970
1971 static void ipw_irq_tasklet(struct ipw_priv *priv)
1972 {
1973 u32 inta, inta_mask, handled = 0;
1974 unsigned long flags;
1975 int rc = 0;
1976
1977 spin_lock_irqsave(&priv->irq_lock, flags);
1978
1979 inta = ipw_read32(priv, IPW_INTA_RW);
1980 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1981
1982 if (inta == 0xFFFFFFFF) {
1983 /* Hardware disappeared */
1984 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1985 /* Only handle the cached INTA values */
1986 inta = 0;
1987 }
1988 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1989
1990 /* Add any cached INTA values that need to be handled */
1991 inta |= priv->isr_inta;
1992
1993 spin_unlock_irqrestore(&priv->irq_lock, flags);
1994
1995 spin_lock_irqsave(&priv->lock, flags);
1996
1997 	/* handle all the sources of the interrupt */
1998 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1999 ipw_rx(priv);
2000 handled |= IPW_INTA_BIT_RX_TRANSFER;
2001 }
2002
2003 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2004 IPW_DEBUG_HC("Command completed.\n");
2005 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2006 priv->status &= ~STATUS_HCMD_ACTIVE;
2007 wake_up_interruptible(&priv->wait_command_queue);
2008 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2009 }
2010
2011 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2012 IPW_DEBUG_TX("TX_QUEUE_1\n");
2013 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2014 handled |= IPW_INTA_BIT_TX_QUEUE_1;
2015 }
2016
2017 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2018 IPW_DEBUG_TX("TX_QUEUE_2\n");
2019 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2020 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2021 }
2022
2023 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2024 IPW_DEBUG_TX("TX_QUEUE_3\n");
2025 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2026 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2027 }
2028
2029 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2030 IPW_DEBUG_TX("TX_QUEUE_4\n");
2031 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2032 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2033 }
2034
2035 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2036 IPW_WARNING("STATUS_CHANGE\n");
2037 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2038 }
2039
2040 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2041 		IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
2042 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2043 }
2044
2045 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2046 IPW_WARNING("HOST_CMD_DONE\n");
2047 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2048 }
2049
2050 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2051 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2052 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2053 }
2054
2055 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2056 IPW_WARNING("PHY_OFF_DONE\n");
2057 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2058 }
2059
2060 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2061 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2062 priv->status |= STATUS_RF_KILL_HW;
2063 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2064 wake_up_interruptible(&priv->wait_command_queue);
2065 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2066 cancel_delayed_work(&priv->request_scan);
2067 cancel_delayed_work(&priv->request_direct_scan);
2068 cancel_delayed_work(&priv->request_passive_scan);
2069 cancel_delayed_work(&priv->scan_event);
2070 schedule_work(&priv->link_down);
2071 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2072 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2073 }
2074
2075 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2076 IPW_WARNING("Firmware error detected. Restarting.\n");
2077 if (priv->error) {
2078 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2079 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2080 struct ipw_fw_error *error =
2081 ipw_alloc_error_log(priv);
2082 ipw_dump_error_log(priv, error);
2083 kfree(error);
2084 }
2085 } else {
2086 priv->error = ipw_alloc_error_log(priv);
2087 if (priv->error)
2088 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2089 else
2090 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2091 "log.\n");
2092 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2093 ipw_dump_error_log(priv, priv->error);
2094 }
2095
2096 /* XXX: If hardware encryption is for WPA/WPA2,
2097 * we have to notify the supplicant. */
2098 if (priv->ieee->sec.encrypt) {
2099 priv->status &= ~STATUS_ASSOCIATED;
2100 notify_wx_assoc_event(priv);
2101 }
2102
2103 /* Keep the restart process from trying to send host
2104 * commands by clearing the INIT status bit */
2105 priv->status &= ~STATUS_INIT;
2106
2107 /* Cancel currently queued command. */
2108 priv->status &= ~STATUS_HCMD_ACTIVE;
2109 wake_up_interruptible(&priv->wait_command_queue);
2110
2111 schedule_work(&priv->adapter_restart);
2112 handled |= IPW_INTA_BIT_FATAL_ERROR;
2113 }
2114
2115 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2116 IPW_ERROR("Parity error\n");
2117 handled |= IPW_INTA_BIT_PARITY_ERROR;
2118 }
2119
2120 if (handled != inta) {
2121 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2122 }
2123
2124 spin_unlock_irqrestore(&priv->lock, flags);
2125
2126 /* enable all interrupts */
2127 ipw_enable_interrupts(priv);
2128 }
2129
2130 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
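/* e.g. IPW_CMD(HOST_COMPLETE) expands to:
 *   case IPW_CMD_HOST_COMPLETE: return "HOST_COMPLETE";
 */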
2131 static char *get_cmd_string(u8 cmd)
2132 {
2133 switch (cmd) {
2134 IPW_CMD(HOST_COMPLETE);
2135 IPW_CMD(POWER_DOWN);
2136 IPW_CMD(SYSTEM_CONFIG);
2137 IPW_CMD(MULTICAST_ADDRESS);
2138 IPW_CMD(SSID);
2139 IPW_CMD(ADAPTER_ADDRESS);
2140 IPW_CMD(PORT_TYPE);
2141 IPW_CMD(RTS_THRESHOLD);
2142 IPW_CMD(FRAG_THRESHOLD);
2143 IPW_CMD(POWER_MODE);
2144 IPW_CMD(WEP_KEY);
2145 IPW_CMD(TGI_TX_KEY);
2146 IPW_CMD(SCAN_REQUEST);
2147 IPW_CMD(SCAN_REQUEST_EXT);
2148 IPW_CMD(ASSOCIATE);
2149 IPW_CMD(SUPPORTED_RATES);
2150 IPW_CMD(SCAN_ABORT);
2151 IPW_CMD(TX_FLUSH);
2152 IPW_CMD(QOS_PARAMETERS);
2153 IPW_CMD(DINO_CONFIG);
2154 IPW_CMD(RSN_CAPABILITIES);
2155 IPW_CMD(RX_KEY);
2156 IPW_CMD(CARD_DISABLE);
2157 IPW_CMD(SEED_NUMBER);
2158 IPW_CMD(TX_POWER);
2159 IPW_CMD(COUNTRY_INFO);
2160 IPW_CMD(AIRONET_INFO);
2161 IPW_CMD(AP_TX_POWER);
2162 IPW_CMD(CCKM_INFO);
2163 IPW_CMD(CCX_VER_INFO);
2164 IPW_CMD(SET_CALIBRATION);
2165 IPW_CMD(SENSITIVITY_CALIB);
2166 IPW_CMD(RETRY_LIMIT);
2167 IPW_CMD(IPW_PRE_POWER_DOWN);
2168 IPW_CMD(VAP_BEACON_TEMPLATE);
2169 IPW_CMD(VAP_DTIM_PERIOD);
2170 IPW_CMD(EXT_SUPPORTED_RATES);
2171 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2172 IPW_CMD(VAP_QUIET_INTERVALS);
2173 IPW_CMD(VAP_CHANNEL_SWITCH);
2174 IPW_CMD(VAP_MANDATORY_CHANNELS);
2175 IPW_CMD(VAP_CELL_PWR_LIMIT);
2176 IPW_CMD(VAP_CF_PARAM_SET);
2177 IPW_CMD(VAP_SET_BEACONING_STATE);
2178 IPW_CMD(MEASUREMENT);
2179 IPW_CMD(POWER_CAPABILITY);
2180 IPW_CMD(SUPPORTED_CHANNELS);
2181 IPW_CMD(TPC_REPORT);
2182 IPW_CMD(WME_INFO);
2183 IPW_CMD(PRODUCTION_COMMAND);
2184 default:
2185 return "UNKNOWN";
2186 }
2187 }
2188
2189 #define HOST_COMPLETE_TIMEOUT HZ
2190
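/*
 * __ipw_send_cmd() serializes host commands: it refuses to queue a new
 * command while STATUS_HCMD_ACTIVE is set, optionally records the command
 * in the cmdlog ring, hands it to ipw_queue_tx_hcmd() and then sleeps (up
 * to HOST_COMPLETE_TIMEOUT) until the TX_CMD_QUEUE interrupt handler above
 * clears STATUS_HCMD_ACTIVE and wakes wait_command_queue.
 */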
2191 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2192 {
2193 int rc = 0;
2194 unsigned long flags;
2195 unsigned long now, end;
2196
2197 spin_lock_irqsave(&priv->lock, flags);
2198 if (priv->status & STATUS_HCMD_ACTIVE) {
2199 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2200 get_cmd_string(cmd->cmd));
2201 spin_unlock_irqrestore(&priv->lock, flags);
2202 return -EAGAIN;
2203 }
2204
2205 priv->status |= STATUS_HCMD_ACTIVE;
2206
2207 if (priv->cmdlog) {
2208 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2209 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2210 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2211 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2212 cmd->len);
2213 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2214 }
2215
2216 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2217 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2218 priv->status);
2219
2220 #ifndef DEBUG_CMD_WEP_KEY
2221 if (cmd->cmd == IPW_CMD_WEP_KEY)
2222 		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2223 else
2224 #endif
2225 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2226
2227 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2228 if (rc) {
2229 priv->status &= ~STATUS_HCMD_ACTIVE;
2230 IPW_ERROR("Failed to send %s: Reason %d\n",
2231 get_cmd_string(cmd->cmd), rc);
2232 spin_unlock_irqrestore(&priv->lock, flags);
2233 goto exit;
2234 }
2235 spin_unlock_irqrestore(&priv->lock, flags);
2236
2237 now = jiffies;
2238 end = now + HOST_COMPLETE_TIMEOUT;
2239 again:
2240 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2241 !(priv->
2242 status & STATUS_HCMD_ACTIVE),
2243 end - now);
2244 if (rc < 0) {
2245 now = jiffies;
2246 if (time_before(now, end))
2247 goto again;
2248 rc = 0;
2249 }
2250
2251 if (rc == 0) {
2252 spin_lock_irqsave(&priv->lock, flags);
2253 if (priv->status & STATUS_HCMD_ACTIVE) {
2254 IPW_ERROR("Failed to send %s: Command timed out.\n",
2255 get_cmd_string(cmd->cmd));
2256 priv->status &= ~STATUS_HCMD_ACTIVE;
2257 spin_unlock_irqrestore(&priv->lock, flags);
2258 rc = -EIO;
2259 goto exit;
2260 }
2261 spin_unlock_irqrestore(&priv->lock, flags);
2262 } else
2263 rc = 0;
2264
2265 if (priv->status & STATUS_RF_KILL_HW) {
2266 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2267 get_cmd_string(cmd->cmd));
2268 rc = -EIO;
2269 goto exit;
2270 }
2271
2272 exit:
2273 if (priv->cmdlog) {
2274 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2275 priv->cmdlog_pos %= priv->cmdlog_len;
2276 }
2277 return rc;
2278 }
2279
2280 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2281 {
2282 struct host_cmd cmd = {
2283 .cmd = command,
2284 };
2285
2286 return __ipw_send_cmd(priv, &cmd);
2287 }
2288
2289 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2290 void *data)
2291 {
2292 struct host_cmd cmd = {
2293 .cmd = command,
2294 .len = len,
2295 .param = data,
2296 };
2297
2298 return __ipw_send_cmd(priv, &cmd);
2299 }
2300
2301 static int ipw_send_host_complete(struct ipw_priv *priv)
2302 {
2303 if (!priv) {
2304 IPW_ERROR("Invalid args\n");
2305 return -1;
2306 }
2307
2308 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2309 }
2310
2311 static int ipw_send_system_config(struct ipw_priv *priv)
2312 {
2313 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2314 sizeof(priv->sys_config),
2315 &priv->sys_config);
2316 }
2317
2318 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2319 {
2320 if (!priv || !ssid) {
2321 IPW_ERROR("Invalid args\n");
2322 return -1;
2323 }
2324
2325 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2326 ssid);
2327 }
2328
2329 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2330 {
2331 if (!priv || !mac) {
2332 IPW_ERROR("Invalid args\n");
2333 return -1;
2334 }
2335
2336 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2337 priv->net_dev->name, mac);
2338
2339 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2340 }
2341
2342 static void ipw_adapter_restart(void *adapter)
2343 {
2344 struct ipw_priv *priv = adapter;
2345
2346 if (priv->status & STATUS_RF_KILL_MASK)
2347 return;
2348
2349 ipw_down(priv);
2350
2351 if (priv->assoc_network &&
2352 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2353 ipw_remove_current_network(priv);
2354
2355 if (ipw_up(priv)) {
2356 IPW_ERROR("Failed to up device\n");
2357 return;
2358 }
2359 }
2360
2361 static void ipw_bg_adapter_restart(struct work_struct *work)
2362 {
2363 struct ipw_priv *priv =
2364 container_of(work, struct ipw_priv, adapter_restart);
2365 mutex_lock(&priv->mutex);
2366 ipw_adapter_restart(priv);
2367 mutex_unlock(&priv->mutex);
2368 }
2369
2370 static void ipw_abort_scan(struct ipw_priv *priv);
2371
2372 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2373
2374 static void ipw_scan_check(void *data)
2375 {
2376 struct ipw_priv *priv = data;
2377
2378 if (priv->status & STATUS_SCAN_ABORTING) {
2379 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2380 "adapter after (%dms).\n",
2381 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2382 schedule_work(&priv->adapter_restart);
2383 } else if (priv->status & STATUS_SCANNING) {
2384 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2385 "after (%dms).\n",
2386 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2387 ipw_abort_scan(priv);
2388 schedule_delayed_work(&priv->scan_check, HZ);
2389 }
2390 }
2391
2392 static void ipw_bg_scan_check(struct work_struct *work)
2393 {
2394 struct ipw_priv *priv =
2395 container_of(work, struct ipw_priv, scan_check.work);
2396 mutex_lock(&priv->mutex);
2397 ipw_scan_check(priv);
2398 mutex_unlock(&priv->mutex);
2399 }
2400
2401 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2402 struct ipw_scan_request_ext *request)
2403 {
2404 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2405 sizeof(*request), request);
2406 }
2407
2408 static int ipw_send_scan_abort(struct ipw_priv *priv)
2409 {
2410 if (!priv) {
2411 IPW_ERROR("Invalid args\n");
2412 return -1;
2413 }
2414
2415 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2416 }
2417
2418 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2419 {
2420 struct ipw_sensitivity_calib calib = {
2421 .beacon_rssi_raw = cpu_to_le16(sens),
2422 };
2423
2424 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2425 &calib);
2426 }
2427
2428 static int ipw_send_associate(struct ipw_priv *priv,
2429 struct ipw_associate *associate)
2430 {
2431 if (!priv || !associate) {
2432 IPW_ERROR("Invalid args\n");
2433 return -1;
2434 }
2435
2436 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2437 associate);
2438 }
2439
2440 static int ipw_send_supported_rates(struct ipw_priv *priv,
2441 struct ipw_supported_rates *rates)
2442 {
2443 if (!priv || !rates) {
2444 IPW_ERROR("Invalid args\n");
2445 return -1;
2446 }
2447
2448 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2449 rates);
2450 }
2451
2452 static int ipw_set_random_seed(struct ipw_priv *priv)
2453 {
2454 u32 val;
2455
2456 if (!priv) {
2457 IPW_ERROR("Invalid args\n");
2458 return -1;
2459 }
2460
2461 get_random_bytes(&val, sizeof(val));
2462
2463 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2464 }
2465
2466 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2467 {
2468 __le32 v = cpu_to_le32(phy_off);
2469 if (!priv) {
2470 IPW_ERROR("Invalid args\n");
2471 return -1;
2472 }
2473
2474 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2475 }
2476
2477 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2478 {
2479 if (!priv || !power) {
2480 IPW_ERROR("Invalid args\n");
2481 return -1;
2482 }
2483
2484 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2485 }
2486
2487 static int ipw_set_tx_power(struct ipw_priv *priv)
2488 {
2489 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2490 struct ipw_tx_power tx_power;
2491 s8 max_power;
2492 int i;
2493
2494 memset(&tx_power, 0, sizeof(tx_power));
2495
2496 /* configure device for 'G' band */
2497 tx_power.ieee_mode = IPW_G_MODE;
2498 tx_power.num_channels = geo->bg_channels;
2499 for (i = 0; i < geo->bg_channels; i++) {
2500 max_power = geo->bg[i].max_power;
2501 tx_power.channels_tx_power[i].channel_number =
2502 geo->bg[i].channel;
2503 tx_power.channels_tx_power[i].tx_power = max_power ?
2504 min(max_power, priv->tx_power) : priv->tx_power;
2505 }
2506 if (ipw_send_tx_power(priv, &tx_power))
2507 return -EIO;
2508
2509 /* configure device to also handle 'B' band */
2510 tx_power.ieee_mode = IPW_B_MODE;
2511 if (ipw_send_tx_power(priv, &tx_power))
2512 return -EIO;
2513
2514 /* configure device to also handle 'A' band */
2515 if (priv->ieee->abg_true) {
2516 tx_power.ieee_mode = IPW_A_MODE;
2517 tx_power.num_channels = geo->a_channels;
2518 for (i = 0; i < tx_power.num_channels; i++) {
2519 max_power = geo->a[i].max_power;
2520 tx_power.channels_tx_power[i].channel_number =
2521 geo->a[i].channel;
2522 tx_power.channels_tx_power[i].tx_power = max_power ?
2523 min(max_power, priv->tx_power) : priv->tx_power;
2524 }
2525 if (ipw_send_tx_power(priv, &tx_power))
2526 return -EIO;
2527 }
2528 return 0;
2529 }
2530
2531 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2532 {
2533 struct ipw_rts_threshold rts_threshold = {
2534 .rts_threshold = cpu_to_le16(rts),
2535 };
2536
2537 if (!priv) {
2538 IPW_ERROR("Invalid args\n");
2539 return -1;
2540 }
2541
2542 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2543 sizeof(rts_threshold), &rts_threshold);
2544 }
2545
2546 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2547 {
2548 struct ipw_frag_threshold frag_threshold = {
2549 .frag_threshold = cpu_to_le16(frag),
2550 };
2551
2552 if (!priv) {
2553 IPW_ERROR("Invalid args\n");
2554 return -1;
2555 }
2556
2557 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2558 sizeof(frag_threshold), &frag_threshold);
2559 }
2560
2561 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2562 {
2563 __le32 param;
2564
2565 if (!priv) {
2566 IPW_ERROR("Invalid args\n");
2567 return -1;
2568 }
2569
2570 	/* If on battery, set to power index 3; if on AC, set to CAM;
2571 	 * otherwise use the user-requested level */
2572 switch (mode) {
2573 case IPW_POWER_BATTERY:
2574 param = cpu_to_le32(IPW_POWER_INDEX_3);
2575 break;
2576 case IPW_POWER_AC:
2577 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2578 break;
2579 default:
2580 param = cpu_to_le32(mode);
2581 break;
2582 }
2583
2584 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2585 &param);
2586 }
2587
2588 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2589 {
2590 struct ipw_retry_limit retry_limit = {
2591 .short_retry_limit = slimit,
2592 .long_retry_limit = llimit
2593 };
2594
2595 if (!priv) {
2596 IPW_ERROR("Invalid args\n");
2597 return -1;
2598 }
2599
2600 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2601 &retry_limit);
2602 }
2603
2604 /*
2605 * The IPW device contains a Microwire compatible EEPROM that stores
2606 * various data like the MAC address. Usually the firmware has exclusive
2607 * access to the eeprom, but during device initialization (before the
2608 * device driver has sent the HostComplete command to the firmware) the
2609 * device driver has read access to the EEPROM by way of indirect addressing
2610 * through a couple of memory mapped registers.
2611 *
2612  * The following is a simplified implementation for pulling data out of
2613  * the eeprom, along with some helper functions to find information in
2614  * the per-device private data's copy of the eeprom.
2615  *
2616  * NOTE: To better understand how these functions work (i.e. what is a chip
2617  * select and why do we have to keep driving the eeprom clock?), read
2618 * just about any data sheet for a Microwire compatible EEPROM.
2619 */
2620
2621 /* write a 32 bit value into the indirect accessor register */
2622 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2623 {
2624 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2625
2626 /* the eeprom requires some time to complete the operation */
2627 udelay(p->eeprom_delay);
2628 }
2629
2630 /* perform a chip select operation */
2631 static void eeprom_cs(struct ipw_priv *priv)
2632 {
2633 eeprom_write_reg(priv, 0);
2634 eeprom_write_reg(priv, EEPROM_BIT_CS);
2635 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2636 eeprom_write_reg(priv, EEPROM_BIT_CS);
2637 }
2638
2639 /* perform a chip de-select operation */
2640 static void eeprom_disable_cs(struct ipw_priv *priv)
2641 {
2642 eeprom_write_reg(priv, EEPROM_BIT_CS);
2643 eeprom_write_reg(priv, 0);
2644 eeprom_write_reg(priv, EEPROM_BIT_SK);
2645 }
2646
2647 /* push a single bit down to the eeprom */
2648 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2649 {
2650 int d = (bit ? EEPROM_BIT_DI : 0);
2651 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2652 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2653 }
2654
2655 /* push an opcode followed by an address down to the eeprom */
2656 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2657 {
2658 int i;
2659
2660 eeprom_cs(priv);
2661 eeprom_write_bit(priv, 1);
2662 eeprom_write_bit(priv, op & 2);
2663 eeprom_write_bit(priv, op & 1);
2664 for (i = 7; i >= 0; i--) {
2665 eeprom_write_bit(priv, addr & (1 << i));
2666 }
2667 }
2668
2669 /* pull 16 bits off the eeprom, one bit at a time */
2670 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2671 {
2672 int i;
2673 u16 r = 0;
2674
2675 /* Send READ Opcode */
2676 eeprom_op(priv, EEPROM_CMD_READ, addr);
2677
2678 /* Send dummy bit */
2679 eeprom_write_reg(priv, EEPROM_BIT_CS);
2680
2681 	/* Read the 16-bit word off the eeprom one bit at a time */
2682 for (i = 0; i < 16; i++) {
2683 u32 data = 0;
2684 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2685 eeprom_write_reg(priv, EEPROM_BIT_CS);
2686 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2687 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2688 }
2689
2690 /* Send another dummy bit */
2691 eeprom_write_reg(priv, 0);
2692 eeprom_disable_cs(priv);
2693
2694 return r;
2695 }
2696
2697 /* helper function for pulling the mac address out of the private */
2698 /* data's copy of the eeprom data */
2699 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2700 {
2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2702 }
2703
2704 static void ipw_read_eeprom(struct ipw_priv *priv)
2705 {
2706 int i;
2707 __le16 *eeprom = (__le16 *) priv->eeprom;
2708
2709 IPW_DEBUG_TRACE(">>\n");
2710
2711 /* read entire contents of eeprom into private buffer */
2712 for (i = 0; i < 128; i++)
2713 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2714
2715 IPW_DEBUG_TRACE("<<\n");
2716 }
2717
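/*
 * Illustrative sketch only (not compiled): how the EEPROM helpers above
 * compose.  It assumes it would run before the HostComplete command has
 * been sent, i.e. while the host still owns the EEPROM; the function name
 * example_dump_eeprom() is hypothetical, everything else it uses is
 * defined earlier in this file.
 */
#if 0
static void example_dump_eeprom(struct ipw_priv *priv)
{
	u16 first_word;
	u8 mac[ETH_ALEN];

	/* read one 16-bit word (word address 0) straight from the part */
	first_word = eeprom_read_u16(priv, 0);

	/* cache all 128 words, then pull the MAC out of the cached copy */
	ipw_read_eeprom(priv);
	eeprom_parse_mac(priv, mac);

	IPW_DEBUG_INFO("EEPROM word 0: 0x%04x, MAC %pM\n", first_word, mac);
}
#endif
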
2718 /*
2719 * Either the device driver (i.e. the host) or the firmware can
2720 * load eeprom data into the designated region in SRAM. If neither
2721  * happens then the FW will shut down with a fatal error.
2722  *
2723  * In order to signal the FW to load the EEPROM itself, the EEPROM_LOAD_DISABLE
2724  * word in the designated region of shared SRAM needs to be non-zero.
2725 */
2726 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2727 {
2728 int i;
2729
2730 IPW_DEBUG_TRACE(">>\n");
2731
2732 /*
2733 If the data looks correct, then copy it to our private
2734 copy. Otherwise let the firmware know to perform the operation
2735 on its own.
2736 */
2737 if (priv->eeprom[EEPROM_VERSION] != 0) {
2738 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2739
2740 /* write the eeprom data to sram */
2741 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2742 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2743
2744 /* Do not load eeprom data on fatal error or suspend */
2745 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2746 } else {
2747 		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2748
2749 /* Load eeprom data on fatal error or suspend */
2750 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2751 }
2752
2753 IPW_DEBUG_TRACE("<<\n");
2754 }
2755
2756 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2757 {
2758 count >>= 2;
2759 if (!count)
2760 return;
2761 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2762 while (count--)
2763 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2764 }
2765
2766 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2767 {
2768 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2769 CB_NUMBER_OF_ELEMENTS_SMALL *
2770 sizeof(struct command_block));
2771 }
2772
2773 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2774 { /* start dma engine but no transfers yet */
2775
2776 IPW_DEBUG_FW(">> :\n");
2777
2778 /* Start the dma */
2779 ipw_fw_dma_reset_command_blocks(priv);
2780
2781 /* Write CB base address */
2782 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2783
2784 IPW_DEBUG_FW("<< :\n");
2785 return 0;
2786 }
2787
2788 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2789 {
2790 u32 control = 0;
2791
2792 IPW_DEBUG_FW(">> :\n");
2793
2794 /* set the Stop and Abort bit */
2795 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2796 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2797 priv->sram_desc.last_cb_index = 0;
2798
2799 IPW_DEBUG_FW("<<\n");
2800 }
2801
2802 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2803 struct command_block *cb)
2804 {
2805 u32 address =
2806 IPW_SHARED_SRAM_DMA_CONTROL +
2807 (sizeof(struct command_block) * index);
2808 IPW_DEBUG_FW(">> :\n");
2809
2810 ipw_write_indirect(priv, address, (u8 *) cb,
2811 (int)sizeof(struct command_block));
2812
2813 IPW_DEBUG_FW("<< :\n");
2814 return 0;
2815
2816 }
2817
2818 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2819 {
2820 u32 control = 0;
2821 u32 index = 0;
2822
2823 IPW_DEBUG_FW(">> :\n");
2824
2825 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2826 ipw_fw_dma_write_command_block(priv, index,
2827 &priv->sram_desc.cb_list[index]);
2828
2829 /* Enable the DMA in the CSR register */
2830 ipw_clear_bit(priv, IPW_RESET_REG,
2831 IPW_RESET_REG_MASTER_DISABLED |
2832 IPW_RESET_REG_STOP_MASTER);
2833
2834 /* Set the Start bit. */
2835 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2836 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2837
2838 IPW_DEBUG_FW("<< :\n");
2839 return 0;
2840 }
2841
2842 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2843 {
2844 u32 address;
2845 u32 register_value = 0;
2846 u32 cb_fields_address = 0;
2847
2848 IPW_DEBUG_FW(">> :\n");
2849 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2850 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2851
2852 	/* Read the DMA Control register */
2853 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2854 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2855
2856 /* Print the CB values */
2857 cb_fields_address = address;
2858 register_value = ipw_read_reg32(priv, cb_fields_address);
2859 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2860
2861 cb_fields_address += sizeof(u32);
2862 register_value = ipw_read_reg32(priv, cb_fields_address);
2863 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2864
2865 cb_fields_address += sizeof(u32);
2866 register_value = ipw_read_reg32(priv, cb_fields_address);
2867 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2868 register_value);
2869
2870 cb_fields_address += sizeof(u32);
2871 register_value = ipw_read_reg32(priv, cb_fields_address);
2872 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2873
2874 	IPW_DEBUG_FW("<< :\n");
2875 }
2876
2877 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2878 {
2879 u32 current_cb_address = 0;
2880 u32 current_cb_index = 0;
2881
2882 	IPW_DEBUG_FW(">> :\n");
2883 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2884
2885 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2886 sizeof(struct command_block);
2887
2888 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2889 current_cb_index, current_cb_address);
2890
2891 	IPW_DEBUG_FW("<< :\n");
2892 return current_cb_index;
2893
2894 }
2895
2896 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2897 u32 src_address,
2898 u32 dest_address,
2899 u32 length,
2900 int interrupt_enabled, int is_last)
2901 {
2902
2903 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2904 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2905 CB_DEST_SIZE_LONG;
2906 struct command_block *cb;
2907 u32 last_cb_element = 0;
2908
2909 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2910 src_address, dest_address, length);
2911
2912 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2913 return -1;
2914
2915 last_cb_element = priv->sram_desc.last_cb_index;
2916 cb = &priv->sram_desc.cb_list[last_cb_element];
2917 priv->sram_desc.last_cb_index++;
2918
2919 /* Calculate the new CB control word */
2920 if (interrupt_enabled)
2921 control |= CB_INT_ENABLED;
2922
2923 if (is_last)
2924 control |= CB_LAST_VALID;
2925
2926 control |= length;
2927
2928 /* Calculate the CB Element's checksum value */
2929 cb->status = control ^ src_address ^ dest_address;
2930
2931 /* Copy the Source and Destination addresses */
2932 cb->dest_addr = dest_address;
2933 cb->source_addr = src_address;
2934
2935 /* Copy the Control Word last */
2936 cb->control = control;
2937
2938 return 0;
2939 }
2940
2941 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2942 int nr, u32 dest_address, u32 len)
2943 {
2944 int ret, i;
2945 u32 size;
2946
2947 IPW_DEBUG_FW(">>\n");
2948 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2949 nr, dest_address, len);
2950
2951 for (i = 0; i < nr; i++) {
2952 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2953 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2954 dest_address +
2955 i * CB_MAX_LENGTH, size,
2956 0, 0);
2957 if (ret) {
2958 IPW_DEBUG_FW_INFO(": Failed\n");
2959 return -1;
2960 } else
2961 IPW_DEBUG_FW_INFO(": Added new cb\n");
2962 }
2963
2964 IPW_DEBUG_FW("<<\n");
2965 return 0;
2966 }
2967
2968 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2969 {
2970 u32 current_index = 0, previous_index;
2971 u32 watchdog = 0;
2972
2973 IPW_DEBUG_FW(">> :\n");
2974
2975 current_index = ipw_fw_dma_command_block_index(priv);
2976 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2977 (int)priv->sram_desc.last_cb_index);
2978
2979 while (current_index < priv->sram_desc.last_cb_index) {
2980 udelay(50);
2981 previous_index = current_index;
2982 current_index = ipw_fw_dma_command_block_index(priv);
2983
2984 if (previous_index < current_index) {
2985 watchdog = 0;
2986 continue;
2987 }
2988 if (++watchdog > 400) {
2989 IPW_DEBUG_FW_INFO("Timeout\n");
2990 ipw_fw_dma_dump_command_block(priv);
2991 ipw_fw_dma_abort(priv);
2992 return -1;
2993 }
2994 }
2995
2996 ipw_fw_dma_abort(priv);
2997
2998 	/* Disable the DMA in the CSR register */
2999 ipw_set_bit(priv, IPW_RESET_REG,
3000 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
3001
3002 IPW_DEBUG_FW("<< dmaWaitSync\n");
3003 return 0;
3004 }
3005
3006 static void ipw_remove_current_network(struct ipw_priv *priv)
3007 {
3008 struct list_head *element, *safe;
3009 struct libipw_network *network = NULL;
3010 unsigned long flags;
3011
3012 spin_lock_irqsave(&priv->ieee->lock, flags);
3013 list_for_each_safe(element, safe, &priv->ieee->network_list) {
3014 network = list_entry(element, struct libipw_network, list);
3015 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
3016 list_del(element);
3017 list_add_tail(&network->list,
3018 &priv->ieee->network_free_list);
3019 }
3020 }
3021 spin_unlock_irqrestore(&priv->ieee->lock, flags);
3022 }
3023
3024 /**
3025  * Check that the card is still alive.
3026  * Reads a debug register from domain0.
3027  * If the card is present, a pre-defined value should
3028  * be found there.
3029 *
3030 * @param priv
3031 * @return 1 if card is present, 0 otherwise
3032 */
3033 static inline int ipw_alive(struct ipw_priv *priv)
3034 {
3035 return ipw_read32(priv, 0x90) == 0xd55555d5;
3036 }
3037
3038 /* timeout in msec, attempted in 10-msec quanta */
3039 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3040 int timeout)
3041 {
3042 int i = 0;
3043
3044 do {
3045 if ((ipw_read32(priv, addr) & mask) == mask)
3046 return i;
3047 mdelay(10);
3048 i += 10;
3049 } while (i < timeout);
3050
3051 return -ETIME;
3052 }
3053
3054 /* These functions load the firmware and microcode needed to operate the
3055  * ipw hardware.  They assume the buffer holds the complete image and that
3056  * the caller handles the memory allocation and cleanup.
3057 */
3058
3059 static int ipw_stop_master(struct ipw_priv *priv)
3060 {
3061 int rc;
3062
3063 IPW_DEBUG_TRACE(">>\n");
3064 /* stop master. typical delay - 0 */
3065 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3066
3067 /* timeout is in msec, polled in 10-msec quanta */
3068 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3069 IPW_RESET_REG_MASTER_DISABLED, 100);
3070 if (rc < 0) {
3071 IPW_ERROR("wait for stop master failed after 100ms\n");
3072 return -1;
3073 }
3074
3075 IPW_DEBUG_INFO("stop master %dms\n", rc);
3076
3077 return rc;
3078 }
3079
3080 static void ipw_arc_release(struct ipw_priv *priv)
3081 {
3082 IPW_DEBUG_TRACE(">>\n");
3083 mdelay(5);
3084
3085 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3086
3087 	/* the required timing is unknown, so for safety add some delay */
3088 mdelay(5);
3089 }
3090
3091 struct fw_chunk {
3092 __le32 address;
3093 __le32 length;
3094 };
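
/*
 * The boot and main firmware images are a sequence of chunks: each chunk
 * starts with a fw_chunk header giving the SRAM destination address and
 * the number of payload bytes that follow; ipw_load_firmware() below DMAs
 * every chunk to chunk->address in pieces of at most CB_MAX_LENGTH bytes.
 */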
3095
3096 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3097 {
3098 int rc = 0, i, addr;
3099 u8 cr = 0;
3100 __le16 *image;
3101
3102 image = (__le16 *) data;
3103
3104 IPW_DEBUG_TRACE(">>\n");
3105
3106 rc = ipw_stop_master(priv);
3107
3108 if (rc < 0)
3109 return rc;
3110
3111 for (addr = IPW_SHARED_LOWER_BOUND;
3112 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3113 ipw_write32(priv, addr, 0);
3114 }
3115
3116 /* no ucode (yet) */
3117 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3118 /* destroy DMA queues */
3119 /* reset sequence */
3120
3121 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3122 ipw_arc_release(priv);
3123 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3124 mdelay(1);
3125
3126 /* reset PHY */
3127 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3128 mdelay(1);
3129
3130 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3131 mdelay(1);
3132
3133 /* enable ucode store */
3134 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3135 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3136 mdelay(1);
3137
3138 /* write ucode */
3139 /**
3140 * @bug
3141 * Do NOT set indirect address register once and then
3142 * store data to indirect data register in the loop.
3143 	 * It seems very reasonable, but in this case DINO does not
3144 	 * accept the ucode.  It is essential to set the address each time.
3145 */
3146 /* load new ipw uCode */
3147 for (i = 0; i < len / 2; i++)
3148 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3149 le16_to_cpu(image[i]));
3150
3151 /* enable DINO */
3152 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3153 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3154
3155 	/* this is where the igx / win driver deviates from the VAP driver. */
3156
3157 /* wait for alive response */
3158 for (i = 0; i < 100; i++) {
3159 /* poll for incoming data */
3160 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3161 if (cr & DINO_RXFIFO_DATA)
3162 break;
3163 mdelay(1);
3164 }
3165
3166 if (cr & DINO_RXFIFO_DATA) {
3167 		/* alive_command_response size is NOT a multiple of 4 */
3168 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3169
3170 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3171 response_buffer[i] =
3172 cpu_to_le32(ipw_read_reg32(priv,
3173 IPW_BASEBAND_RX_FIFO_READ));
3174 memcpy(&priv->dino_alive, response_buffer,
3175 sizeof(priv->dino_alive));
3176 if (priv->dino_alive.alive_command == 1
3177 && priv->dino_alive.ucode_valid == 1) {
3178 rc = 0;
3179 IPW_DEBUG_INFO
3180 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3181 "of %02d/%02d/%02d %02d:%02d\n",
3182 priv->dino_alive.software_revision,
3183 priv->dino_alive.software_revision,
3184 priv->dino_alive.device_identifier,
3185 priv->dino_alive.device_identifier,
3186 priv->dino_alive.time_stamp[0],
3187 priv->dino_alive.time_stamp[1],
3188 priv->dino_alive.time_stamp[2],
3189 priv->dino_alive.time_stamp[3],
3190 priv->dino_alive.time_stamp[4]);
3191 } else {
3192 IPW_DEBUG_INFO("Microcode is not alive\n");
3193 rc = -EINVAL;
3194 }
3195 } else {
3196 IPW_DEBUG_INFO("No alive response from DINO\n");
3197 rc = -ETIME;
3198 }
3199
3200 	/* disable DINO, otherwise for some reason the
3201 	   firmware has problems getting the alive response. */
3202 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3203
3204 return rc;
3205 }
3206
3207 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3208 {
3209 int ret = -1;
3210 int offset = 0;
3211 struct fw_chunk *chunk;
3212 int total_nr = 0;
3213 int i;
3214 struct pci_pool *pool;
3215 void **virts;
3216 dma_addr_t *phys;
3217
3218 	IPW_DEBUG_TRACE(">> :\n");
3219
3220 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3221 GFP_KERNEL);
3222 if (!virts)
3223 return -ENOMEM;
3224
3225 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3226 GFP_KERNEL);
3227 if (!phys) {
3228 kfree(virts);
3229 return -ENOMEM;
3230 }
3231 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3232 if (!pool) {
3233 IPW_ERROR("pci_pool_create failed\n");
3234 kfree(phys);
3235 kfree(virts);
3236 return -ENOMEM;
3237 }
3238
3239 /* Start the Dma */
3240 ret = ipw_fw_dma_enable(priv);
3241
3242 	/* if the DMA were already in use, this would be a bug */
3243 BUG_ON(priv->sram_desc.last_cb_index > 0);
3244
3245 do {
3246 u32 chunk_len;
3247 u8 *start;
3248 int size;
3249 int nr = 0;
3250
3251 chunk = (struct fw_chunk *)(data + offset);
3252 offset += sizeof(struct fw_chunk);
3253 chunk_len = le32_to_cpu(chunk->length);
3254 start = data + offset;
3255
3256 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3257 for (i = 0; i < nr; i++) {
3258 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3259 &phys[total_nr]);
3260 if (!virts[total_nr]) {
3261 ret = -ENOMEM;
3262 goto out;
3263 }
3264 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3265 CB_MAX_LENGTH);
3266 memcpy(virts[total_nr], start, size);
3267 start += size;
3268 total_nr++;
3269 			/* We don't support fw chunks larger than 64*8K */
3270 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3271 }
3272
3273 /* build DMA packet and queue up for sending */
3274 		/* DMA the chunk->length bytes from data + offset
3275 		 * to chunk->address */
3276 /* Dma loading */
3277 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3278 nr, le32_to_cpu(chunk->address),
3279 chunk_len);
3280 if (ret) {
3281 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3282 goto out;
3283 }
3284
3285 offset += chunk_len;
3286 } while (offset < len);
3287
3288 /* Run the DMA and wait for the answer */
3289 ret = ipw_fw_dma_kick(priv);
3290 if (ret) {
3291 IPW_ERROR("dmaKick Failed\n");
3292 goto out;
3293 }
3294
3295 ret = ipw_fw_dma_wait(priv);
3296 if (ret) {
3297 IPW_ERROR("dmaWaitSync Failed\n");
3298 goto out;
3299 }
3300 out:
3301 for (i = 0; i < total_nr; i++)
3302 pci_pool_free(pool, virts[i], phys[i]);
3303
3304 pci_pool_destroy(pool);
3305 kfree(phys);
3306 kfree(virts);
3307
3308 return ret;
3309 }
3310
3311 /* stop nic */
3312 static int ipw_stop_nic(struct ipw_priv *priv)
3313 {
3314 int rc = 0;
3315
3316 /* stop */
3317 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3318
3319 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3320 IPW_RESET_REG_MASTER_DISABLED, 500);
3321 if (rc < 0) {
3322 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3323 return rc;
3324 }
3325
3326 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3327
3328 return rc;
3329 }
3330
3331 static void ipw_start_nic(struct ipw_priv *priv)
3332 {
3333 IPW_DEBUG_TRACE(">>\n");
3334
3335 /* prvHwStartNic release ARC */
3336 ipw_clear_bit(priv, IPW_RESET_REG,
3337 IPW_RESET_REG_MASTER_DISABLED |
3338 IPW_RESET_REG_STOP_MASTER |
3339 CBD_RESET_REG_PRINCETON_RESET);
3340
3341 /* enable power management */
3342 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3343 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3344
3345 IPW_DEBUG_TRACE("<<\n");
3346 }
3347
3348 static int ipw_init_nic(struct ipw_priv *priv)
3349 {
3350 int rc;
3351
3352 IPW_DEBUG_TRACE(">>\n");
3353 /* reset */
3354 /*prvHwInitNic */
3355 /* set "initialization complete" bit to move adapter to D0 state */
3356 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3357
3358 /* low-level PLL activation */
3359 ipw_write32(priv, IPW_READ_INT_REGISTER,
3360 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3361
3362 /* wait for clock stabilization */
3363 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3364 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3365 if (rc < 0)
3366 		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3367
3368 /* assert SW reset */
3369 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3370
3371 udelay(10);
3372
3373 /* set "initialization complete" bit to move adapter to D0 state */
3374 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3375
3376 	IPW_DEBUG_TRACE("<<\n");
3377 return 0;
3378 }
3379
3380 /* Call this function from process context; it will sleep in request_firmware.
3381  * Probe is an OK place to call this from.
3382 */
3383 static int ipw_reset_nic(struct ipw_priv *priv)
3384 {
3385 int rc = 0;
3386 unsigned long flags;
3387
3388 IPW_DEBUG_TRACE(">>\n");
3389
3390 rc = ipw_init_nic(priv);
3391
3392 spin_lock_irqsave(&priv->lock, flags);
3393 /* Clear the 'host command active' bit... */
3394 priv->status &= ~STATUS_HCMD_ACTIVE;
3395 wake_up_interruptible(&priv->wait_command_queue);
3396 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3397 wake_up_interruptible(&priv->wait_state);
3398 spin_unlock_irqrestore(&priv->lock, flags);
3399
3400 IPW_DEBUG_TRACE("<<\n");
3401 return rc;
3402 }
3403
3404
3405 struct ipw_fw {
3406 __le32 ver;
3407 __le32 boot_size;
3408 __le32 ucode_size;
3409 __le32 fw_size;
3410 u8 data[0];
3411 };
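
/*
 * On-disk image layout consumed by ipw_get_fw()/ipw_load(): the ipw_fw
 * header above is immediately followed by the boot image (boot_size
 * bytes), the DINO microcode (ucode_size bytes) and the main firmware
 * (fw_size bytes); all of the header fields are little-endian.
 */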
3412
3413 static int ipw_get_fw(struct ipw_priv *priv,
3414 const struct firmware **raw, const char *name)
3415 {
3416 struct ipw_fw *fw;
3417 int rc;
3418
3419 /* ask firmware_class module to get the boot firmware off disk */
3420 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3421 if (rc < 0) {
3422 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3423 return rc;
3424 }
3425
3426 if ((*raw)->size < sizeof(*fw)) {
3427 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3428 return -EINVAL;
3429 }
3430
3431 fw = (void *)(*raw)->data;
3432
3433 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3434 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3435 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3436 name, (*raw)->size);
3437 return -EINVAL;
3438 }
3439
3440 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3441 name,
3442 le32_to_cpu(fw->ver) >> 16,
3443 le32_to_cpu(fw->ver) & 0xff,
3444 (*raw)->size - sizeof(*fw));
3445 return 0;
3446 }
3447
3448 #define IPW_RX_BUF_SIZE (3000)
3449
3450 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3451 struct ipw_rx_queue *rxq)
3452 {
3453 unsigned long flags;
3454 int i;
3455
3456 spin_lock_irqsave(&rxq->lock, flags);
3457
3458 INIT_LIST_HEAD(&rxq->rx_free);
3459 INIT_LIST_HEAD(&rxq->rx_used);
3460
3461 /* Fill the rx_used queue with _all_ of the Rx buffers */
3462 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3463 /* In the reset function, these buffers may have been allocated
3464 * to an SKB, so we need to unmap and free potential storage */
3465 if (rxq->pool[i].skb != NULL) {
3466 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3467 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3468 dev_kfree_skb(rxq->pool[i].skb);
3469 rxq->pool[i].skb = NULL;
3470 }
3471 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3472 }
3473
3474 /* Set us so that we have processed and used all buffers, but have
3475 * not restocked the Rx queue with fresh buffers */
3476 rxq->read = rxq->write = 0;
3477 rxq->free_count = 0;
3478 spin_unlock_irqrestore(&rxq->lock, flags);
3479 }
3480
3481 #ifdef CONFIG_PM
3482 static int fw_loaded = 0;
3483 static const struct firmware *raw = NULL;
3484
3485 static void free_firmware(void)
3486 {
3487 if (fw_loaded) {
3488 release_firmware(raw);
3489 raw = NULL;
3490 fw_loaded = 0;
3491 }
3492 }
3493 #else
3494 #define free_firmware() do {} while (0)
3495 #endif
3496
3497 static int ipw_load(struct ipw_priv *priv)
3498 {
3499 #ifndef CONFIG_PM
3500 const struct firmware *raw = NULL;
3501 #endif
3502 struct ipw_fw *fw;
3503 u8 *boot_img, *ucode_img, *fw_img;
3504 u8 *name = NULL;
3505 int rc = 0, retries = 3;
3506
3507 switch (priv->ieee->iw_mode) {
3508 case IW_MODE_ADHOC:
3509 name = "ipw2200-ibss.fw";
3510 break;
3511 #ifdef CONFIG_IPW2200_MONITOR
3512 case IW_MODE_MONITOR:
3513 name = "ipw2200-sniffer.fw";
3514 break;
3515 #endif
3516 case IW_MODE_INFRA:
3517 name = "ipw2200-bss.fw";
3518 break;
3519 }
3520
3521 if (!name) {
3522 rc = -EINVAL;
3523 goto error;
3524 }
3525
3526 #ifdef CONFIG_PM
3527 if (!fw_loaded) {
3528 #endif
3529 rc = ipw_get_fw(priv, &raw, name);
3530 if (rc < 0)
3531 goto error;
3532 #ifdef CONFIG_PM
3533 }
3534 #endif
3535
3536 fw = (void *)raw->data;
3537 boot_img = &fw->data[0];
3538 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3539 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3540 le32_to_cpu(fw->ucode_size)];
3541
3542 if (rc < 0)
3543 goto error;
3544
3545 if (!priv->rxq)
3546 priv->rxq = ipw_rx_queue_alloc(priv);
3547 else
3548 ipw_rx_queue_reset(priv, priv->rxq);
3549 if (!priv->rxq) {
3550 IPW_ERROR("Unable to initialize Rx queue\n");
3551 goto error;
3552 }
3553
3554 retry:
3555 /* Ensure interrupts are disabled */
3556 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3557 priv->status &= ~STATUS_INT_ENABLED;
3558
3559 /* ack pending interrupts */
3560 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3561
3562 ipw_stop_nic(priv);
3563
3564 rc = ipw_reset_nic(priv);
3565 if (rc < 0) {
3566 IPW_ERROR("Unable to reset NIC\n");
3567 goto error;
3568 }
3569
3570 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3571 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3572
3573 /* DMA the initial boot firmware into the device */
3574 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3575 if (rc < 0) {
3576 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3577 goto error;
3578 }
3579
3580 /* kick start the device */
3581 ipw_start_nic(priv);
3582
3583 /* wait for the device to finish its initial startup sequence */
3584 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3585 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3586 if (rc < 0) {
3587 IPW_ERROR("device failed to boot initial fw image\n");
3588 goto error;
3589 }
3590 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3591
3592 /* ack fw init done interrupt */
3593 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3594
3595 /* DMA the ucode into the device */
3596 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3597 if (rc < 0) {
3598 IPW_ERROR("Unable to load ucode: %d\n", rc);
3599 goto error;
3600 }
3601
3602 /* stop nic */
3603 ipw_stop_nic(priv);
3604
3605 /* DMA bss firmware into the device */
3606 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3607 if (rc < 0) {
3608 IPW_ERROR("Unable to load firmware: %d\n", rc);
3609 goto error;
3610 }
3611 #ifdef CONFIG_PM
3612 fw_loaded = 1;
3613 #endif
3614
3615 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3616
3617 rc = ipw_queue_reset(priv);
3618 if (rc < 0) {
3619 IPW_ERROR("Unable to initialize queues\n");
3620 goto error;
3621 }
3622
3623 /* Ensure interrupts are disabled */
3624 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3625 /* ack pending interrupts */
3626 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3627
3628 /* kick start the device */
3629 ipw_start_nic(priv);
3630
3631 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3632 if (retries > 0) {
3633 IPW_WARNING("Parity error. Retrying init.\n");
3634 retries--;
3635 goto retry;
3636 }
3637
3638 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3639 rc = -EIO;
3640 goto error;
3641 }
3642
3643 /* wait for the device */
3644 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3645 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3646 if (rc < 0) {
3647 IPW_ERROR("device failed to start within 500ms\n");
3648 goto error;
3649 }
3650 IPW_DEBUG_INFO("device response after %dms\n", rc);
3651
3652 /* ack fw init done interrupt */
3653 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3654
3655 /* read eeprom data */
3656 priv->eeprom_delay = 1;
3657 ipw_read_eeprom(priv);
3658 /* initialize the eeprom region of sram */
3659 ipw_eeprom_init_sram(priv);
3660
3661 /* enable interrupts */
3662 ipw_enable_interrupts(priv);
3663
3664 /* Ensure our queue has valid packets */
3665 ipw_rx_queue_replenish(priv);
3666
3667 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3668
3669 /* ack pending interrupts */
3670 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3671
3672 #ifndef CONFIG_PM
3673 release_firmware(raw);
3674 #endif
3675 return 0;
3676
3677 error:
3678 if (priv->rxq) {
3679 ipw_rx_queue_free(priv, priv->rxq);
3680 priv->rxq = NULL;
3681 }
3682 ipw_tx_queue_free(priv);
3683 release_firmware(raw);
3684 #ifdef CONFIG_PM
3685 fw_loaded = 0;
3686 raw = NULL;
3687 #endif
3688
3689 return rc;
3690 }
3691
3692 /**
3693 * DMA services
3694 *
3695 * Theory of operation
3696 *
3697  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3698  * Two empty entries are always kept in the buffer to protect from overflow.
3699  *
3700  * For the Tx queues there are low mark and high mark limits.  If, after
3701  * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3702  * is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free
3703  * space becomes > high mark, the Tx queue is resumed.
3704 *
3705 * The IPW operates with six queues, one receive queue in the device's
3706 * sram, one transmit queue for sending commands to the device firmware,
3707 * and four transmit queues for data.
3708 *
3709 * The four transmit queues allow for performing quality of service (qos)
3710 * transmissions as per the 802.11 protocol. Currently Linux does not
3711 * provide a mechanism to the user for utilizing prioritized queues, so
3712 * we only utilize the first data transmit queue (queue1).
3713 */
3714
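/*
 * Worked example (illustrative) for the queue-space helpers below: with
 * n_bd = 64, first_empty = 10 (next slot to fill) and last_used = 8 (next
 * slot to reclaim), ipw_tx_queue_space() computes s = 8 - 10 = -2, wraps
 * to -2 + 64 = 62, and reports 62 - 2 = 60 free slots -- the two reserved
 * entries keep a completely full ring distinguishable from an empty one.
 */
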
3715 /**
3716  * The driver allocates Rx buffers of IPW_RX_BUF_SIZE bytes (defined above).
3717 */
3718
3719 /**
3720 * ipw_rx_queue_space - Return number of free slots available in queue.
3721 */
3722 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3723 {
3724 int s = q->read - q->write;
3725 if (s <= 0)
3726 s += RX_QUEUE_SIZE;
3727 	/* keep some slack so a full queue is not confused with an empty one */
3728 s -= 2;
3729 if (s < 0)
3730 s = 0;
3731 return s;
3732 }
3733
3734 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3735 {
3736 int s = q->last_used - q->first_empty;
3737 if (s <= 0)
3738 s += q->n_bd;
3739 	s -= 2;			/* keep some reserve so an empty queue is not confused with a full one */
3740 if (s < 0)
3741 s = 0;
3742 return s;
3743 }
3744
3745 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3746 {
3747 return (++index == n_bd) ? 0 : index;
3748 }
3749
3750 /**
3751 * Initialize common DMA queue structure
3752 *
3753 * @param q queue to init
3754  * @param count Number of BD's to allocate. Should be a power of 2
3755 * @param read_register Address for 'read' register
3756 * (not offset within BAR, full address)
3757 * @param write_register Address for 'write' register
3758 * (not offset within BAR, full address)
3759 * @param base_register Address for 'base' register
3760 * (not offset within BAR, full address)
3761 * @param size Address for 'size' register
3762 * (not offset within BAR, full address)
3763 */
3764 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3765 int count, u32 read, u32 write, u32 base, u32 size)
3766 {
3767 q->n_bd = count;
3768
3769 q->low_mark = q->n_bd / 4;
3770 if (q->low_mark < 4)
3771 q->low_mark = 4;
3772
3773 q->high_mark = q->n_bd / 8;
3774 if (q->high_mark < 2)
3775 q->high_mark = 2;
3776
3777 q->first_empty = q->last_used = 0;
3778 q->reg_r = read;
3779 q->reg_w = write;
3780
3781 ipw_write32(priv, base, q->dma_addr);
3782 ipw_write32(priv, size, count);
3783 ipw_write32(priv, read, 0);
3784 ipw_write32(priv, write, 0);
3785
3786 _ipw_read32(priv, 0x90);
3787 }
3788
3789 static int ipw_queue_tx_init(struct ipw_priv *priv,
3790 struct clx2_tx_queue *q,
3791 int count, u32 read, u32 write, u32 base, u32 size)
3792 {
3793 struct pci_dev *dev = priv->pci_dev;
3794
3795 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3796 if (!q->txb) {
3797 		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3798 return -ENOMEM;
3799 }
3800
3801 q->bd =
3802 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3803 if (!q->bd) {
3804 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3805 sizeof(q->bd[0]) * count);
3806 kfree(q->txb);
3807 q->txb = NULL;
3808 return -ENOMEM;
3809 }
3810
3811 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3812 return 0;
3813 }
3814
3815 /**
3816  * Free one TFD, the one at index [txq->q.last_used].
3817 * Do NOT advance any indexes
3818 *
3819 * @param dev
3820 * @param txq
3821 */
3822 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3823 struct clx2_tx_queue *txq)
3824 {
3825 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3826 struct pci_dev *dev = priv->pci_dev;
3827 int i;
3828
3829 /* classify bd */
3830 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3831 /* nothing to cleanup after for host commands */
3832 return;
3833
3834 /* sanity check */
3835 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3836 IPW_ERROR("Too many chunks: %i\n",
3837 le32_to_cpu(bd->u.data.num_chunks));
3838 		/** @todo issue a fatal error; this is quite a serious situation */
3839 return;
3840 }
3841
3842 /* unmap chunks if any */
3843 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3844 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3845 le16_to_cpu(bd->u.data.chunk_len[i]),
3846 PCI_DMA_TODEVICE);
3847 if (txq->txb[txq->q.last_used]) {
3848 libipw_txb_free(txq->txb[txq->q.last_used]);
3849 txq->txb[txq->q.last_used] = NULL;
3850 }
3851 }
3852 }
3853
3854 /**
3855 * Deallocate DMA queue.
3856 *
3857 * Empty queue by removing and destroying all BD's.
3858 * Free all buffers.
3859 *
3860  * @param priv
3861  * @param txq
3862 */
3863 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3864 {
3865 struct clx2_queue *q = &txq->q;
3866 struct pci_dev *dev = priv->pci_dev;
3867
3868 if (q->n_bd == 0)
3869 return;
3870
3871 /* first, empty all BD's */
3872 for (; q->first_empty != q->last_used;
3873 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3874 ipw_queue_tx_free_tfd(priv, txq);
3875 }
3876
3877 /* free buffers belonging to queue itself */
3878 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3879 q->dma_addr);
3880 kfree(txq->txb);
3881
3882 /* 0 fill whole structure */
3883 memset(txq, 0, sizeof(*txq));
3884 }
3885
3886 /**
3887 * Destroy all DMA queues and structures
3888 *
3889 * @param priv
3890 */
3891 static void ipw_tx_queue_free(struct ipw_priv *priv)
3892 {
3893 /* Tx CMD queue */
3894 ipw_queue_tx_free(priv, &priv->txq_cmd);
3895
3896 /* Tx queues */
3897 ipw_queue_tx_free(priv, &priv->txq[0]);
3898 ipw_queue_tx_free(priv, &priv->txq[1]);
3899 ipw_queue_tx_free(priv, &priv->txq[2]);
3900 ipw_queue_tx_free(priv, &priv->txq[3]);
3901 }
3902
3903 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3904 {
3905 /* First 3 bytes are manufacturer */
3906 bssid[0] = priv->mac_addr[0];
3907 bssid[1] = priv->mac_addr[1];
3908 bssid[2] = priv->mac_addr[2];
3909
3910 /* Last bytes are random */
3911 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3912
3913 bssid[0] &= 0xfe; /* clear multicast bit */
3914 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3915 }
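/*
 * Illustrative sketch, not part of the driver: the two mask operations
 * above force a locally administered, unicast BSSID.  For a hypothetical
 * adapter MAC of 00:0c:f1:aa:bb:cc the generated BSSID keeps the 00:0c:f1
 * manufacturer prefix, randomizes the last three bytes, and then fixes up
 * the first octet:
 *
 *	bssid[0] &= 0xfe;	// clear bit 0 (I/G): unicast
 *	bssid[0] |= 0x02;	// set bit 1 (U/L): locally administered
 *
 * so 0x00 becomes 0x02 and the result looks like 02:0c:f1:xx:yy:zz.
 */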
3916
3917 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3918 {
3919 struct ipw_station_entry entry;
3920 int i;
3921
3922 for (i = 0; i < priv->num_stations; i++) {
3923 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3924 /* Another node is active in network */
3925 priv->missed_adhoc_beacons = 0;
3926 if (!(priv->config & CFG_STATIC_CHANNEL))
3927 /* when other nodes drop out, we drop out */
3928 priv->config &= ~CFG_ADHOC_PERSIST;
3929
3930 return i;
3931 }
3932 }
3933
3934 if (i == MAX_STATIONS)
3935 return IPW_INVALID_STATION;
3936
3937 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3938
3939 entry.reserved = 0;
3940 entry.support_mode = 0;
3941 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3942 memcpy(priv->stations[i], bssid, ETH_ALEN);
3943 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3944 &entry, sizeof(entry));
3945 priv->num_stations++;
3946
3947 return i;
3948 }
3949
3950 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3951 {
3952 int i;
3953
3954 for (i = 0; i < priv->num_stations; i++)
3955 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3956 return i;
3957
3958 return IPW_INVALID_STATION;
3959 }
3960
3961 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3962 {
3963 int err;
3964
3965 if (priv->status & STATUS_ASSOCIATING) {
3966 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3967 schedule_work(&priv->disassociate);
3968 return;
3969 }
3970
3971 if (!(priv->status & STATUS_ASSOCIATED)) {
3972 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3973 return;
3974 }
3975
3976 	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3977 "on channel %d.\n",
3978 priv->assoc_request.bssid,
3979 priv->assoc_request.channel);
3980
3981 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3982 priv->status |= STATUS_DISASSOCIATING;
3983
3984 if (quiet)
3985 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3986 else
3987 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3988
3989 err = ipw_send_associate(priv, &priv->assoc_request);
3990 if (err) {
3991 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3992 "failed.\n");
3993 return;
3994 }
3995
3996 }
3997
3998 static int ipw_disassociate(void *data)
3999 {
4000 struct ipw_priv *priv = data;
4001 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
4002 return 0;
4003 ipw_send_disassociate(data, 0);
4004 netif_carrier_off(priv->net_dev);
4005 return 1;
4006 }
4007
4008 static void ipw_bg_disassociate(struct work_struct *work)
4009 {
4010 struct ipw_priv *priv =
4011 container_of(work, struct ipw_priv, disassociate);
4012 mutex_lock(&priv->mutex);
4013 ipw_disassociate(priv);
4014 mutex_unlock(&priv->mutex);
4015 }
4016
4017 static void ipw_system_config(struct work_struct *work)
4018 {
4019 struct ipw_priv *priv =
4020 container_of(work, struct ipw_priv, system_config);
4021
4022 #ifdef CONFIG_IPW2200_PROMISCUOUS
4023 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4024 priv->sys_config.accept_all_data_frames = 1;
4025 priv->sys_config.accept_non_directed_frames = 1;
4026 priv->sys_config.accept_all_mgmt_bcpr = 1;
4027 priv->sys_config.accept_all_mgmt_frames = 1;
4028 }
4029 #endif
4030
4031 ipw_send_system_config(priv);
4032 }
4033
4034 struct ipw_status_code {
4035 u16 status;
4036 const char *reason;
4037 };
4038
4039 static const struct ipw_status_code ipw_status_codes[] = {
4040 {0x00, "Successful"},
4041 {0x01, "Unspecified failure"},
4042 {0x0A, "Cannot support all requested capabilities in the "
4043 "Capability information field"},
4044 {0x0B, "Reassociation denied due to inability to confirm that "
4045 "association exists"},
4046 {0x0C, "Association denied due to reason outside the scope of this "
4047 "standard"},
4048 {0x0D,
4049 "Responding station does not support the specified authentication "
4050 "algorithm"},
4051 {0x0E,
4052 "Received an Authentication frame with authentication sequence "
4053 "transaction sequence number out of expected sequence"},
4054 {0x0F, "Authentication rejected because of challenge failure"},
4055 {0x10, "Authentication rejected due to timeout waiting for next "
4056 "frame in sequence"},
4057 {0x11, "Association denied because AP is unable to handle additional "
4058 "associated stations"},
4059 {0x12,
4060 "Association denied due to requesting station not supporting all "
4061 "of the datarates in the BSSBasicServiceSet Parameter"},
4062 {0x13,
4063 "Association denied due to requesting station not supporting "
4064 "short preamble operation"},
4065 {0x14,
4066 "Association denied due to requesting station not supporting "
4067 "PBCC encoding"},
4068 {0x15,
4069 "Association denied due to requesting station not supporting "
4070 "channel agility"},
4071 {0x19,
4072 "Association denied due to requesting station not supporting "
4073 "short slot operation"},
4074 {0x1A,
4075 "Association denied due to requesting station not supporting "
4076 "DSSS-OFDM operation"},
4077 {0x28, "Invalid Information Element"},
4078 {0x29, "Group Cipher is not valid"},
4079 {0x2A, "Pairwise Cipher is not valid"},
4080 {0x2B, "AKMP is not valid"},
4081 {0x2C, "Unsupported RSN IE version"},
4082 {0x2D, "Invalid RSN IE Capabilities"},
4083 {0x2E, "Cipher suite is rejected per security policy"},
4084 };
4085
4086 static const char *ipw_get_status_code(u16 status)
4087 {
4088 int i;
4089 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4090 if (ipw_status_codes[i].status == (status & 0xff))
4091 return ipw_status_codes[i].reason;
4092 return "Unknown status value.";
4093 }
4094
4095 static inline void average_init(struct average *avg)
4096 {
4097 memset(avg, 0, sizeof(*avg));
4098 }
4099
4100 #define DEPTH_RSSI 8
4101 #define DEPTH_NOISE 16
4102 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4103 {
4104 return ((depth-1)*prev_avg + val)/depth;
4105 }
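/*
 * Illustrative sketch, not part of the driver: exponential_average() is a
 * simple IIR smoother, new = ((depth - 1) * prev + val) / depth.  With
 * DEPTH_RSSI == 8, a previous average of -60 dBm and a new sample of
 * -52 dBm:
 *
 *	(7 * -60 + -52) / 8 = (-420 - 52) / 8 = -472 / 8 = -59
 *
 * so each sample pulls the average roughly 1/depth of the way toward the
 * new value.
 */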
4106
4107 static void average_add(struct average *avg, s16 val)
4108 {
4109 avg->sum -= avg->entries[avg->pos];
4110 avg->sum += val;
4111 avg->entries[avg->pos++] = val;
4112 if (unlikely(avg->pos == AVG_ENTRIES)) {
4113 avg->init = 1;
4114 avg->pos = 0;
4115 }
4116 }
4117
4118 static s16 average_value(struct average *avg)
4119 {
4120 if (!unlikely(avg->init)) {
4121 if (avg->pos)
4122 return avg->sum / avg->pos;
4123 return 0;
4124 }
4125
4126 return avg->sum / AVG_ENTRIES;
4127 }
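/*
 * Illustrative sketch, not part of the driver: average_add() and
 * average_value() implement a sliding-window mean over the last
 * AVG_ENTRIES samples.  Until the ring buffer has wrapped once
 * (avg->init == 0) the divisor is the number of samples collected so far,
 * so early readings are not diluted by the zeroed entries.  Hypothetical
 * usage, assuming AVG_ENTRIES > 3:
 *
 *	struct average avg;
 *
 *	average_init(&avg);
 *	average_add(&avg, 10);
 *	average_add(&avg, 20);
 *	average_add(&avg, 30);
 *	// average_value(&avg) == (10 + 20 + 30) / 3 == 20
 */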
4128
4129 static void ipw_reset_stats(struct ipw_priv *priv)
4130 {
4131 u32 len = sizeof(u32);
4132
4133 priv->quality = 0;
4134
4135 average_init(&priv->average_missed_beacons);
4136 priv->exp_avg_rssi = -60;
4137 priv->exp_avg_noise = -85 + 0x100;
4138
4139 priv->last_rate = 0;
4140 priv->last_missed_beacons = 0;
4141 priv->last_rx_packets = 0;
4142 priv->last_tx_packets = 0;
4143 priv->last_tx_failures = 0;
4144
4145 /* Firmware managed, reset only when NIC is restarted, so we have to
4146 * normalize on the current value */
4147 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4148 &priv->last_rx_err, &len);
4149 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4150 &priv->last_tx_failures, &len);
4151
4152 /* Driver managed, reset with each association */
4153 priv->missed_adhoc_beacons = 0;
4154 priv->missed_beacons = 0;
4155 priv->tx_packets = 0;
4156 priv->rx_packets = 0;
4157
4158 }
4159
4160 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4161 {
4162 u32 i = 0x80000000;
4163 u32 mask = priv->rates_mask;
4164 /* If currently associated in B mode, restrict the maximum
4165 * rate match to B rates */
4166 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4167 mask &= LIBIPW_CCK_RATES_MASK;
4168
4169 /* TODO: Verify that the rate is supported by the current rates
4170 * list. */
4171
4172 while (i && !(mask & i))
4173 i >>= 1;
4174 switch (i) {
4175 case LIBIPW_CCK_RATE_1MB_MASK:
4176 return 1000000;
4177 case LIBIPW_CCK_RATE_2MB_MASK:
4178 return 2000000;
4179 case LIBIPW_CCK_RATE_5MB_MASK:
4180 return 5500000;
4181 case LIBIPW_OFDM_RATE_6MB_MASK:
4182 return 6000000;
4183 case LIBIPW_OFDM_RATE_9MB_MASK:
4184 return 9000000;
4185 case LIBIPW_CCK_RATE_11MB_MASK:
4186 return 11000000;
4187 case LIBIPW_OFDM_RATE_12MB_MASK:
4188 return 12000000;
4189 case LIBIPW_OFDM_RATE_18MB_MASK:
4190 return 18000000;
4191 case LIBIPW_OFDM_RATE_24MB_MASK:
4192 return 24000000;
4193 case LIBIPW_OFDM_RATE_36MB_MASK:
4194 return 36000000;
4195 case LIBIPW_OFDM_RATE_48MB_MASK:
4196 return 48000000;
4197 case LIBIPW_OFDM_RATE_54MB_MASK:
4198 return 54000000;
4199 }
4200
4201 if (priv->ieee->mode == IEEE_B)
4202 return 11000000;
4203 else
4204 return 54000000;
4205 }
4206
4207 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4208 {
4209 u32 rate, len = sizeof(rate);
4210 int err;
4211
4212 if (!(priv->status & STATUS_ASSOCIATED))
4213 return 0;
4214
4215 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4216 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4217 &len);
4218 if (err) {
4219 IPW_DEBUG_INFO("failed querying ordinals.\n");
4220 return 0;
4221 }
4222 } else
4223 return ipw_get_max_rate(priv);
4224
4225 switch (rate) {
4226 case IPW_TX_RATE_1MB:
4227 return 1000000;
4228 case IPW_TX_RATE_2MB:
4229 return 2000000;
4230 case IPW_TX_RATE_5MB:
4231 return 5500000;
4232 case IPW_TX_RATE_6MB:
4233 return 6000000;
4234 case IPW_TX_RATE_9MB:
4235 return 9000000;
4236 case IPW_TX_RATE_11MB:
4237 return 11000000;
4238 case IPW_TX_RATE_12MB:
4239 return 12000000;
4240 case IPW_TX_RATE_18MB:
4241 return 18000000;
4242 case IPW_TX_RATE_24MB:
4243 return 24000000;
4244 case IPW_TX_RATE_36MB:
4245 return 36000000;
4246 case IPW_TX_RATE_48MB:
4247 return 48000000;
4248 case IPW_TX_RATE_54MB:
4249 return 54000000;
4250 }
4251
4252 return 0;
4253 }
4254
4255 #define IPW_STATS_INTERVAL (2 * HZ)
4256 static void ipw_gather_stats(struct ipw_priv *priv)
4257 {
4258 u32 rx_err, rx_err_delta, rx_packets_delta;
4259 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4260 u32 missed_beacons_percent, missed_beacons_delta;
4261 u32 quality = 0;
4262 u32 len = sizeof(u32);
4263 s16 rssi;
4264 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4265 rate_quality;
4266 u32 max_rate;
4267
4268 if (!(priv->status & STATUS_ASSOCIATED)) {
4269 priv->quality = 0;
4270 return;
4271 }
4272
4273 /* Update the statistics */
4274 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4275 &priv->missed_beacons, &len);
4276 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4277 priv->last_missed_beacons = priv->missed_beacons;
4278 if (priv->assoc_request.beacon_interval) {
4279 missed_beacons_percent = missed_beacons_delta *
4280 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4281 (IPW_STATS_INTERVAL * 10);
4282 } else {
4283 missed_beacons_percent = 0;
4284 }
4285 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4286
4287 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4288 rx_err_delta = rx_err - priv->last_rx_err;
4289 priv->last_rx_err = rx_err;
4290
4291 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4292 tx_failures_delta = tx_failures - priv->last_tx_failures;
4293 priv->last_tx_failures = tx_failures;
4294
4295 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4296 priv->last_rx_packets = priv->rx_packets;
4297
4298 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4299 priv->last_tx_packets = priv->tx_packets;
4300
4301 /* Calculate quality based on the following:
4302 *
4303 * Missed beacon: 100% = 0, 0% = 70% missed
4304 * Rate: 60% = 1Mbs, 100% = Max
4305 * Rx and Tx errors represent a straight % of total Rx/Tx
4306 * RSSI: 100% = > -50, 0% = < -80
4307 * Rx errors: 100% = 0, 0% = 50% missed
4308 *
4309 * The lowest computed quality is used.
4310 *
4311 */
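	/*
	 * Illustrative sketch, not part of the driver: with the
	 * BEACON_THRESHOLD of 5 used below, a sample period in which 24%
	 * of the expected beacons were missed gives
	 *
	 *	beacon_quality = 100 - 24 = 76
	 *	beacon_quality = (76 - 5) * 100 / (100 - 5) = 7100 / 95 = 74
	 *
	 * i.e. the raw percentage is rescaled so that anything at or below
	 * the threshold reports 0% while a clean run still reports 100%.
	 */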
4312 #define BEACON_THRESHOLD 5
4313 beacon_quality = 100 - missed_beacons_percent;
4314 if (beacon_quality < BEACON_THRESHOLD)
4315 beacon_quality = 0;
4316 else
4317 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4318 (100 - BEACON_THRESHOLD);
4319 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4320 beacon_quality, missed_beacons_percent);
4321
4322 priv->last_rate = ipw_get_current_rate(priv);
4323 max_rate = ipw_get_max_rate(priv);
4324 rate_quality = priv->last_rate * 40 / max_rate + 60;
4325 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4326 rate_quality, priv->last_rate / 1000000);
4327
4328 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4329 rx_quality = 100 - (rx_err_delta * 100) /
4330 (rx_packets_delta + rx_err_delta);
4331 else
4332 rx_quality = 100;
4333 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4334 rx_quality, rx_err_delta, rx_packets_delta);
4335
4336 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4337 tx_quality = 100 - (tx_failures_delta * 100) /
4338 (tx_packets_delta + tx_failures_delta);
4339 else
4340 tx_quality = 100;
4341 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4342 tx_quality, tx_failures_delta, tx_packets_delta);
4343
4344 rssi = priv->exp_avg_rssi;
4345 signal_quality =
4346 (100 *
4347 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4348 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4349 (priv->ieee->perfect_rssi - rssi) *
4350 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4351 62 * (priv->ieee->perfect_rssi - rssi))) /
4352 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4353 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4354 if (signal_quality > 100)
4355 signal_quality = 100;
4356 else if (signal_quality < 1)
4357 signal_quality = 0;
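	/*
	 * Illustrative sketch, not part of the driver: the quadratic above
	 * maps the averaged RSSI onto a 0-100% scale between worst_rssi and
	 * perfect_rssi.  Assuming, hypothetically, perfect_rssi = -20 and
	 * worst_rssi = -85, an averaged RSSI of -60 dBm gives
	 *
	 *	range = -20 - -85 = 65,  delta = -20 - -60 = 40
	 *	(100 * 65 * 65 - 40 * (15 * 65 + 62 * 40)) / (65 * 65)
	 *		= (422500 - 138200) / 4225 = 67
	 *
	 * which the clamps above then keep within [0, 100].
	 */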
4358
4359 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4360 signal_quality, rssi);
4361
4362 quality = min(rx_quality, signal_quality);
4363 quality = min(tx_quality, quality);
4364 quality = min(rate_quality, quality);
4365 quality = min(beacon_quality, quality);
4366 if (quality == beacon_quality)
4367 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4368 quality);
4369 if (quality == rate_quality)
4370 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4371 quality);
4372 if (quality == tx_quality)
4373 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4374 quality);
4375 if (quality == rx_quality)
4376 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4377 quality);
4378 if (quality == signal_quality)
4379 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4380 quality);
4381
4382 priv->quality = quality;
4383
4384 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4385 }
4386
4387 static void ipw_bg_gather_stats(struct work_struct *work)
4388 {
4389 struct ipw_priv *priv =
4390 container_of(work, struct ipw_priv, gather_stats.work);
4391 mutex_lock(&priv->mutex);
4392 ipw_gather_stats(priv);
4393 mutex_unlock(&priv->mutex);
4394 }
4395
4396 /* Missed beacon behavior:
4397 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4398 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4399 * Above disassociate threshold, give up and stop scanning.
4400 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4401 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4402 int missed_count)
4403 {
4404 priv->notif_missed_beacons = missed_count;
4405
4406 if (missed_count > priv->disassociate_threshold &&
4407 priv->status & STATUS_ASSOCIATED) {
4408 /* If associated and we've hit the missed
4409 * beacon threshold, disassociate, turn
4410 * off roaming, and abort any active scans */
4411 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4412 IPW_DL_STATE | IPW_DL_ASSOC,
4413 "Missed beacon: %d - disassociate\n", missed_count);
4414 priv->status &= ~STATUS_ROAMING;
4415 if (priv->status & STATUS_SCANNING) {
4416 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4417 IPW_DL_STATE,
4418 "Aborting scan with missed beacon.\n");
4419 schedule_work(&priv->abort_scan);
4420 }
4421
4422 schedule_work(&priv->disassociate);
4423 return;
4424 }
4425
4426 if (priv->status & STATUS_ROAMING) {
4427 /* If we are currently roaming, then just
4428 * print a debug statement... */
4429 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4430 "Missed beacon: %d - roam in progress\n",
4431 missed_count);
4432 return;
4433 }
4434
4435 if (roaming &&
4436 (missed_count > priv->roaming_threshold &&
4437 missed_count <= priv->disassociate_threshold)) {
4438 /* If we are not already roaming, set the ROAM
4439 * bit in the status and kick off a scan.
4440 * This can happen several times before we reach
4441 * disassociate_threshold. */
4442 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4443 "Missed beacon: %d - initiate "
4444 "roaming\n", missed_count);
4445 if (!(priv->status & STATUS_ROAMING)) {
4446 priv->status |= STATUS_ROAMING;
4447 if (!(priv->status & STATUS_SCANNING))
4448 schedule_delayed_work(&priv->request_scan, 0);
4449 }
4450 return;
4451 }
4452
4453 if (priv->status & STATUS_SCANNING &&
4454 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4455 /* Stop scan to keep fw from getting
4456 * stuck (only if we aren't roaming --
4457 * otherwise we'll never scan more than 2 or 3
4458 * channels..) */
4459 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4460 "Aborting scan with missed beacon.\n");
4461 schedule_work(&priv->abort_scan);
4462 }
4463
4464 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4465 }
4466
4467 static void ipw_scan_event(struct work_struct *work)
4468 {
4469 union iwreq_data wrqu;
4470
4471 struct ipw_priv *priv =
4472 container_of(work, struct ipw_priv, scan_event.work);
4473
4474 wrqu.data.length = 0;
4475 wrqu.data.flags = 0;
4476 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4477 }
4478
4479 static void handle_scan_event(struct ipw_priv *priv)
4480 {
4481 /* Only userspace-requested scan completion events go out immediately */
4482 if (!priv->user_requested_scan) {
4483 schedule_delayed_work(&priv->scan_event,
4484 round_jiffies_relative(msecs_to_jiffies(4000)));
4485 } else {
4486 priv->user_requested_scan = 0;
4487 mod_delayed_work(system_wq, &priv->scan_event, 0);
4488 }
4489 }
4490
4491 /**
4492 * Handle host notification packet.
4493 * Called from interrupt routine
4494 */
4495 static void ipw_rx_notification(struct ipw_priv *priv,
4496 struct ipw_rx_notification *notif)
4497 {
4498 DECLARE_SSID_BUF(ssid);
4499 u16 size = le16_to_cpu(notif->size);
4500
4501 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4502
4503 switch (notif->subtype) {
4504 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4505 struct notif_association *assoc = &notif->u.assoc;
4506
4507 switch (assoc->state) {
4508 case CMAS_ASSOCIATED:{
4509 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4510 IPW_DL_ASSOC,
4511 "associated: '%s' %pM\n",
4512 print_ssid(ssid, priv->essid,
4513 priv->essid_len),
4514 priv->bssid);
4515
4516 switch (priv->ieee->iw_mode) {
4517 case IW_MODE_INFRA:
4518 memcpy(priv->ieee->bssid,
4519 priv->bssid, ETH_ALEN);
4520 break;
4521
4522 case IW_MODE_ADHOC:
4523 memcpy(priv->ieee->bssid,
4524 priv->bssid, ETH_ALEN);
4525
4526 /* clear out the station table */
4527 priv->num_stations = 0;
4528
4529 IPW_DEBUG_ASSOC
4530 ("queueing adhoc check\n");
4531 schedule_delayed_work(
4532 &priv->adhoc_check,
4533 le16_to_cpu(priv->
4534 assoc_request.
4535 beacon_interval));
4536 break;
4537 }
4538
4539 priv->status &= ~STATUS_ASSOCIATING;
4540 priv->status |= STATUS_ASSOCIATED;
4541 schedule_work(&priv->system_config);
4542
4543 #ifdef CONFIG_IPW2200_QOS
4544 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4545 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4546 if ((priv->status & STATUS_AUTH) &&
4547 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4548 == IEEE80211_STYPE_ASSOC_RESP)) {
4549 if ((sizeof
4550 (struct
4551 libipw_assoc_response)
4552 <= size)
4553 && (size <= 2314)) {
4554 struct
4555 libipw_rx_stats
4556 stats = {
4557 .len = size - 1,
4558 };
4559
4560 IPW_DEBUG_QOS
4561 ("QoS Associate "
4562 "size %d\n", size);
4563 libipw_rx_mgt(priv->
4564 ieee,
4565 (struct
4566 libipw_hdr_4addr
4567 *)
4568 &notif->u.raw, &stats);
4569 }
4570 }
4571 #endif
4572
4573 schedule_work(&priv->link_up);
4574
4575 break;
4576 }
4577
4578 case CMAS_AUTHENTICATED:{
4579 if (priv->
4580 status & (STATUS_ASSOCIATED |
4581 STATUS_AUTH)) {
4582 struct notif_authenticate *auth
4583 = &notif->u.auth;
4584 IPW_DEBUG(IPW_DL_NOTIF |
4585 IPW_DL_STATE |
4586 IPW_DL_ASSOC,
4587 "deauthenticated: '%s' "
4588 "%pM"
4589 ": (0x%04X) - %s\n",
4590 print_ssid(ssid,
4591 priv->
4592 essid,
4593 priv->
4594 essid_len),
4595 priv->bssid,
4596 le16_to_cpu(auth->status),
4597 ipw_get_status_code
4598 (le16_to_cpu
4599 (auth->status)));
4600
4601 priv->status &=
4602 ~(STATUS_ASSOCIATING |
4603 STATUS_AUTH |
4604 STATUS_ASSOCIATED);
4605
4606 schedule_work(&priv->link_down);
4607 break;
4608 }
4609
4610 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4611 IPW_DL_ASSOC,
4612 "authenticated: '%s' %pM\n",
4613 print_ssid(ssid, priv->essid,
4614 priv->essid_len),
4615 priv->bssid);
4616 break;
4617 }
4618
4619 case CMAS_INIT:{
4620 if (priv->status & STATUS_AUTH) {
4621 struct
4622 libipw_assoc_response
4623 *resp;
4624 resp =
4625 (struct
4626 libipw_assoc_response
4627 *)&notif->u.raw;
4628 IPW_DEBUG(IPW_DL_NOTIF |
4629 IPW_DL_STATE |
4630 IPW_DL_ASSOC,
4631 "association failed (0x%04X): %s\n",
4632 le16_to_cpu(resp->status),
4633 ipw_get_status_code
4634 (le16_to_cpu
4635 (resp->status)));
4636 }
4637
4638 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4639 IPW_DL_ASSOC,
4640 "disassociated: '%s' %pM\n",
4641 print_ssid(ssid, priv->essid,
4642 priv->essid_len),
4643 priv->bssid);
4644
4645 priv->status &=
4646 ~(STATUS_DISASSOCIATING |
4647 STATUS_ASSOCIATING |
4648 STATUS_ASSOCIATED | STATUS_AUTH);
4649 if (priv->assoc_network
4650 && (priv->assoc_network->
4651 capability &
4652 WLAN_CAPABILITY_IBSS))
4653 ipw_remove_current_network
4654 (priv);
4655
4656 schedule_work(&priv->link_down);
4657
4658 break;
4659 }
4660
4661 case CMAS_RX_ASSOC_RESP:
4662 break;
4663
4664 default:
4665 IPW_ERROR("assoc: unknown (%d)\n",
4666 assoc->state);
4667 break;
4668 }
4669
4670 break;
4671 }
4672
4673 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4674 struct notif_authenticate *auth = &notif->u.auth;
4675 switch (auth->state) {
4676 case CMAS_AUTHENTICATED:
4677 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4678 "authenticated: '%s' %pM\n",
4679 print_ssid(ssid, priv->essid,
4680 priv->essid_len),
4681 priv->bssid);
4682 priv->status |= STATUS_AUTH;
4683 break;
4684
4685 case CMAS_INIT:
4686 if (priv->status & STATUS_AUTH) {
4687 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4688 IPW_DL_ASSOC,
4689 "authentication failed (0x%04X): %s\n",
4690 le16_to_cpu(auth->status),
4691 ipw_get_status_code(le16_to_cpu
4692 (auth->
4693 status)));
4694 }
4695 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4696 IPW_DL_ASSOC,
4697 "deauthenticated: '%s' %pM\n",
4698 print_ssid(ssid, priv->essid,
4699 priv->essid_len),
4700 priv->bssid);
4701
4702 priv->status &= ~(STATUS_ASSOCIATING |
4703 STATUS_AUTH |
4704 STATUS_ASSOCIATED);
4705
4706 schedule_work(&priv->link_down);
4707 break;
4708
4709 case CMAS_TX_AUTH_SEQ_1:
4710 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4711 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4712 break;
4713 case CMAS_RX_AUTH_SEQ_2:
4714 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4715 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4716 break;
4717 case CMAS_AUTH_SEQ_1_PASS:
4718 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4719 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4720 break;
4721 case CMAS_AUTH_SEQ_1_FAIL:
4722 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4723 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4724 break;
4725 case CMAS_TX_AUTH_SEQ_3:
4726 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4727 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4728 break;
4729 case CMAS_RX_AUTH_SEQ_4:
4730 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4731 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4732 break;
4733 case CMAS_AUTH_SEQ_2_PASS:
4734 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4735 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4736 break;
4737 case CMAS_AUTH_SEQ_2_FAIL:
4738 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4739 				  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4740 break;
4741 case CMAS_TX_ASSOC:
4742 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4743 IPW_DL_ASSOC, "TX_ASSOC\n");
4744 break;
4745 case CMAS_RX_ASSOC_RESP:
4746 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4747 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4748
4749 break;
4750 case CMAS_ASSOCIATED:
4751 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4752 IPW_DL_ASSOC, "ASSOCIATED\n");
4753 break;
4754 default:
4755 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4756 auth->state);
4757 break;
4758 }
4759 break;
4760 }
4761
4762 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4763 struct notif_channel_result *x =
4764 &notif->u.channel_result;
4765
4766 if (size == sizeof(*x)) {
4767 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4768 x->channel_num);
4769 } else {
4770 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4771 "(should be %zd)\n",
4772 size, sizeof(*x));
4773 }
4774 break;
4775 }
4776
4777 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4778 struct notif_scan_complete *x = &notif->u.scan_complete;
4779 if (size == sizeof(*x)) {
4780 IPW_DEBUG_SCAN
4781 ("Scan completed: type %d, %d channels, "
4782 "%d status\n", x->scan_type,
4783 x->num_channels, x->status);
4784 } else {
4785 IPW_ERROR("Scan completed of wrong size %d "
4786 "(should be %zd)\n",
4787 size, sizeof(*x));
4788 }
4789
4790 priv->status &=
4791 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4792
4793 wake_up_interruptible(&priv->wait_state);
4794 cancel_delayed_work(&priv->scan_check);
4795
4796 if (priv->status & STATUS_EXIT_PENDING)
4797 break;
4798
4799 priv->ieee->scans++;
4800
4801 #ifdef CONFIG_IPW2200_MONITOR
4802 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4803 priv->status |= STATUS_SCAN_FORCED;
4804 schedule_delayed_work(&priv->request_scan, 0);
4805 break;
4806 }
4807 priv->status &= ~STATUS_SCAN_FORCED;
4808 #endif /* CONFIG_IPW2200_MONITOR */
4809
4810 /* Do queued direct scans first */
4811 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4812 schedule_delayed_work(&priv->request_direct_scan, 0);
4813
4814 if (!(priv->status & (STATUS_ASSOCIATED |
4815 STATUS_ASSOCIATING |
4816 STATUS_ROAMING |
4817 STATUS_DISASSOCIATING)))
4818 schedule_work(&priv->associate);
4819 else if (priv->status & STATUS_ROAMING) {
4820 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4821 /* If a scan completed and we are in roam mode, then
4822 * the scan that completed was the one requested as a
4823 * result of entering roam... so, schedule the
4824 * roam work */
4825 schedule_work(&priv->roam);
4826 else
4827 /* Don't schedule if we aborted the scan */
4828 priv->status &= ~STATUS_ROAMING;
4829 } else if (priv->status & STATUS_SCAN_PENDING)
4830 schedule_delayed_work(&priv->request_scan, 0);
4831 else if (priv->config & CFG_BACKGROUND_SCAN
4832 && priv->status & STATUS_ASSOCIATED)
4833 schedule_delayed_work(&priv->request_scan,
4834 round_jiffies_relative(HZ));
4835
4836 			/* Send an empty event to user space.
4837 			 * We don't send the received data in the event because
4838 			 * that would require complex transcoding, and we want
4839 			 * to minimise the work done in the irq handler.
4840 			 * Use a request to extract the data.
4841 			 * Also, we generate this event for any scan, regardless
4842 			 * of how the scan was initiated. User space can just
4843 			 * sync on periodic scans to get fresh data...
4844 			 * Jean II */
4845 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4846 handle_scan_event(priv);
4847 break;
4848 }
4849
4850 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4851 struct notif_frag_length *x = &notif->u.frag_len;
4852
4853 if (size == sizeof(*x))
4854 IPW_ERROR("Frag length: %d\n",
4855 le16_to_cpu(x->frag_length));
4856 else
4857 IPW_ERROR("Frag length of wrong size %d "
4858 "(should be %zd)\n",
4859 size, sizeof(*x));
4860 break;
4861 }
4862
4863 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4864 struct notif_link_deterioration *x =
4865 &notif->u.link_deterioration;
4866
4867 if (size == sizeof(*x)) {
4868 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4869 "link deterioration: type %d, cnt %d\n",
4870 x->silence_notification_type,
4871 x->silence_count);
4872 memcpy(&priv->last_link_deterioration, x,
4873 sizeof(*x));
4874 } else {
4875 IPW_ERROR("Link Deterioration of wrong size %d "
4876 "(should be %zd)\n",
4877 size, sizeof(*x));
4878 }
4879 break;
4880 }
4881
4882 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4883 IPW_ERROR("Dino config\n");
4884 if (priv->hcmd
4885 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4886 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4887
4888 break;
4889 }
4890
4891 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4892 struct notif_beacon_state *x = &notif->u.beacon_state;
4893 if (size != sizeof(*x)) {
4894 IPW_ERROR
4895 ("Beacon state of wrong size %d (should "
4896 "be %zd)\n", size, sizeof(*x));
4897 break;
4898 }
4899
4900 if (le32_to_cpu(x->state) ==
4901 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4902 ipw_handle_missed_beacon(priv,
4903 le32_to_cpu(x->
4904 number));
4905
4906 break;
4907 }
4908
4909 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4910 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4911 if (size == sizeof(*x)) {
4912 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4913 "0x%02x station %d\n",
4914 x->key_state, x->security_type,
4915 x->station_index);
4916 break;
4917 }
4918
4919 IPW_ERROR
4920 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4921 size, sizeof(*x));
4922 break;
4923 }
4924
4925 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4926 struct notif_calibration *x = &notif->u.calibration;
4927
4928 if (size == sizeof(*x)) {
4929 memcpy(&priv->calib, x, sizeof(*x));
4930 IPW_DEBUG_INFO("TODO: Calibration\n");
4931 break;
4932 }
4933
4934 IPW_ERROR
4935 ("Calibration of wrong size %d (should be %zd)\n",
4936 size, sizeof(*x));
4937 break;
4938 }
4939
4940 case HOST_NOTIFICATION_NOISE_STATS:{
4941 if (size == sizeof(u32)) {
4942 priv->exp_avg_noise =
4943 exponential_average(priv->exp_avg_noise,
4944 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4945 DEPTH_NOISE);
4946 break;
4947 }
4948
4949 IPW_ERROR
4950 ("Noise stat is wrong size %d (should be %zd)\n",
4951 size, sizeof(u32));
4952 break;
4953 }
4954
4955 default:
4956 IPW_DEBUG_NOTIF("Unknown notification: "
4957 "subtype=%d,flags=0x%2x,size=%d\n",
4958 notif->subtype, notif->flags, size);
4959 }
4960 }
4961
4962 /**
4963  * Destroys all DMA structures and initialises them again
4964 *
4965 * @param priv
4966 * @return error code
4967 */
4968 static int ipw_queue_reset(struct ipw_priv *priv)
4969 {
4970 int rc = 0;
4971 /** @todo customize queue sizes */
4972 int nTx = 64, nTxCmd = 8;
4973 ipw_tx_queue_free(priv);
4974 /* Tx CMD queue */
4975 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4976 IPW_TX_CMD_QUEUE_READ_INDEX,
4977 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4978 IPW_TX_CMD_QUEUE_BD_BASE,
4979 IPW_TX_CMD_QUEUE_BD_SIZE);
4980 if (rc) {
4981 IPW_ERROR("Tx Cmd queue init failed\n");
4982 goto error;
4983 }
4984 /* Tx queue(s) */
4985 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4986 IPW_TX_QUEUE_0_READ_INDEX,
4987 IPW_TX_QUEUE_0_WRITE_INDEX,
4988 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4989 if (rc) {
4990 IPW_ERROR("Tx 0 queue init failed\n");
4991 goto error;
4992 }
4993 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4994 IPW_TX_QUEUE_1_READ_INDEX,
4995 IPW_TX_QUEUE_1_WRITE_INDEX,
4996 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4997 if (rc) {
4998 IPW_ERROR("Tx 1 queue init failed\n");
4999 goto error;
5000 }
5001 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
5002 IPW_TX_QUEUE_2_READ_INDEX,
5003 IPW_TX_QUEUE_2_WRITE_INDEX,
5004 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
5005 if (rc) {
5006 IPW_ERROR("Tx 2 queue init failed\n");
5007 goto error;
5008 }
5009 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
5010 IPW_TX_QUEUE_3_READ_INDEX,
5011 IPW_TX_QUEUE_3_WRITE_INDEX,
5012 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5013 if (rc) {
5014 IPW_ERROR("Tx 3 queue init failed\n");
5015 goto error;
5016 }
5017 /* statistics */
5018 priv->rx_bufs_min = 0;
5019 priv->rx_pend_max = 0;
5020 return rc;
5021
5022 error:
5023 ipw_tx_queue_free(priv);
5024 return rc;
5025 }
5026
5027 /**
5028  * Reclaim Tx queue entries no longer used by the NIC.
5029  *
5030  * When the FW advances the 'R' index, all entries between the old and
5031  * new 'R' index need to be reclaimed. As a result, some free space becomes
5032  * available. If there is enough free space (> low mark), wake the Tx queue.
5033 *
5034 * @note Need to protect against garbage in 'R' index
5035 * @param priv
5036 * @param txq
5037 * @param qindex
5038  * @return Number of used entries remaining in the queue
5039 */
5040 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5041 struct clx2_tx_queue *txq, int qindex)
5042 {
5043 u32 hw_tail;
5044 int used;
5045 struct clx2_queue *q = &txq->q;
5046
5047 hw_tail = ipw_read32(priv, q->reg_r);
5048 if (hw_tail >= q->n_bd) {
5049 IPW_ERROR
5050 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5051 hw_tail, q->n_bd);
5052 goto done;
5053 }
5054 for (; q->last_used != hw_tail;
5055 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5056 ipw_queue_tx_free_tfd(priv, txq);
5057 priv->tx_packets++;
5058 }
5059 done:
5060 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5061 (qindex >= 0))
5062 netif_wake_queue(priv->net_dev);
5063 used = q->first_empty - q->last_used;
5064 if (used < 0)
5065 used += q->n_bd;
5066
5067 return used;
5068 }
5069
5070 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5071 int len, int sync)
5072 {
5073 struct clx2_tx_queue *txq = &priv->txq_cmd;
5074 struct clx2_queue *q = &txq->q;
5075 struct tfd_frame *tfd;
5076
5077 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5078 IPW_ERROR("No space for Tx\n");
5079 return -EBUSY;
5080 }
5081
5082 tfd = &txq->bd[q->first_empty];
5083 txq->txb[q->first_empty] = NULL;
5084
5085 memset(tfd, 0, sizeof(*tfd));
5086 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5087 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5088 priv->hcmd_seq++;
5089 tfd->u.cmd.index = hcmd;
5090 tfd->u.cmd.length = len;
5091 memcpy(tfd->u.cmd.payload, buf, len);
5092 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5093 ipw_write32(priv, q->reg_w, q->first_empty);
5094 _ipw_read32(priv, 0x90);
5095
5096 return 0;
5097 }
5098
5099 /*
5100 * Rx theory of operation
5101 *
5102 * The host allocates 32 DMA target addresses and passes the host address
5103 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5104 * 0 to 31
5105 *
5106 * Rx Queue Indexes
5107 * The host/firmware share two index registers for managing the Rx buffers.
5108 *
5109 * The READ index maps to the first position that the firmware may be writing
5110 * to -- the driver can read up to (but not including) this position and get
5111 * good data.
5112 * The READ index is managed by the firmware once the card is enabled.
5113 *
5114 * The WRITE index maps to the last position the driver has read from -- the
5115  * position preceding WRITE is the last slot the firmware can place a packet in.
5116 *
5117 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5118 * WRITE = READ.
5119 *
5120 * During initialization the host sets up the READ queue position to the first
5121 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5122 *
5123 * When the firmware places a packet in a buffer it will advance the READ index
5124 * and fire the RX interrupt. The driver can then query the READ index and
5125 * process as many packets as possible, moving the WRITE index forward as it
5126 * resets the Rx queue buffers with new memory.
5127 *
5128 * The management in the driver is as follows:
5129 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5130 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5131  *   to replenish the ipw->rxq->rx_free list.
5132 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5133 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5134 * 'processed' and 'read' driver indexes as well)
5135 * + A received packet is processed and handed to the kernel network stack,
5136 * detached from the ipw->rxq. The driver 'processed' index is updated.
5137 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5138 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5139 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5140 * were enough free buffers and RX_STALLED is set it is cleared.
5141 *
5142 *
5143 * Driver sequence:
5144 *
5145 * ipw_rx_queue_alloc() Allocates rx_free
5146 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5147 * ipw_rx_queue_restock
5148 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5149 * queue, updates firmware pointers, and updates
5150 * the WRITE index. If insufficient rx_free buffers
5151 * are available, schedules ipw_rx_queue_replenish
5152 *
5153 * -- enable interrupts --
5154 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5155 * READ INDEX, detaching the SKB from the pool.
5156 * Moves the packet buffer from queue to rx_used.
5157 * Calls ipw_rx_queue_restock to refill any empty
5158 * slots.
5159 * ...
5160 *
5161 */
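/*
 * Illustrative sketch, not part of the driver: with the READ/WRITE
 * convention described above, the slots from WRITE (exclusive) up to READ
 * (exclusive) are on the driver's side of the ring and everything else
 * belongs to the firmware.  A hypothetical helper counting the slots
 * currently on the driver's side would be:
 *
 *	static int rx_driver_slots(u32 read, u32 write, u32 n)
 *	{
 *		// empty when WRITE == READ - 1, full when WRITE == READ
 *		return (read + n - write - 1) % n;
 *	}
 *
 * e.g. with n = 32: read = 10, write = 9 returns 0 (everything already
 * restocked), while read == write returns 31 (every slot but one is
 * waiting to be processed and handed back to the firmware).
 */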
5162
5163 /*
5164 * If there are slots in the RX queue that need to be restocked,
5165 * and we have free pre-allocated buffers, fill the ranks as much
5166  * and we have free pre-allocated buffers, refill as many slots as
5167  * we can, pulling from rx_free.
5168 * This moves the 'write' index forward to catch up with 'processed', and
5169 * also updates the memory address in the firmware to reference the new
5170 * target buffer.
5171 */
5172 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5173 {
5174 struct ipw_rx_queue *rxq = priv->rxq;
5175 struct list_head *element;
5176 struct ipw_rx_mem_buffer *rxb;
5177 unsigned long flags;
5178 int write;
5179
5180 spin_lock_irqsave(&rxq->lock, flags);
5181 write = rxq->write;
5182 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5183 element = rxq->rx_free.next;
5184 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5185 list_del(element);
5186
5187 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5188 rxb->dma_addr);
5189 rxq->queue[rxq->write] = rxb;
5190 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5191 rxq->free_count--;
5192 }
5193 spin_unlock_irqrestore(&rxq->lock, flags);
5194
5195 /* If the pre-allocated buffer pool is dropping low, schedule to
5196 * refill it */
5197 if (rxq->free_count <= RX_LOW_WATERMARK)
5198 schedule_work(&priv->rx_replenish);
5199
5200 /* If we've added more space for the firmware to place data, tell it */
5201 if (write != rxq->write)
5202 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5203 }
5204
5205 /*
5206  * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5207 * Also restock the Rx queue via ipw_rx_queue_restock.
5208 *
5209  * This is called as a scheduled work item (except during initialization).
5210 */
5211 static void ipw_rx_queue_replenish(void *data)
5212 {
5213 struct ipw_priv *priv = data;
5214 struct ipw_rx_queue *rxq = priv->rxq;
5215 struct list_head *element;
5216 struct ipw_rx_mem_buffer *rxb;
5217 unsigned long flags;
5218
5219 spin_lock_irqsave(&rxq->lock, flags);
5220 while (!list_empty(&rxq->rx_used)) {
5221 element = rxq->rx_used.next;
5222 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5223 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5224 if (!rxb->skb) {
5225 			printk(KERN_CRIT "%s: Cannot allocate SKB buffers.\n",
5226 priv->net_dev->name);
5227 /* We don't reschedule replenish work here -- we will
5228 * call the restock method and if it still needs
5229 * more buffers it will schedule replenish */
5230 break;
5231 }
5232 list_del(element);
5233
5234 rxb->dma_addr =
5235 pci_map_single(priv->pci_dev, rxb->skb->data,
5236 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5237
5238 list_add_tail(&rxb->list, &rxq->rx_free);
5239 rxq->free_count++;
5240 }
5241 spin_unlock_irqrestore(&rxq->lock, flags);
5242
5243 ipw_rx_queue_restock(priv);
5244 }
5245
5246 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5247 {
5248 struct ipw_priv *priv =
5249 container_of(work, struct ipw_priv, rx_replenish);
5250 mutex_lock(&priv->mutex);
5251 ipw_rx_queue_replenish(priv);
5252 mutex_unlock(&priv->mutex);
5253 }
5254
5255 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5256  * If an SKB has been detached, the pool entry needs to have its SKB set to NULL.
5257  * This free routine walks the list of pool entries and, if the SKB is
5258  * non-NULL, it is unmapped and freed.
5259 */
5260 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5261 {
5262 int i;
5263
5264 if (!rxq)
5265 return;
5266
5267 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5268 if (rxq->pool[i].skb != NULL) {
5269 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5270 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5271 dev_kfree_skb(rxq->pool[i].skb);
5272 }
5273 }
5274
5275 kfree(rxq);
5276 }
5277
5278 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5279 {
5280 struct ipw_rx_queue *rxq;
5281 int i;
5282
5283 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5284 if (unlikely(!rxq)) {
5285 IPW_ERROR("memory allocation failed\n");
5286 return NULL;
5287 }
5288 spin_lock_init(&rxq->lock);
5289 INIT_LIST_HEAD(&rxq->rx_free);
5290 INIT_LIST_HEAD(&rxq->rx_used);
5291
5292 /* Fill the rx_used queue with _all_ of the Rx buffers */
5293 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5294 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5295
5296 	/* Set up so that we have processed and used all buffers, but have
5297 * not restocked the Rx queue with fresh buffers */
5298 rxq->read = rxq->write = 0;
5299 rxq->free_count = 0;
5300
5301 return rxq;
5302 }
5303
5304 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5305 {
5306 rate &= ~LIBIPW_BASIC_RATE_MASK;
5307 if (ieee_mode == IEEE_A) {
5308 switch (rate) {
5309 case LIBIPW_OFDM_RATE_6MB:
5310 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5311 1 : 0;
5312 case LIBIPW_OFDM_RATE_9MB:
5313 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5314 1 : 0;
5315 case LIBIPW_OFDM_RATE_12MB:
5316 return priv->
5317 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5318 case LIBIPW_OFDM_RATE_18MB:
5319 return priv->
5320 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5321 case LIBIPW_OFDM_RATE_24MB:
5322 return priv->
5323 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5324 case LIBIPW_OFDM_RATE_36MB:
5325 return priv->
5326 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5327 case LIBIPW_OFDM_RATE_48MB:
5328 return priv->
5329 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5330 case LIBIPW_OFDM_RATE_54MB:
5331 return priv->
5332 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5333 default:
5334 return 0;
5335 }
5336 }
5337
5338 /* B and G mixed */
5339 switch (rate) {
5340 case LIBIPW_CCK_RATE_1MB:
5341 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5342 case LIBIPW_CCK_RATE_2MB:
5343 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5344 case LIBIPW_CCK_RATE_5MB:
5345 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5346 case LIBIPW_CCK_RATE_11MB:
5347 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5348 }
5349
5350 /* If we are limited to B modulations, bail at this point */
5351 if (ieee_mode == IEEE_B)
5352 return 0;
5353
5354 /* G */
5355 switch (rate) {
5356 case LIBIPW_OFDM_RATE_6MB:
5357 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5358 case LIBIPW_OFDM_RATE_9MB:
5359 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5360 case LIBIPW_OFDM_RATE_12MB:
5361 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5362 case LIBIPW_OFDM_RATE_18MB:
5363 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5364 case LIBIPW_OFDM_RATE_24MB:
5365 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5366 case LIBIPW_OFDM_RATE_36MB:
5367 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5368 case LIBIPW_OFDM_RATE_48MB:
5369 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5370 case LIBIPW_OFDM_RATE_54MB:
5371 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5372 }
5373
5374 return 0;
5375 }
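/*
 * Illustrative sketch, not part of the driver: rates arrive here in the
 * standard 802.11 Supported Rates encoding -- units of 500 kb/s, with the
 * top bit flagging a BSS basic rate.  Stripping LIBIPW_BASIC_RATE_MASK
 * first lets the switches above match either form of the same rate.
 * Hypothetical values:
 *
 *	u8 rate = 0x96;				// 11 Mb/s, marked basic
 *	rate &= ~LIBIPW_BASIC_RATE_MASK;	// 0x16, plain 11 Mb/s
 *
 * so ipw_is_rate_in_mask(priv, IEEE_G, 0x96) and
 * ipw_is_rate_in_mask(priv, IEEE_G, 0x16) give the same answer.
 */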
5376
5377 static int ipw_compatible_rates(struct ipw_priv *priv,
5378 const struct libipw_network *network,
5379 struct ipw_supported_rates *rates)
5380 {
5381 int num_rates, i;
5382
5383 memset(rates, 0, sizeof(*rates));
5384 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5385 rates->num_rates = 0;
5386 for (i = 0; i < num_rates; i++) {
5387 if (!ipw_is_rate_in_mask(priv, network->mode,
5388 network->rates[i])) {
5389
5390 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5391 IPW_DEBUG_SCAN("Adding masked mandatory "
5392 "rate %02X\n",
5393 network->rates[i]);
5394 rates->supported_rates[rates->num_rates++] =
5395 network->rates[i];
5396 continue;
5397 }
5398
5399 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5400 network->rates[i], priv->rates_mask);
5401 continue;
5402 }
5403
5404 rates->supported_rates[rates->num_rates++] = network->rates[i];
5405 }
5406
5407 num_rates = min(network->rates_ex_len,
5408 (u8) (IPW_MAX_RATES - num_rates));
5409 for (i = 0; i < num_rates; i++) {
5410 if (!ipw_is_rate_in_mask(priv, network->mode,
5411 network->rates_ex[i])) {
5412 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5413 IPW_DEBUG_SCAN("Adding masked mandatory "
5414 "rate %02X\n",
5415 network->rates_ex[i]);
5416 rates->supported_rates[rates->num_rates++] =
5417 				    network->rates_ex[i];
5418 continue;
5419 }
5420
5421 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5422 network->rates_ex[i], priv->rates_mask);
5423 continue;
5424 }
5425
5426 rates->supported_rates[rates->num_rates++] =
5427 network->rates_ex[i];
5428 }
5429
5430 return 1;
5431 }
5432
5433 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5434 const struct ipw_supported_rates *src)
5435 {
5436 u8 i;
5437 for (i = 0; i < src->num_rates; i++)
5438 dest->supported_rates[i] = src->supported_rates[i];
5439 dest->num_rates = src->num_rates;
5440 }
5441
5442 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5443 * mask should ever be used -- right now all callers to add the scan rates are
5444 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5445 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5446 u8 modulation, u32 rate_mask)
5447 {
5448 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5449 LIBIPW_BASIC_RATE_MASK : 0;
5450
5451 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5452 rates->supported_rates[rates->num_rates++] =
5453 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5454
5455 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5456 rates->supported_rates[rates->num_rates++] =
5457 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5458
5459 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5460 rates->supported_rates[rates->num_rates++] = basic_mask |
5461 LIBIPW_CCK_RATE_5MB;
5462
5463 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5464 rates->supported_rates[rates->num_rates++] = basic_mask |
5465 LIBIPW_CCK_RATE_11MB;
5466 }
5467
5468 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5469 u8 modulation, u32 rate_mask)
5470 {
5471 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5472 LIBIPW_BASIC_RATE_MASK : 0;
5473
5474 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5475 rates->supported_rates[rates->num_rates++] = basic_mask |
5476 LIBIPW_OFDM_RATE_6MB;
5477
5478 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5479 rates->supported_rates[rates->num_rates++] =
5480 LIBIPW_OFDM_RATE_9MB;
5481
5482 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5483 rates->supported_rates[rates->num_rates++] = basic_mask |
5484 LIBIPW_OFDM_RATE_12MB;
5485
5486 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5487 rates->supported_rates[rates->num_rates++] =
5488 LIBIPW_OFDM_RATE_18MB;
5489
5490 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5491 rates->supported_rates[rates->num_rates++] = basic_mask |
5492 LIBIPW_OFDM_RATE_24MB;
5493
5494 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5495 rates->supported_rates[rates->num_rates++] =
5496 LIBIPW_OFDM_RATE_36MB;
5497
5498 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5499 rates->supported_rates[rates->num_rates++] =
5500 LIBIPW_OFDM_RATE_48MB;
5501
5502 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5503 rates->supported_rates[rates->num_rates++] =
5504 LIBIPW_OFDM_RATE_54MB;
5505 }
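/*
 * Illustrative sketch, not part of the driver: a hypothetical caller
 * building a G-mode scan rate set from full CCK and OFDM masks (the
 * combined-mask macro names are assumed from libipw):
 *
 *	struct ipw_supported_rates rates = { .num_rates = 0 };
 *
 *	ipw_add_cck_scan_rates(&rates, LIBIPW_CCK_MODULATION,
 *			       LIBIPW_CCK_RATES_MASK);
 *	ipw_add_ofdm_scan_rates(&rates, LIBIPW_CCK_MODULATION,
 *				LIBIPW_OFDM_RATES_MASK);
 *
 * As the TODO above the CCK helper notes, CCK modulation leaves basic_mask
 * at 0, so only the hard-coded 1 and 2 Mb/s entries carry
 * LIBIPW_BASIC_RATE_MASK; the other ten rates are added as plain supported
 * rates, giving rates.num_rates == 12.
 */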
5506
5507 struct ipw_network_match {
5508 struct libipw_network *network;
5509 struct ipw_supported_rates rates;
5510 };
5511
5512 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5513 struct ipw_network_match *match,
5514 struct libipw_network *network,
5515 int roaming)
5516 {
5517 struct ipw_supported_rates rates;
5518 DECLARE_SSID_BUF(ssid);
5519
5520 /* Verify that this network's capability is compatible with the
5521 * current mode (AdHoc or Infrastructure) */
5522 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5523 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5524 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5525 "capability mismatch.\n",
5526 print_ssid(ssid, network->ssid,
5527 network->ssid_len),
5528 network->bssid);
5529 return 0;
5530 }
5531
5532 if (unlikely(roaming)) {
5533 		/* If we are roaming, then check whether this is a valid
5534 		 * network to try to roam to */
5535 if ((network->ssid_len != match->network->ssid_len) ||
5536 memcmp(network->ssid, match->network->ssid,
5537 network->ssid_len)) {
5538 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5539 "because of non-network ESSID.\n",
5540 print_ssid(ssid, network->ssid,
5541 network->ssid_len),
5542 network->bssid);
5543 return 0;
5544 }
5545 } else {
5546 /* If an ESSID has been configured then compare the broadcast
5547 * ESSID to ours */
5548 if ((priv->config & CFG_STATIC_ESSID) &&
5549 ((network->ssid_len != priv->essid_len) ||
5550 memcmp(network->ssid, priv->essid,
5551 min(network->ssid_len, priv->essid_len)))) {
5552 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5553
5554 strncpy(escaped,
5555 print_ssid(ssid, network->ssid,
5556 network->ssid_len),
5557 sizeof(escaped));
5558 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5559 "because of ESSID mismatch: '%s'.\n",
5560 escaped, network->bssid,
5561 print_ssid(ssid, priv->essid,
5562 priv->essid_len));
5563 return 0;
5564 }
5565 }
5566
5567 	/* If the old network is preferable to this one, don't bother
5568 * testing everything else. */
5569
5570 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5571 		IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5572 "current network.\n",
5573 print_ssid(ssid, match->network->ssid,
5574 match->network->ssid_len));
5575 return 0;
5576 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5577 		IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5578 "current network.\n",
5579 print_ssid(ssid, match->network->ssid,
5580 match->network->ssid_len));
5581 return 0;
5582 }
5583
5584 /* Now go through and see if the requested network is valid... */
5585 if (priv->ieee->scan_age != 0 &&
5586 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5587 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5588 "because of age: %ums.\n",
5589 print_ssid(ssid, network->ssid,
5590 network->ssid_len),
5591 network->bssid,
5592 jiffies_to_msecs(jiffies -
5593 network->last_scanned));
5594 return 0;
5595 }
5596
5597 if ((priv->config & CFG_STATIC_CHANNEL) &&
5598 (network->channel != priv->channel)) {
5599 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5600 "because of channel mismatch: %d != %d.\n",
5601 print_ssid(ssid, network->ssid,
5602 network->ssid_len),
5603 network->bssid,
5604 network->channel, priv->channel);
5605 return 0;
5606 }
5607
5608 /* Verify privacy compatibility */
5609 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5610 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5611 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5612 "because of privacy mismatch: %s != %s.\n",
5613 print_ssid(ssid, network->ssid,
5614 network->ssid_len),
5615 network->bssid,
5616 priv->
5617 capability & CAP_PRIVACY_ON ? "on" : "off",
5618 network->
5619 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5620 "off");
5621 return 0;
5622 }
5623
5624 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5625 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5626 "because of the same BSSID match: %pM"
5627 ".\n", print_ssid(ssid, network->ssid,
5628 network->ssid_len),
5629 network->bssid,
5630 priv->bssid);
5631 return 0;
5632 }
5633
5634 /* Filter out any incompatible freq / mode combinations */
5635 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5636 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5637 "because of invalid frequency/mode "
5638 "combination.\n",
5639 print_ssid(ssid, network->ssid,
5640 network->ssid_len),
5641 network->bssid);
5642 return 0;
5643 }
5644
5645 /* Ensure that the rates supported by the driver are compatible with
5646 * this AP, including verification of basic rates (mandatory) */
5647 if (!ipw_compatible_rates(priv, network, &rates)) {
5648 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5649 "because configured rate mask excludes "
5650 "AP mandatory rate.\n",
5651 print_ssid(ssid, network->ssid,
5652 network->ssid_len),
5653 network->bssid);
5654 return 0;
5655 }
5656
5657 if (rates.num_rates == 0) {
5658 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5659 "because of no compatible rates.\n",
5660 print_ssid(ssid, network->ssid,
5661 network->ssid_len),
5662 network->bssid);
5663 return 0;
5664 }
5665
5666 	/* TODO: Perform any further minimal comparative tests.  We do not
5667 * want to put too much policy logic here; intelligent scan selection
5668 * should occur within a generic IEEE 802.11 user space tool. */
5669
5670 /* Set up 'new' AP to this network */
5671 ipw_copy_rates(&match->rates, &rates);
5672 match->network = network;
5673 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5674 print_ssid(ssid, network->ssid, network->ssid_len),
5675 network->bssid);
5676
5677 return 1;
5678 }
5679
5680 static void ipw_merge_adhoc_network(struct work_struct *work)
5681 {
5682 DECLARE_SSID_BUF(ssid);
5683 struct ipw_priv *priv =
5684 container_of(work, struct ipw_priv, merge_networks);
5685 struct libipw_network *network = NULL;
5686 struct ipw_network_match match = {
5687 .network = priv->assoc_network
5688 };
5689
5690 if ((priv->status & STATUS_ASSOCIATED) &&
5691 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5692 /* First pass through ROAM process -- look for a better
5693 * network */
5694 unsigned long flags;
5695
5696 spin_lock_irqsave(&priv->ieee->lock, flags);
5697 list_for_each_entry(network, &priv->ieee->network_list, list) {
5698 if (network != priv->assoc_network)
5699 ipw_find_adhoc_network(priv, &match, network,
5700 1);
5701 }
5702 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5703
5704 if (match.network == priv->assoc_network) {
5705 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5706 "merge to.\n");
5707 return;
5708 }
5709
5710 mutex_lock(&priv->mutex);
5711 		if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5712 IPW_DEBUG_MERGE("remove network %s\n",
5713 print_ssid(ssid, priv->essid,
5714 priv->essid_len));
5715 ipw_remove_current_network(priv);
5716 }
5717
5718 ipw_disassociate(priv);
5719 priv->assoc_network = match.network;
5720 mutex_unlock(&priv->mutex);
5721 return;
5722 }
5723 }
5724
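/*
 * Check whether 'network' is a viable association candidate and better
 * than the best match found so far.  Returns 1 and updates 'match' if
 * so, otherwise returns 0.
 */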
5725 static int ipw_best_network(struct ipw_priv *priv,
5726 struct ipw_network_match *match,
5727 struct libipw_network *network, int roaming)
5728 {
5729 struct ipw_supported_rates rates;
5730 DECLARE_SSID_BUF(ssid);
5731
5732 /* Verify that this network's capability is compatible with the
5733 * current mode (AdHoc or Infrastructure) */
5734 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5735 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5736 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5737 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5738 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5739 "capability mismatch.\n",
5740 print_ssid(ssid, network->ssid,
5741 network->ssid_len),
5742 network->bssid);
5743 return 0;
5744 }
5745
5746 if (unlikely(roaming)) {
5747 		/* If we are roaming, then check that this is a valid
5748 		 * network to try to roam to */
5749 if ((network->ssid_len != match->network->ssid_len) ||
5750 memcmp(network->ssid, match->network->ssid,
5751 network->ssid_len)) {
5752 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5753 "because of non-network ESSID.\n",
5754 print_ssid(ssid, network->ssid,
5755 network->ssid_len),
5756 network->bssid);
5757 return 0;
5758 }
5759 } else {
5760 /* If an ESSID has been configured then compare the broadcast
5761 * ESSID to ours */
5762 if ((priv->config & CFG_STATIC_ESSID) &&
5763 ((network->ssid_len != priv->essid_len) ||
5764 memcmp(network->ssid, priv->essid,
5765 min(network->ssid_len, priv->essid_len)))) {
5766 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5767 strncpy(escaped,
5768 print_ssid(ssid, network->ssid,
5769 network->ssid_len),
5770 sizeof(escaped));
5771 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5772 "because of ESSID mismatch: '%s'.\n",
5773 escaped, network->bssid,
5774 print_ssid(ssid, priv->essid,
5775 priv->essid_len));
5776 return 0;
5777 }
5778 }
5779
5780 /* If the old network rate is better than this one, don't bother
5781 * testing everything else. */
5782 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5783 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5784 strncpy(escaped,
5785 print_ssid(ssid, network->ssid, network->ssid_len),
5786 sizeof(escaped));
5787 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5788 "'%s (%pM)' has a stronger signal.\n",
5789 escaped, network->bssid,
5790 print_ssid(ssid, match->network->ssid,
5791 match->network->ssid_len),
5792 match->network->bssid);
5793 return 0;
5794 }
5795
5796 /* If this network has already had an association attempt within the
5797 * last 3 seconds, do not try and associate again... */
5798 if (network->last_associate &&
5799 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5800 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5801 "because of storming (%ums since last "
5802 "assoc attempt).\n",
5803 print_ssid(ssid, network->ssid,
5804 network->ssid_len),
5805 network->bssid,
5806 jiffies_to_msecs(jiffies -
5807 network->last_associate));
5808 return 0;
5809 }
5810
5811 /* Now go through and see if the requested network is valid... */
5812 if (priv->ieee->scan_age != 0 &&
5813 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5814 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5815 "because of age: %ums.\n",
5816 print_ssid(ssid, network->ssid,
5817 network->ssid_len),
5818 network->bssid,
5819 jiffies_to_msecs(jiffies -
5820 network->last_scanned));
5821 return 0;
5822 }
5823
5824 if ((priv->config & CFG_STATIC_CHANNEL) &&
5825 (network->channel != priv->channel)) {
5826 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5827 "because of channel mismatch: %d != %d.\n",
5828 print_ssid(ssid, network->ssid,
5829 network->ssid_len),
5830 network->bssid,
5831 network->channel, priv->channel);
5832 return 0;
5833 }
5834
5835 /* Verify privacy compatibility */
5836 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5837 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5838 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5839 "because of privacy mismatch: %s != %s.\n",
5840 print_ssid(ssid, network->ssid,
5841 network->ssid_len),
5842 network->bssid,
5843 priv->capability & CAP_PRIVACY_ON ? "on" :
5844 "off",
5845 network->capability &
5846 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5847 return 0;
5848 }
5849
5850 if ((priv->config & CFG_STATIC_BSSID) &&
5851 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5852 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5853 "because of BSSID mismatch: %pM.\n",
5854 print_ssid(ssid, network->ssid,
5855 network->ssid_len),
5856 network->bssid, priv->bssid);
5857 return 0;
5858 }
5859
5860 /* Filter out any incompatible freq / mode combinations */
5861 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5862 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5863 "because of invalid frequency/mode "
5864 "combination.\n",
5865 print_ssid(ssid, network->ssid,
5866 network->ssid_len),
5867 network->bssid);
5868 return 0;
5869 }
5870
5871 /* Filter out invalid channel in current GEO */
5872 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5873 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5874 "because of invalid channel in current GEO\n",
5875 print_ssid(ssid, network->ssid,
5876 network->ssid_len),
5877 network->bssid);
5878 return 0;
5879 }
5880
5881 /* Ensure that the rates supported by the driver are compatible with
5882 * this AP, including verification of basic rates (mandatory) */
5883 if (!ipw_compatible_rates(priv, network, &rates)) {
5884 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5885 "because configured rate mask excludes "
5886 "AP mandatory rate.\n",
5887 print_ssid(ssid, network->ssid,
5888 network->ssid_len),
5889 network->bssid);
5890 return 0;
5891 }
5892
5893 if (rates.num_rates == 0) {
5894 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5895 "because of no compatible rates.\n",
5896 print_ssid(ssid, network->ssid,
5897 network->ssid_len),
5898 network->bssid);
5899 return 0;
5900 }
5901
5902 	/* TODO: Perform any further minimal comparative tests.  We do not
5903 * want to put too much policy logic here; intelligent scan selection
5904 * should occur within a generic IEEE 802.11 user space tool. */
5905
5906 /* Set up 'new' AP to this network */
5907 ipw_copy_rates(&match->rates, &rates);
5908 match->network = network;
5909
5910 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5911 print_ssid(ssid, network->ssid, network->ssid_len),
5912 network->bssid);
5913
5914 return 1;
5915 }
5916
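/*
 * Build the description of a locally created IBSS from the current
 * configuration: pick a valid band/channel, generate a BSSID and fill
 * in the SSID, capability bits and supported rates.
 */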
5917 static void ipw_adhoc_create(struct ipw_priv *priv,
5918 struct libipw_network *network)
5919 {
5920 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5921 int i;
5922
5923 /*
5924 * For the purposes of scanning, we can set our wireless mode
5925 * to trigger scans across combinations of bands, but when it
5926 	 * comes to creating a new ad-hoc network, we have to tell the FW
5927 * exactly which band to use.
5928 *
5929 * We also have the possibility of an invalid channel for the
5930 	 * chosen band.  Attempting to create a new ad-hoc network
5931 * with an invalid channel for wireless mode will trigger a
5932 * FW fatal error.
5933 *
5934 */
5935 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5936 case LIBIPW_52GHZ_BAND:
5937 network->mode = IEEE_A;
5938 i = libipw_channel_to_index(priv->ieee, priv->channel);
5939 BUG_ON(i == -1);
5940 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5941 IPW_WARNING("Overriding invalid channel\n");
5942 priv->channel = geo->a[0].channel;
5943 }
5944 break;
5945
5946 case LIBIPW_24GHZ_BAND:
5947 if (priv->ieee->mode & IEEE_G)
5948 network->mode = IEEE_G;
5949 else
5950 network->mode = IEEE_B;
5951 i = libipw_channel_to_index(priv->ieee, priv->channel);
5952 BUG_ON(i == -1);
5953 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5954 IPW_WARNING("Overriding invalid channel\n");
5955 priv->channel = geo->bg[0].channel;
5956 }
5957 break;
5958
5959 default:
5960 IPW_WARNING("Overriding invalid channel\n");
5961 if (priv->ieee->mode & IEEE_A) {
5962 network->mode = IEEE_A;
5963 priv->channel = geo->a[0].channel;
5964 } else if (priv->ieee->mode & IEEE_G) {
5965 network->mode = IEEE_G;
5966 priv->channel = geo->bg[0].channel;
5967 } else {
5968 network->mode = IEEE_B;
5969 priv->channel = geo->bg[0].channel;
5970 }
5971 break;
5972 }
5973
5974 network->channel = priv->channel;
5975 priv->config |= CFG_ADHOC_PERSIST;
5976 ipw_create_bssid(priv, network->bssid);
5977 network->ssid_len = priv->essid_len;
5978 memcpy(network->ssid, priv->essid, priv->essid_len);
5979 memset(&network->stats, 0, sizeof(network->stats));
5980 network->capability = WLAN_CAPABILITY_IBSS;
5981 if (!(priv->config & CFG_PREAMBLE_LONG))
5982 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5983 if (priv->capability & CAP_PRIVACY_ON)
5984 network->capability |= WLAN_CAPABILITY_PRIVACY;
5985 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5986 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5987 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5988 memcpy(network->rates_ex,
5989 &priv->rates.supported_rates[network->rates_len],
5990 network->rates_ex_len);
5991 network->last_scanned = 0;
5992 network->flags = 0;
5993 network->last_associate = 0;
5994 network->time_stamp[0] = 0;
5995 network->time_stamp[1] = 0;
5996 network->beacon_interval = 100; /* Default */
5997 network->listen_interval = 10; /* Default */
5998 network->atim_window = 0; /* Default */
5999 network->wpa_ie_len = 0;
6000 network->rsn_ie_len = 0;
6001 }
6002
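/*
 * Upload the TKIP/CCMP transmit key at 'index' to the firmware; does
 * nothing if that key slot has not been configured.
 */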
6003 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
6004 {
6005 struct ipw_tgi_tx_key key;
6006
6007 if (!(priv->ieee->sec.flags & (1 << index)))
6008 return;
6009
6010 key.key_id = index;
6011 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
6012 key.security_type = type;
6013 key.station_index = 0; /* always 0 for BSS */
6014 key.flags = 0;
6015 /* 0 for new key; previous value of counter (after fatal error) */
6016 key.tx_counter[0] = cpu_to_le32(0);
6017 key.tx_counter[1] = cpu_to_le32(0);
6018
6019 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
6020 }
6021
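/*
 * Push each configured key slot (0-3) to the firmware with the given
 * security type; slots that have no key set are skipped.
 */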
6022 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
6023 {
6024 struct ipw_wep_key key;
6025 int i;
6026
6027 key.cmd_id = DINO_CMD_WEP_KEY;
6028 key.seq_num = 0;
6029
6030 	/* Note: AES keys cannot be set multiple times.
6031 	 * Only set them the first time. */
6032 for (i = 0; i < 4; i++) {
6033 key.key_index = i | type;
6034 if (!(priv->ieee->sec.flags & (1 << i))) {
6035 key.key_size = 0;
6036 continue;
6037 }
6038
6039 key.key_size = priv->ieee->sec.key_sizes[i];
6040 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
6041
6042 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6043 }
6044 }
6045
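/*
 * Select whether unicast frames are decrypted by the firmware or on
 * the host for the given security level; TKIP (SEC_LEVEL_2) keeps
 * decryption on the host.
 */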
6046 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6047 {
6048 if (priv->ieee->host_encrypt)
6049 return;
6050
6051 switch (level) {
6052 case SEC_LEVEL_3:
6053 priv->sys_config.disable_unicast_decryption = 0;
6054 priv->ieee->host_decrypt = 0;
6055 break;
6056 case SEC_LEVEL_2:
6057 priv->sys_config.disable_unicast_decryption = 1;
6058 priv->ieee->host_decrypt = 1;
6059 break;
6060 case SEC_LEVEL_1:
6061 priv->sys_config.disable_unicast_decryption = 0;
6062 priv->ieee->host_decrypt = 0;
6063 break;
6064 case SEC_LEVEL_0:
6065 priv->sys_config.disable_unicast_decryption = 1;
6066 break;
6067 default:
6068 break;
6069 }
6070 }
6071
6072 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6073 {
6074 if (priv->ieee->host_encrypt)
6075 return;
6076
6077 switch (level) {
6078 case SEC_LEVEL_3:
6079 priv->sys_config.disable_multicast_decryption = 0;
6080 break;
6081 case SEC_LEVEL_2:
6082 priv->sys_config.disable_multicast_decryption = 1;
6083 break;
6084 case SEC_LEVEL_1:
6085 priv->sys_config.disable_multicast_decryption = 0;
6086 break;
6087 case SEC_LEVEL_0:
6088 priv->sys_config.disable_multicast_decryption = 1;
6089 break;
6090 default:
6091 break;
6092 }
6093 }
6094
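/*
 * Program the firmware with the keys required for the current security
 * level (CCMP, TKIP or WEP).
 */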
6095 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6096 {
6097 switch (priv->ieee->sec.level) {
6098 case SEC_LEVEL_3:
6099 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6100 ipw_send_tgi_tx_key(priv,
6101 DCT_FLAG_EXT_SECURITY_CCM,
6102 priv->ieee->sec.active_key);
6103
6104 if (!priv->ieee->host_mc_decrypt)
6105 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6106 break;
6107 case SEC_LEVEL_2:
6108 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6109 ipw_send_tgi_tx_key(priv,
6110 DCT_FLAG_EXT_SECURITY_TKIP,
6111 priv->ieee->sec.active_key);
6112 break;
6113 case SEC_LEVEL_1:
6114 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6115 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6116 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6117 break;
6118 case SEC_LEVEL_0:
6119 default:
6120 break;
6121 }
6122 }
6123
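/*
 * Ad-hoc beacon watchdog: disassociate once too many beacons have been
 * missed (unless ad-hoc persistence is set), otherwise re-arm for the
 * next beacon interval.
 */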
6124 static void ipw_adhoc_check(void *data)
6125 {
6126 struct ipw_priv *priv = data;
6127
6128 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6129 !(priv->config & CFG_ADHOC_PERSIST)) {
6130 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6131 IPW_DL_STATE | IPW_DL_ASSOC,
6132 "Missed beacon: %d - disassociate\n",
6133 priv->missed_adhoc_beacons);
6134 ipw_remove_current_network(priv);
6135 ipw_disassociate(priv);
6136 return;
6137 }
6138
6139 schedule_delayed_work(&priv->adhoc_check,
6140 le16_to_cpu(priv->assoc_request.beacon_interval));
6141 }
6142
6143 static void ipw_bg_adhoc_check(struct work_struct *work)
6144 {
6145 struct ipw_priv *priv =
6146 container_of(work, struct ipw_priv, adhoc_check.work);
6147 mutex_lock(&priv->mutex);
6148 ipw_adhoc_check(priv);
6149 mutex_unlock(&priv->mutex);
6150 }
6151
6152 static void ipw_debug_config(struct ipw_priv *priv)
6153 {
6154 DECLARE_SSID_BUF(ssid);
6155 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6156 "[CFG 0x%08X]\n", priv->config);
6157 if (priv->config & CFG_STATIC_CHANNEL)
6158 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6159 else
6160 IPW_DEBUG_INFO("Channel unlocked.\n");
6161 if (priv->config & CFG_STATIC_ESSID)
6162 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6163 print_ssid(ssid, priv->essid, priv->essid_len));
6164 else
6165 IPW_DEBUG_INFO("ESSID unlocked.\n");
6166 if (priv->config & CFG_STATIC_BSSID)
6167 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6168 else
6169 IPW_DEBUG_INFO("BSSID unlocked.\n");
6170 if (priv->capability & CAP_PRIVACY_ON)
6171 IPW_DEBUG_INFO("PRIVACY on\n");
6172 else
6173 IPW_DEBUG_INFO("PRIVACY off\n");
6174 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6175 }
6176
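/*
 * Translate the user-configured rate mask into the firmware's
 * fixed-rate override for the band currently in use.
 */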
6177 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6178 {
6179 /* TODO: Verify that this works... */
6180 struct ipw_fixed_rate fr;
6181 u32 reg;
6182 u16 mask = 0;
6183 u16 new_tx_rates = priv->rates_mask;
6184
6185 /* Identify 'current FW band' and match it with the fixed
6186 * Tx rates */
6187
6188 switch (priv->ieee->freq_band) {
6189 case LIBIPW_52GHZ_BAND: /* A only */
6190 /* IEEE_A */
6191 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6192 /* Invalid fixed rate mask */
6193 IPW_DEBUG_WX
6194 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6195 new_tx_rates = 0;
6196 break;
6197 }
6198
6199 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6200 break;
6201
6202 default: /* 2.4Ghz or Mixed */
6203 /* IEEE_B */
6204 if (mode == IEEE_B) {
6205 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6206 /* Invalid fixed rate mask */
6207 IPW_DEBUG_WX
6208 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6209 new_tx_rates = 0;
6210 }
6211 break;
6212 }
6213
6214 /* IEEE_G */
6215 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6216 LIBIPW_OFDM_RATES_MASK)) {
6217 /* Invalid fixed rate mask */
6218 IPW_DEBUG_WX
6219 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6220 new_tx_rates = 0;
6221 break;
6222 }
6223
6224 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6225 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6226 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6227 }
6228
6229 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6230 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6231 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6232 }
6233
6234 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6235 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6236 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6237 }
6238
6239 new_tx_rates |= mask;
6240 break;
6241 }
6242
6243 fr.tx_rates = cpu_to_le16(new_tx_rates);
6244
6245 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6246 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6247 }
6248
6249 static void ipw_abort_scan(struct ipw_priv *priv)
6250 {
6251 int err;
6252
6253 if (priv->status & STATUS_SCAN_ABORTING) {
6254 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6255 return;
6256 }
6257 priv->status |= STATUS_SCAN_ABORTING;
6258
6259 err = ipw_send_scan_abort(priv);
6260 if (err)
6261 IPW_DEBUG_HC("Request to abort scan failed.\n");
6262 }
6263
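/*
 * Fill in the channel list of a scan request.  Each band's block
 * starts with a header byte encoding the band and the number of
 * channels that follow; passive-only channels are forced to a passive
 * scan type.
 */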
6264 static void ipw_add_scan_channels(struct ipw_priv *priv,
6265 struct ipw_scan_request_ext *scan,
6266 int scan_type)
6267 {
6268 int channel_index = 0;
6269 const struct libipw_geo *geo;
6270 int i;
6271
6272 geo = libipw_get_geo(priv->ieee);
6273
6274 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6275 int start = channel_index;
6276 for (i = 0; i < geo->a_channels; i++) {
6277 if ((priv->status & STATUS_ASSOCIATED) &&
6278 geo->a[i].channel == priv->channel)
6279 continue;
6280 channel_index++;
6281 scan->channels_list[channel_index] = geo->a[i].channel;
6282 ipw_set_scan_type(scan, channel_index,
6283 geo->a[i].
6284 flags & LIBIPW_CH_PASSIVE_ONLY ?
6285 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6286 scan_type);
6287 }
6288
6289 if (start != channel_index) {
6290 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6291 (channel_index - start);
6292 channel_index++;
6293 }
6294 }
6295
6296 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6297 int start = channel_index;
6298 if (priv->config & CFG_SPEED_SCAN) {
6299 int index;
6300 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6301 /* nop out the list */
6302 [0] = 0
6303 };
6304
6305 u8 channel;
6306 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6307 channel =
6308 priv->speed_scan[priv->speed_scan_pos];
6309 if (channel == 0) {
6310 priv->speed_scan_pos = 0;
6311 channel = priv->speed_scan[0];
6312 }
6313 if ((priv->status & STATUS_ASSOCIATED) &&
6314 channel == priv->channel) {
6315 priv->speed_scan_pos++;
6316 continue;
6317 }
6318
6319 				/* If this channel has already been
6320 				 * added to the scan, break from the
6321 				 * loop; it will then be the first
6322 				 * channel in the next scan.
6323 				 */
6324 if (channels[channel - 1] != 0)
6325 break;
6326
6327 channels[channel - 1] = 1;
6328 priv->speed_scan_pos++;
6329 channel_index++;
6330 scan->channels_list[channel_index] = channel;
6331 index =
6332 libipw_channel_to_index(priv->ieee, channel);
6333 ipw_set_scan_type(scan, channel_index,
6334 geo->bg[index].
6335 flags &
6336 LIBIPW_CH_PASSIVE_ONLY ?
6337 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6338 : scan_type);
6339 }
6340 } else {
6341 for (i = 0; i < geo->bg_channels; i++) {
6342 if ((priv->status & STATUS_ASSOCIATED) &&
6343 geo->bg[i].channel == priv->channel)
6344 continue;
6345 channel_index++;
6346 scan->channels_list[channel_index] =
6347 geo->bg[i].channel;
6348 ipw_set_scan_type(scan, channel_index,
6349 geo->bg[i].
6350 flags &
6351 LIBIPW_CH_PASSIVE_ONLY ?
6352 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6353 : scan_type);
6354 }
6355 }
6356
6357 if (start != channel_index) {
6358 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6359 (channel_index - start);
6360 }
6361 }
6362 }
6363
6364 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6365 {
6366 /* staying on passive channels longer than the DTIM interval during a
6367 * scan, while associated, causes the firmware to cancel the scan
6368 * without notification. Hence, don't stay on passive channels longer
6369 * than the beacon interval.
6370 */
6371 if (priv->status & STATUS_ASSOCIATED
6372 && priv->assoc_network->beacon_interval > 10)
6373 return priv->assoc_network->beacon_interval - 10;
6374 else
6375 return 120;
6376 }
6377
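/*
 * Common scan-request path for active, passive and directed scans.  If
 * a scan is already running, an abort is pending or RF kill is
 * asserted, the request is queued and re-issued later.
 */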
6378 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6379 {
6380 struct ipw_scan_request_ext scan;
6381 int err = 0, scan_type;
6382
6383 if (!(priv->status & STATUS_INIT) ||
6384 (priv->status & STATUS_EXIT_PENDING))
6385 return 0;
6386
6387 mutex_lock(&priv->mutex);
6388
6389 if (direct && (priv->direct_scan_ssid_len == 0)) {
6390 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6391 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6392 goto done;
6393 }
6394
6395 if (priv->status & STATUS_SCANNING) {
6396 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6397 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6398 STATUS_SCAN_PENDING;
6399 goto done;
6400 }
6401
6402 if (!(priv->status & STATUS_SCAN_FORCED) &&
6403 priv->status & STATUS_SCAN_ABORTING) {
6404 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6405 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6406 STATUS_SCAN_PENDING;
6407 goto done;
6408 }
6409
6410 if (priv->status & STATUS_RF_KILL_MASK) {
6411 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6412 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6413 STATUS_SCAN_PENDING;
6414 goto done;
6415 }
6416
6417 memset(&scan, 0, sizeof(scan));
6418 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6419
6420 if (type == IW_SCAN_TYPE_PASSIVE) {
6421 IPW_DEBUG_WX("use passive scanning\n");
6422 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6423 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6424 cpu_to_le16(ipw_passive_dwell_time(priv));
6425 ipw_add_scan_channels(priv, &scan, scan_type);
6426 goto send_request;
6427 }
6428
6429 /* Use active scan by default. */
6430 if (priv->config & CFG_SPEED_SCAN)
6431 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6432 cpu_to_le16(30);
6433 else
6434 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6435 cpu_to_le16(20);
6436
6437 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6438 cpu_to_le16(20);
6439
6440 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6441 cpu_to_le16(ipw_passive_dwell_time(priv));
6442 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6443
6444 #ifdef CONFIG_IPW2200_MONITOR
6445 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6446 u8 channel;
6447 u8 band = 0;
6448
6449 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6450 case LIBIPW_52GHZ_BAND:
6451 band = (u8) (IPW_A_MODE << 6) | 1;
6452 channel = priv->channel;
6453 break;
6454
6455 case LIBIPW_24GHZ_BAND:
6456 band = (u8) (IPW_B_MODE << 6) | 1;
6457 channel = priv->channel;
6458 break;
6459
6460 default:
6461 band = (u8) (IPW_B_MODE << 6) | 1;
6462 channel = 9;
6463 break;
6464 }
6465
6466 scan.channels_list[0] = band;
6467 scan.channels_list[1] = channel;
6468 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6469
6470 /* NOTE: The card will sit on this channel for this time
6471 * period. Scan aborts are timing sensitive and frequently
6472 * result in firmware restarts. As such, it is best to
6473 * set a small dwell_time here and just keep re-issuing
6474 * scans. Otherwise fast channel hopping will not actually
6475 * hop channels.
6476 *
6477 * TODO: Move SPEED SCAN support to all modes and bands */
6478 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6479 cpu_to_le16(2000);
6480 } else {
6481 #endif /* CONFIG_IPW2200_MONITOR */
6482 /* Honor direct scans first, otherwise if we are roaming make
6483 * this a direct scan for the current network. Finally,
6484 * ensure that every other scan is a fast channel hop scan */
6485 if (direct) {
6486 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6487 priv->direct_scan_ssid_len);
6488 if (err) {
6489 IPW_DEBUG_HC("Attempt to send SSID command "
6490 "failed\n");
6491 goto done;
6492 }
6493
6494 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6495 } else if ((priv->status & STATUS_ROAMING)
6496 || (!(priv->status & STATUS_ASSOCIATED)
6497 && (priv->config & CFG_STATIC_ESSID)
6498 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6499 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6500 if (err) {
6501 IPW_DEBUG_HC("Attempt to send SSID command "
6502 "failed.\n");
6503 goto done;
6504 }
6505
6506 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6507 } else
6508 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6509
6510 ipw_add_scan_channels(priv, &scan, scan_type);
6511 #ifdef CONFIG_IPW2200_MONITOR
6512 }
6513 #endif
6514
6515 send_request:
6516 err = ipw_send_scan_request_ext(priv, &scan);
6517 if (err) {
6518 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6519 goto done;
6520 }
6521
6522 priv->status |= STATUS_SCANNING;
6523 if (direct) {
6524 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6525 priv->direct_scan_ssid_len = 0;
6526 } else
6527 priv->status &= ~STATUS_SCAN_PENDING;
6528
6529 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6530 done:
6531 mutex_unlock(&priv->mutex);
6532 return err;
6533 }
6534
6535 static void ipw_request_passive_scan(struct work_struct *work)
6536 {
6537 struct ipw_priv *priv =
6538 container_of(work, struct ipw_priv, request_passive_scan.work);
6539 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6540 }
6541
6542 static void ipw_request_scan(struct work_struct *work)
6543 {
6544 struct ipw_priv *priv =
6545 container_of(work, struct ipw_priv, request_scan.work);
6546 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6547 }
6548
6549 static void ipw_request_direct_scan(struct work_struct *work)
6550 {
6551 struct ipw_priv *priv =
6552 container_of(work, struct ipw_priv, request_direct_scan.work);
6553 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6554 }
6555
6556 static void ipw_bg_abort_scan(struct work_struct *work)
6557 {
6558 struct ipw_priv *priv =
6559 container_of(work, struct ipw_priv, abort_scan);
6560 mutex_lock(&priv->mutex);
6561 ipw_abort_scan(priv);
6562 mutex_unlock(&priv->mutex);
6563 }
6564
6565 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6566 {
6567 /* This is called when wpa_supplicant loads and closes the driver
6568 * interface. */
6569 priv->ieee->wpa_enabled = value;
6570 return 0;
6571 }
6572
6573 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6574 {
6575 struct libipw_device *ieee = priv->ieee;
6576 struct libipw_security sec = {
6577 .flags = SEC_AUTH_MODE,
6578 };
6579 int ret = 0;
6580
6581 if (value & IW_AUTH_ALG_SHARED_KEY) {
6582 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6583 ieee->open_wep = 0;
6584 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6585 sec.auth_mode = WLAN_AUTH_OPEN;
6586 ieee->open_wep = 1;
6587 } else if (value & IW_AUTH_ALG_LEAP) {
6588 sec.auth_mode = WLAN_AUTH_LEAP;
6589 ieee->open_wep = 1;
6590 } else
6591 return -EINVAL;
6592
6593 if (ieee->set_security)
6594 ieee->set_security(ieee->dev, &sec);
6595 else
6596 ret = -EOPNOTSUPP;
6597
6598 return ret;
6599 }
6600
6601 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6602 int wpa_ie_len)
6603 {
6604 /* make sure WPA is enabled */
6605 ipw_wpa_enable(priv, 1);
6606 }
6607
6608 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6609 char *capabilities, int length)
6610 {
6611 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6612
6613 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6614 capabilities);
6615 }
6616
6617 /*
6618 * WE-18 support
6619 */
6620
6621 /* SIOCSIWGENIE */
6622 static int ipw_wx_set_genie(struct net_device *dev,
6623 struct iw_request_info *info,
6624 union iwreq_data *wrqu, char *extra)
6625 {
6626 struct ipw_priv *priv = libipw_priv(dev);
6627 struct libipw_device *ieee = priv->ieee;
6628 u8 *buf;
6629 int err = 0;
6630
6631 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6632 (wrqu->data.length && extra == NULL))
6633 return -EINVAL;
6634
6635 if (wrqu->data.length) {
6636 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6637 if (buf == NULL) {
6638 err = -ENOMEM;
6639 goto out;
6640 }
6641
6642 kfree(ieee->wpa_ie);
6643 ieee->wpa_ie = buf;
6644 ieee->wpa_ie_len = wrqu->data.length;
6645 } else {
6646 kfree(ieee->wpa_ie);
6647 ieee->wpa_ie = NULL;
6648 ieee->wpa_ie_len = 0;
6649 }
6650
6651 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6652 out:
6653 return err;
6654 }
6655
6656 /* SIOCGIWGENIE */
6657 static int ipw_wx_get_genie(struct net_device *dev,
6658 struct iw_request_info *info,
6659 union iwreq_data *wrqu, char *extra)
6660 {
6661 struct ipw_priv *priv = libipw_priv(dev);
6662 struct libipw_device *ieee = priv->ieee;
6663 int err = 0;
6664
6665 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6666 wrqu->data.length = 0;
6667 goto out;
6668 }
6669
6670 if (wrqu->data.length < ieee->wpa_ie_len) {
6671 err = -E2BIG;
6672 goto out;
6673 }
6674
6675 wrqu->data.length = ieee->wpa_ie_len;
6676 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6677
6678 out:
6679 return err;
6680 }
6681
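/* Map a WEXT cipher selection onto the driver's security level. */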
6682 static int wext_cipher2level(int cipher)
6683 {
6684 switch (cipher) {
6685 case IW_AUTH_CIPHER_NONE:
6686 return SEC_LEVEL_0;
6687 case IW_AUTH_CIPHER_WEP40:
6688 case IW_AUTH_CIPHER_WEP104:
6689 return SEC_LEVEL_1;
6690 case IW_AUTH_CIPHER_TKIP:
6691 return SEC_LEVEL_2;
6692 case IW_AUTH_CIPHER_CCMP:
6693 return SEC_LEVEL_3;
6694 default:
6695 return -1;
6696 }
6697 }
6698
6699 /* SIOCSIWAUTH */
6700 static int ipw_wx_set_auth(struct net_device *dev,
6701 struct iw_request_info *info,
6702 union iwreq_data *wrqu, char *extra)
6703 {
6704 struct ipw_priv *priv = libipw_priv(dev);
6705 struct libipw_device *ieee = priv->ieee;
6706 struct iw_param *param = &wrqu->param;
6707 struct lib80211_crypt_data *crypt;
6708 unsigned long flags;
6709 int ret = 0;
6710
6711 switch (param->flags & IW_AUTH_INDEX) {
6712 case IW_AUTH_WPA_VERSION:
6713 break;
6714 case IW_AUTH_CIPHER_PAIRWISE:
6715 ipw_set_hw_decrypt_unicast(priv,
6716 wext_cipher2level(param->value));
6717 break;
6718 case IW_AUTH_CIPHER_GROUP:
6719 ipw_set_hw_decrypt_multicast(priv,
6720 wext_cipher2level(param->value));
6721 break;
6722 case IW_AUTH_KEY_MGMT:
6723 /*
6724 * ipw2200 does not use these parameters
6725 */
6726 break;
6727
6728 case IW_AUTH_TKIP_COUNTERMEASURES:
6729 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6730 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6731 break;
6732
6733 flags = crypt->ops->get_flags(crypt->priv);
6734
6735 if (param->value)
6736 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6737 else
6738 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6739
6740 crypt->ops->set_flags(flags, crypt->priv);
6741
6742 break;
6743
6744 case IW_AUTH_DROP_UNENCRYPTED:{
6745 /* HACK:
6746 *
6747 * wpa_supplicant calls set_wpa_enabled when the driver
6748 			 * is loaded and unloaded, regardless of whether WPA is being
6749 			 * used.  No other calls are made that could be used to
6750 			 * determine whether encryption will be used before an
6751 			 * association is expected.  If encryption is not being
6752 			 * used, drop_unencrypted is set to false, else true -- we
6753 			 * can use this to determine whether the CAP_PRIVACY_ON bit
6754 			 * should be set.
6755 */
6756 struct libipw_security sec = {
6757 .flags = SEC_ENABLED,
6758 .enabled = param->value,
6759 };
6760 priv->ieee->drop_unencrypted = param->value;
6761 /* We only change SEC_LEVEL for open mode. Others
6762 * are set by ipw_wpa_set_encryption.
6763 */
6764 if (!param->value) {
6765 sec.flags |= SEC_LEVEL;
6766 sec.level = SEC_LEVEL_0;
6767 } else {
6768 sec.flags |= SEC_LEVEL;
6769 sec.level = SEC_LEVEL_1;
6770 }
6771 if (priv->ieee->set_security)
6772 priv->ieee->set_security(priv->ieee->dev, &sec);
6773 break;
6774 }
6775
6776 case IW_AUTH_80211_AUTH_ALG:
6777 ret = ipw_wpa_set_auth_algs(priv, param->value);
6778 break;
6779
6780 case IW_AUTH_WPA_ENABLED:
6781 ret = ipw_wpa_enable(priv, param->value);
6782 ipw_disassociate(priv);
6783 break;
6784
6785 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6786 ieee->ieee802_1x = param->value;
6787 break;
6788
6789 case IW_AUTH_PRIVACY_INVOKED:
6790 ieee->privacy_invoked = param->value;
6791 break;
6792
6793 default:
6794 return -EOPNOTSUPP;
6795 }
6796 return ret;
6797 }
6798
6799 /* SIOCGIWAUTH */
6800 static int ipw_wx_get_auth(struct net_device *dev,
6801 struct iw_request_info *info,
6802 union iwreq_data *wrqu, char *extra)
6803 {
6804 struct ipw_priv *priv = libipw_priv(dev);
6805 struct libipw_device *ieee = priv->ieee;
6806 struct lib80211_crypt_data *crypt;
6807 struct iw_param *param = &wrqu->param;
6808
6809 switch (param->flags & IW_AUTH_INDEX) {
6810 case IW_AUTH_WPA_VERSION:
6811 case IW_AUTH_CIPHER_PAIRWISE:
6812 case IW_AUTH_CIPHER_GROUP:
6813 case IW_AUTH_KEY_MGMT:
6814 /*
6815 * wpa_supplicant will control these internally
6816 */
6817 return -EOPNOTSUPP;
6818
6819 case IW_AUTH_TKIP_COUNTERMEASURES:
6820 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6821 if (!crypt || !crypt->ops->get_flags)
6822 break;
6823
6824 param->value = (crypt->ops->get_flags(crypt->priv) &
6825 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6826
6827 break;
6828
6829 case IW_AUTH_DROP_UNENCRYPTED:
6830 param->value = ieee->drop_unencrypted;
6831 break;
6832
6833 case IW_AUTH_80211_AUTH_ALG:
6834 param->value = ieee->sec.auth_mode;
6835 break;
6836
6837 case IW_AUTH_WPA_ENABLED:
6838 param->value = ieee->wpa_enabled;
6839 break;
6840
6841 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6842 param->value = ieee->ieee802_1x;
6843 break;
6844
6845 case IW_AUTH_ROAMING_CONTROL:
6846 case IW_AUTH_PRIVACY_INVOKED:
6847 param->value = ieee->privacy_invoked;
6848 break;
6849
6850 default:
6851 return -EOPNOTSUPP;
6852 }
6853 return 0;
6854 }
6855
6856 /* SIOCSIWENCODEEXT */
6857 static int ipw_wx_set_encodeext(struct net_device *dev,
6858 struct iw_request_info *info,
6859 union iwreq_data *wrqu, char *extra)
6860 {
6861 struct ipw_priv *priv = libipw_priv(dev);
6862 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6863
6864 if (hwcrypto) {
6865 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6866 			/* IPW HW can't build the TKIP MIC;
6867 			   host decryption is still needed */
6868 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6869 priv->ieee->host_mc_decrypt = 1;
6870 else {
6871 priv->ieee->host_encrypt = 0;
6872 priv->ieee->host_encrypt_msdu = 1;
6873 priv->ieee->host_decrypt = 1;
6874 }
6875 } else {
6876 priv->ieee->host_encrypt = 0;
6877 priv->ieee->host_encrypt_msdu = 0;
6878 priv->ieee->host_decrypt = 0;
6879 priv->ieee->host_mc_decrypt = 0;
6880 }
6881 }
6882
6883 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6884 }
6885
6886 /* SIOCGIWENCODEEXT */
6887 static int ipw_wx_get_encodeext(struct net_device *dev,
6888 struct iw_request_info *info,
6889 union iwreq_data *wrqu, char *extra)
6890 {
6891 struct ipw_priv *priv = libipw_priv(dev);
6892 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6893 }
6894
6895 /* SIOCSIWMLME */
6896 static int ipw_wx_set_mlme(struct net_device *dev,
6897 struct iw_request_info *info,
6898 union iwreq_data *wrqu, char *extra)
6899 {
6900 struct ipw_priv *priv = libipw_priv(dev);
6901 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6902 __le16 reason;
6903
6904 reason = cpu_to_le16(mlme->reason_code);
6905
6906 switch (mlme->cmd) {
6907 case IW_MLME_DEAUTH:
6908 /* silently ignore */
6909 break;
6910
6911 case IW_MLME_DISASSOC:
6912 ipw_disassociate(priv);
6913 break;
6914
6915 default:
6916 return -EOPNOTSUPP;
6917 }
6918 return 0;
6919 }
6920
6921 #ifdef CONFIG_IPW2200_QOS
6922
6923 /* QoS */
6924 /*
6925 * get the modulation type of the current network or
6926 * the card current mode
6927 */
6928 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6929 {
6930 u8 mode = 0;
6931
6932 if (priv->status & STATUS_ASSOCIATED) {
6933 unsigned long flags;
6934
6935 spin_lock_irqsave(&priv->ieee->lock, flags);
6936 mode = priv->assoc_network->mode;
6937 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6938 } else {
6939 mode = priv->ieee->mode;
6940 }
6941 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6942 return mode;
6943 }
6944
6945 /*
6946 * Handle management frame beacons and probe responses
6947 */
6948 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6949 int active_network,
6950 struct libipw_network *network)
6951 {
6952 u32 size = sizeof(struct libipw_qos_parameters);
6953
6954 if (network->capability & WLAN_CAPABILITY_IBSS)
6955 network->qos_data.active = network->qos_data.supported;
6956
6957 if (network->flags & NETWORK_HAS_QOS_MASK) {
6958 if (active_network &&
6959 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6960 network->qos_data.active = network->qos_data.supported;
6961
6962 if ((network->qos_data.active == 1) && (active_network == 1) &&
6963 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6964 (network->qos_data.old_param_count !=
6965 network->qos_data.param_count)) {
6966 network->qos_data.old_param_count =
6967 network->qos_data.param_count;
6968 schedule_work(&priv->qos_activate);
6969 IPW_DEBUG_QOS("QoS parameters change call "
6970 "qos_activate\n");
6971 }
6972 } else {
6973 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6974 memcpy(&network->qos_data.parameters,
6975 &def_parameters_CCK, size);
6976 else
6977 memcpy(&network->qos_data.parameters,
6978 &def_parameters_OFDM, size);
6979
6980 if ((network->qos_data.active == 1) && (active_network == 1)) {
6981 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6982 schedule_work(&priv->qos_activate);
6983 }
6984
6985 network->qos_data.active = 0;
6986 network->qos_data.supported = 0;
6987 }
6988 if ((priv->status & STATUS_ASSOCIATED) &&
6989 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6990 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6991 if (network->capability & WLAN_CAPABILITY_IBSS)
6992 if ((network->ssid_len ==
6993 priv->assoc_network->ssid_len) &&
6994 !memcmp(network->ssid,
6995 priv->assoc_network->ssid,
6996 network->ssid_len)) {
6997 schedule_work(&priv->merge_networks);
6998 }
6999 }
7000
7001 return 0;
7002 }
7003
7004 /*
7005 * This function sets up the firmware to support QoS.  It sends
7006 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO.
7007 */
7008 static int ipw_qos_activate(struct ipw_priv *priv,
7009 struct libipw_qos_data *qos_network_data)
7010 {
7011 int err;
7012 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
7013 struct libipw_qos_parameters *active_one = NULL;
7014 u32 size = sizeof(struct libipw_qos_parameters);
7015 u32 burst_duration;
7016 int i;
7017 u8 type;
7018
7019 type = ipw_qos_current_mode(priv);
7020
7021 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
7022 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
7023 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
7024 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
7025
7026 if (qos_network_data == NULL) {
7027 if (type == IEEE_B) {
7028 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
7029 active_one = &def_parameters_CCK;
7030 } else
7031 active_one = &def_parameters_OFDM;
7032
7033 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7034 burst_duration = ipw_qos_get_burst_duration(priv);
7035 for (i = 0; i < QOS_QUEUE_NUM; i++)
7036 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
7037 cpu_to_le16(burst_duration);
7038 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7039 if (type == IEEE_B) {
7040 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7041 type);
7042 if (priv->qos_data.qos_enable == 0)
7043 active_one = &def_parameters_CCK;
7044 else
7045 active_one = priv->qos_data.def_qos_parm_CCK;
7046 } else {
7047 if (priv->qos_data.qos_enable == 0)
7048 active_one = &def_parameters_OFDM;
7049 else
7050 active_one = priv->qos_data.def_qos_parm_OFDM;
7051 }
7052 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7053 } else {
7054 unsigned long flags;
7055 int active;
7056
7057 spin_lock_irqsave(&priv->ieee->lock, flags);
7058 active_one = &(qos_network_data->parameters);
7059 qos_network_data->old_param_count =
7060 qos_network_data->param_count;
7061 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7062 active = qos_network_data->supported;
7063 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7064
7065 if (active == 0) {
7066 burst_duration = ipw_qos_get_burst_duration(priv);
7067 for (i = 0; i < QOS_QUEUE_NUM; i++)
7068 qos_parameters[QOS_PARAM_SET_ACTIVE].
7069 tx_op_limit[i] = cpu_to_le16(burst_duration);
7070 }
7071 }
7072
7073 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7074 err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
7075 if (err)
7076 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7077
7078 return err;
7079 }
7080
7081 /*
7082 * send IPW_CMD_WME_INFO to the firmware
7083 */
7084 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7085 {
7086 int ret = 0;
7087 struct libipw_qos_information_element qos_info;
7088
7089 if (priv == NULL)
7090 return -1;
7091
7092 qos_info.elementID = QOS_ELEMENT_ID;
7093 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7094
7095 qos_info.version = QOS_VERSION_1;
7096 qos_info.ac_info = 0;
7097
7098 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7099 qos_info.qui_type = QOS_OUI_TYPE;
7100 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7101
7102 ret = ipw_send_qos_info_command(priv, &qos_info);
7103 if (ret != 0) {
7104 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7105 }
7106 return ret;
7107 }
7108
7109 /*
7110 * Set the QoS parameters for the association request
7111 */
7112 static int ipw_qos_association(struct ipw_priv *priv,
7113 struct libipw_network *network)
7114 {
7115 int err = 0;
7116 struct libipw_qos_data *qos_data = NULL;
7117 struct libipw_qos_data ibss_data = {
7118 .supported = 1,
7119 .active = 1,
7120 };
7121
7122 switch (priv->ieee->iw_mode) {
7123 case IW_MODE_ADHOC:
7124 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7125
7126 qos_data = &ibss_data;
7127 break;
7128
7129 case IW_MODE_INFRA:
7130 qos_data = &network->qos_data;
7131 break;
7132
7133 default:
7134 BUG();
7135 break;
7136 }
7137
7138 err = ipw_qos_activate(priv, qos_data);
7139 if (err) {
7140 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7141 return err;
7142 }
7143
7144 if (priv->qos_data.qos_enable && qos_data->supported) {
7145 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7146 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7147 return ipw_qos_set_info_element(priv);
7148 }
7149
7150 return 0;
7151 }
7152
7153 /*
7154 * Handle the association response.  If the QoS settings we get
7155 * from the network differ from the associated settings, adjust
7156 * the QoS settings.
7157 */
7158 static int ipw_qos_association_resp(struct ipw_priv *priv,
7159 struct libipw_network *network)
7160 {
7161 int ret = 0;
7162 unsigned long flags;
7163 u32 size = sizeof(struct libipw_qos_parameters);
7164 int set_qos_param = 0;
7165
7166 if ((priv == NULL) || (network == NULL) ||
7167 (priv->assoc_network == NULL))
7168 return ret;
7169
7170 if (!(priv->status & STATUS_ASSOCIATED))
7171 return ret;
7172
7173 	if (priv->ieee->iw_mode != IW_MODE_INFRA)
7174 return ret;
7175
7176 spin_lock_irqsave(&priv->ieee->lock, flags);
7177 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7178 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7179 sizeof(struct libipw_qos_data));
7180 priv->assoc_network->qos_data.active = 1;
7181 if ((network->qos_data.old_param_count !=
7182 network->qos_data.param_count)) {
7183 set_qos_param = 1;
7184 network->qos_data.old_param_count =
7185 network->qos_data.param_count;
7186 }
7187
7188 } else {
7189 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7190 memcpy(&priv->assoc_network->qos_data.parameters,
7191 &def_parameters_CCK, size);
7192 else
7193 memcpy(&priv->assoc_network->qos_data.parameters,
7194 &def_parameters_OFDM, size);
7195 priv->assoc_network->qos_data.active = 0;
7196 priv->assoc_network->qos_data.supported = 0;
7197 set_qos_param = 1;
7198 }
7199
7200 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7201
7202 if (set_qos_param == 1)
7203 schedule_work(&priv->qos_activate);
7204
7205 return ret;
7206 }
7207
7208 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7209 {
7210 u32 ret = 0;
7211
7212 	if (priv == NULL)
7213 return 0;
7214
7215 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7216 ret = priv->qos_data.burst_duration_CCK;
7217 else
7218 ret = priv->qos_data.burst_duration_OFDM;
7219
7220 return ret;
7221 }
7222
7223 /*
7224 * Initialize the global QoS settings
7225 */
7226 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7227 int burst_enable, u32 burst_duration_CCK,
7228 u32 burst_duration_OFDM)
7229 {
7230 priv->qos_data.qos_enable = enable;
7231
7232 if (priv->qos_data.qos_enable) {
7233 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7234 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7235 IPW_DEBUG_QOS("QoS is enabled\n");
7236 } else {
7237 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7238 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7239 IPW_DEBUG_QOS("QoS is not enabled\n");
7240 }
7241
7242 priv->qos_data.burst_enable = burst_enable;
7243
7244 if (burst_enable) {
7245 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7246 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7247 } else {
7248 priv->qos_data.burst_duration_CCK = 0;
7249 priv->qos_data.burst_duration_OFDM = 0;
7250 }
7251 }
7252
7253 /*
7254 * map the packet priority to the right TX Queue
7255 */
7256 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7257 {
7258 if (priority > 7 || !priv->qos_data.qos_enable)
7259 priority = 0;
7260
7261 return from_priority_to_tx_queue[priority] - 1;
7262 }
7263
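/*
 * Decide whether QoS should be applied to an outgoing frame, based on
 * the associated network's QoS state and, in ad-hoc mode, on whether
 * the frame is unicast.
 */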
7264 static int ipw_is_qos_active(struct net_device *dev,
7265 struct sk_buff *skb)
7266 {
7267 struct ipw_priv *priv = libipw_priv(dev);
7268 struct libipw_qos_data *qos_data = NULL;
7269 int active, supported;
7270 u8 *daddr = skb->data + ETH_ALEN;
7271 int unicast = !is_multicast_ether_addr(daddr);
7272
7273 if (!(priv->status & STATUS_ASSOCIATED))
7274 return 0;
7275
7276 qos_data = &priv->assoc_network->qos_data;
7277
7278 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7279 if (unicast == 0)
7280 qos_data->active = 0;
7281 else
7282 qos_data->active = qos_data->supported;
7283 }
7284 active = qos_data->active;
7285 supported = qos_data->supported;
7286 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7287 "unicast %d\n",
7288 priv->qos_data.qos_enable, active, supported, unicast);
7289 if (active && priv->qos_data.qos_enable)
7290 return 1;
7291
7292 return 0;
7293
7294 }
7295 /*
7296 * Add the QoS parameters to the TX command
7297 */
7298 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7299 u16 priority,
7300 struct tfd_data *tfd)
7301 {
7302 int tx_queue_id = 0;
7303
7304
7305 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7306 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7307
7308 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7309 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7310 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7311 }
7312 return 0;
7313 }
7314
7315 /*
7316 * Background work to run the QoS activate functionality
7317 */
7318 static void ipw_bg_qos_activate(struct work_struct *work)
7319 {
7320 struct ipw_priv *priv =
7321 container_of(work, struct ipw_priv, qos_activate);
7322
7323 mutex_lock(&priv->mutex);
7324
7325 if (priv->status & STATUS_ASSOCIATED)
7326 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7327
7328 mutex_unlock(&priv->mutex);
7329 }
7330
7331 static int ipw_handle_probe_response(struct net_device *dev,
7332 struct libipw_probe_response *resp,
7333 struct libipw_network *network)
7334 {
7335 struct ipw_priv *priv = libipw_priv(dev);
7336 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7337 (network == priv->assoc_network));
7338
7339 ipw_qos_handle_probe_response(priv, active_network, network);
7340
7341 return 0;
7342 }
7343
7344 static int ipw_handle_beacon(struct net_device *dev,
7345 struct libipw_beacon *resp,
7346 struct libipw_network *network)
7347 {
7348 struct ipw_priv *priv = libipw_priv(dev);
7349 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7350 (network == priv->assoc_network));
7351
7352 ipw_qos_handle_probe_response(priv, active_network, network);
7353
7354 return 0;
7355 }
7356
7357 static int ipw_handle_assoc_response(struct net_device *dev,
7358 struct libipw_assoc_response *resp,
7359 struct libipw_network *network)
7360 {
7361 struct ipw_priv *priv = libipw_priv(dev);
7362 ipw_qos_association_resp(priv, network);
7363 return 0;
7364 }
7365
7366 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7367 *qos_param)
7368 {
7369 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7370 sizeof(*qos_param) * 3, qos_param);
7371 }
7372
7373 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7374 *qos_param)
7375 {
7376 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7377 qos_param);
7378 }
7379
7380 #endif /* CONFIG_IPW2200_QOS */
7381
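/*
 * Program the firmware with everything needed to (re)associate to
 * 'network' -- SSID, rates, system configuration, sensitivity -- and
 * then issue the associate command.
 */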
7382 static int ipw_associate_network(struct ipw_priv *priv,
7383 struct libipw_network *network,
7384 struct ipw_supported_rates *rates, int roaming)
7385 {
7386 int err;
7387 DECLARE_SSID_BUF(ssid);
7388
7389 if (priv->config & CFG_FIXED_RATE)
7390 ipw_set_fixed_rate(priv, network->mode);
7391
7392 if (!(priv->config & CFG_STATIC_ESSID)) {
7393 priv->essid_len = min(network->ssid_len,
7394 (u8) IW_ESSID_MAX_SIZE);
7395 memcpy(priv->essid, network->ssid, priv->essid_len);
7396 }
7397
7398 network->last_associate = jiffies;
7399
7400 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7401 priv->assoc_request.channel = network->channel;
7402 priv->assoc_request.auth_key = 0;
7403
7404 if ((priv->capability & CAP_PRIVACY_ON) &&
7405 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7406 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7407 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7408
7409 if (priv->ieee->sec.level == SEC_LEVEL_1)
7410 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7411
7412 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7413 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7414 priv->assoc_request.auth_type = AUTH_LEAP;
7415 else
7416 priv->assoc_request.auth_type = AUTH_OPEN;
7417
7418 if (priv->ieee->wpa_ie_len) {
7419 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7420 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7421 priv->ieee->wpa_ie_len);
7422 }
7423
7424 /*
7425 * It is valid for our ieee device to support multiple modes, but
7426 * when it comes to associating to a given network we have to choose
7427 * just one mode.
7428 */
7429 if (network->mode & priv->ieee->mode & IEEE_A)
7430 priv->assoc_request.ieee_mode = IPW_A_MODE;
7431 else if (network->mode & priv->ieee->mode & IEEE_G)
7432 priv->assoc_request.ieee_mode = IPW_G_MODE;
7433 else if (network->mode & priv->ieee->mode & IEEE_B)
7434 priv->assoc_request.ieee_mode = IPW_B_MODE;
7435
7436 priv->assoc_request.capability = cpu_to_le16(network->capability);
7437 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7438 && !(priv->config & CFG_PREAMBLE_LONG)) {
7439 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7440 } else {
7441 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7442
7443 /* Clear the short preamble if we won't be supporting it */
7444 priv->assoc_request.capability &=
7445 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7446 }
7447
7448 /* Clear capability bits that aren't used in Ad Hoc */
7449 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7450 priv->assoc_request.capability &=
7451 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7452
7453 IPW_DEBUG_ASSOC("%ssociation attempt: '%s', channel %d, "
7454 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7455 roaming ? "Rea" : "A",
7456 print_ssid(ssid, priv->essid, priv->essid_len),
7457 network->channel,
7458 ipw_modes[priv->assoc_request.ieee_mode],
7459 rates->num_rates,
7460 (priv->assoc_request.preamble_length ==
7461 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7462 network->capability &
7463 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7464 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7465 priv->capability & CAP_PRIVACY_ON ?
7466 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7467 "(open)") : "",
7468 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7469 priv->capability & CAP_PRIVACY_ON ?
7470 '1' + priv->ieee->sec.active_key : '.',
7471 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7472
7473 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7474 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7475 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7476 priv->assoc_request.assoc_type = HC_IBSS_START;
7477 priv->assoc_request.assoc_tsf_msw = 0;
7478 priv->assoc_request.assoc_tsf_lsw = 0;
7479 } else {
7480 if (unlikely(roaming))
7481 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7482 else
7483 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7484 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7485 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7486 }
7487
7488 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7489
7490 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7491 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7492 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7493 } else {
7494 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7495 priv->assoc_request.atim_window = 0;
7496 }
7497
7498 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7499
7500 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7501 if (err) {
7502 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7503 return err;
7504 }
7505
7506 rates->ieee_mode = priv->assoc_request.ieee_mode;
7507 rates->purpose = IPW_RATE_CONNECT;
7508 ipw_send_supported_rates(priv, rates);
7509
7510 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7511 priv->sys_config.dot11g_auto_detection = 1;
7512 else
7513 priv->sys_config.dot11g_auto_detection = 0;
7514
7515 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7516 priv->sys_config.answer_broadcast_ssid_probe = 1;
7517 else
7518 priv->sys_config.answer_broadcast_ssid_probe = 0;
7519
7520 err = ipw_send_system_config(priv);
7521 if (err) {
7522 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7523 return err;
7524 }
7525
7526 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7527 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7528 if (err) {
7529 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7530 return err;
7531 }
7532
7533 /*
7534 * If preemption is enabled, it is possible for the association
7535 * to complete before we return from ipw_send_associate. Therefore
7536 	 * we have to be sure to update our private data first.
7537 */
7538 priv->channel = network->channel;
7539 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7540 priv->status |= STATUS_ASSOCIATING;
7541 priv->status &= ~STATUS_SECURITY_UPDATED;
7542
7543 priv->assoc_network = network;
7544
7545 #ifdef CONFIG_IPW2200_QOS
7546 ipw_qos_association(priv, network);
7547 #endif
7548
7549 err = ipw_send_associate(priv, &priv->assoc_request);
7550 if (err) {
7551 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7552 return err;
7553 }
7554
7555 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7556 print_ssid(ssid, priv->essid, priv->essid_len),
7557 priv->bssid);
7558
7559 return 0;
7560 }
7561
7562 static void ipw_roam(void *data)
7563 {
7564 struct ipw_priv *priv = data;
7565 struct libipw_network *network = NULL;
7566 struct ipw_network_match match = {
7567 .network = priv->assoc_network
7568 };
7569
7570 /* The roaming process is as follows:
7571 *
7572 * 1. Missed beacon threshold triggers the roaming process by
7573 * setting the status ROAM bit and requesting a scan.
7574 * 2. When the scan completes, it schedules the ROAM work
7575 * 3. The ROAM work looks at all of the known networks for one that
7576 * is a better network than the currently associated. If none
7577 * found, the ROAM process is over (ROAM bit cleared)
7578 * 4. If a better network is found, a disassociation request is
7579 * sent.
7580 * 5. When the disassociation completes, the roam work is again
7581 * scheduled. The second time through, the driver is no longer
7582 * associated, and the newly selected network is sent an
7583 * association request.
7584 	 *    6. At this point, the roaming process is complete and the ROAM
7585 * status bit is cleared.
7586 */
7587
7588 /* If we are no longer associated, and the roaming bit is no longer
7589 * set, then we are not actively roaming, so just return */
7590 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7591 return;
7592
7593 if (priv->status & STATUS_ASSOCIATED) {
7594 /* First pass through ROAM process -- look for a better
7595 * network */
7596 unsigned long flags;
7597 u8 rssi = priv->assoc_network->stats.rssi;
7598 priv->assoc_network->stats.rssi = -128;
7599 spin_lock_irqsave(&priv->ieee->lock, flags);
7600 list_for_each_entry(network, &priv->ieee->network_list, list) {
7601 if (network != priv->assoc_network)
7602 ipw_best_network(priv, &match, network, 1);
7603 }
7604 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7605 priv->assoc_network->stats.rssi = rssi;
7606
7607 if (match.network == priv->assoc_network) {
7608 IPW_DEBUG_ASSOC("No better APs in this network to "
7609 "roam to.\n");
7610 priv->status &= ~STATUS_ROAMING;
7611 ipw_debug_config(priv);
7612 return;
7613 }
7614
7615 ipw_send_disassociate(priv, 1);
7616 priv->assoc_network = match.network;
7617
7618 return;
7619 }
7620
7621 /* Second pass through ROAM process -- request association */
7622 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7623 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7624 priv->status &= ~STATUS_ROAMING;
7625 }
7626
7627 static void ipw_bg_roam(struct work_struct *work)
7628 {
7629 struct ipw_priv *priv =
7630 container_of(work, struct ipw_priv, roam);
7631 mutex_lock(&priv->mutex);
7632 ipw_roam(priv);
7633 mutex_unlock(&priv->mutex);
7634 }
7635
7636 static int ipw_associate(void *data)
7637 {
7638 struct ipw_priv *priv = data;
7639
7640 struct libipw_network *network = NULL;
7641 struct ipw_network_match match = {
7642 .network = NULL
7643 };
7644 struct ipw_supported_rates *rates;
7645 struct list_head *element;
7646 unsigned long flags;
7647 DECLARE_SSID_BUF(ssid);
7648
7649 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7650 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7651 return 0;
7652 }
7653
7654 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7655 IPW_DEBUG_ASSOC("Not attempting association (already in "
7656 "progress)\n");
7657 return 0;
7658 }
7659
7660 if (priv->status & STATUS_DISASSOCIATING) {
7661 IPW_DEBUG_ASSOC("Not attempting association (in "
7662 				"disassociating)\n");
7663 schedule_work(&priv->associate);
7664 return 0;
7665 }
7666
7667 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7668 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7669 "initialized)\n");
7670 return 0;
7671 }
7672
7673 if (!(priv->config & CFG_ASSOCIATE) &&
7674 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7675 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7676 return 0;
7677 }
7678
7679 /* Protect our use of the network_list */
7680 spin_lock_irqsave(&priv->ieee->lock, flags);
7681 list_for_each_entry(network, &priv->ieee->network_list, list)
7682 ipw_best_network(priv, &match, network, 0);
7683
7684 network = match.network;
7685 rates = &match.rates;
7686
7687 if (network == NULL &&
7688 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7689 priv->config & CFG_ADHOC_CREATE &&
7690 priv->config & CFG_STATIC_ESSID &&
7691 priv->config & CFG_STATIC_CHANNEL) {
7692 /* Use oldest network if the free list is empty */
7693 if (list_empty(&priv->ieee->network_free_list)) {
7694 struct libipw_network *oldest = NULL;
7695 struct libipw_network *target;
7696
7697 list_for_each_entry(target, &priv->ieee->network_list, list) {
7698 if ((oldest == NULL) ||
7699 (target->last_scanned < oldest->last_scanned))
7700 oldest = target;
7701 }
7702
7703 /* If there are no more slots, expire the oldest */
7704 list_del(&oldest->list);
7705 target = oldest;
7706 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7707 "network list.\n",
7708 print_ssid(ssid, target->ssid,
7709 target->ssid_len),
7710 target->bssid);
7711 list_add_tail(&target->list,
7712 &priv->ieee->network_free_list);
7713 }
7714
7715 element = priv->ieee->network_free_list.next;
7716 network = list_entry(element, struct libipw_network, list);
7717 ipw_adhoc_create(priv, network);
7718 rates = &priv->rates;
7719 list_del(element);
7720 list_add_tail(&network->list, &priv->ieee->network_list);
7721 }
7722 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7723
7724 /* If we reached the end of the list, then we don't have any valid
7725 * matching APs */
7726 if (!network) {
7727 ipw_debug_config(priv);
7728
7729 if (!(priv->status & STATUS_SCANNING)) {
7730 if (!(priv->config & CFG_SPEED_SCAN))
7731 schedule_delayed_work(&priv->request_scan,
7732 SCAN_INTERVAL);
7733 else
7734 schedule_delayed_work(&priv->request_scan, 0);
7735 }
7736
7737 return 0;
7738 }
7739
7740 ipw_associate_network(priv, network, rates, 0);
7741
7742 return 1;
7743 }
7744
7745 static void ipw_bg_associate(struct work_struct *work)
7746 {
7747 struct ipw_priv *priv =
7748 container_of(work, struct ipw_priv, associate);
7749 mutex_lock(&priv->mutex);
7750 ipw_associate(priv);
7751 mutex_unlock(&priv->mutex);
7752 }
7753
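/* When the firmware has already decrypted a frame it leaves the protected
 * bit set and the security header/trailer in place.  Strip them here
 * (8-byte CCMP header + 8-byte MIC for CCMP, 4-byte IV + 4-byte ICV for
 * WEP) so the 802.11 stack sees a plain, unprotected frame. */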
7754 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7755 struct sk_buff *skb)
7756 {
7757 struct ieee80211_hdr *hdr;
7758 u16 fc;
7759
7760 hdr = (struct ieee80211_hdr *)skb->data;
7761 fc = le16_to_cpu(hdr->frame_control);
7762 if (!(fc & IEEE80211_FCTL_PROTECTED))
7763 return;
7764
7765 fc &= ~IEEE80211_FCTL_PROTECTED;
7766 hdr->frame_control = cpu_to_le16(fc);
7767 switch (priv->ieee->sec.level) {
7768 case SEC_LEVEL_3:
7769 /* Remove CCMP HDR */
7770 memmove(skb->data + LIBIPW_3ADDR_LEN,
7771 skb->data + LIBIPW_3ADDR_LEN + 8,
7772 skb->len - LIBIPW_3ADDR_LEN - 8);
7773 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7774 break;
7775 case SEC_LEVEL_2:
7776 break;
7777 case SEC_LEVEL_1:
7778 /* Remove IV */
7779 memmove(skb->data + LIBIPW_3ADDR_LEN,
7780 skb->data + LIBIPW_3ADDR_LEN + 4,
7781 skb->len - LIBIPW_3ADDR_LEN - 4);
7782 skb_trim(skb, skb->len - 8); /* IV + ICV */
7783 break;
7784 case SEC_LEVEL_0:
7785 break;
7786 default:
7787 printk(KERN_ERR "Unknown security level %d\n",
7788 priv->ieee->sec.level);
7789 break;
7790 }
7791 }
7792
7793 static void ipw_handle_data_packet(struct ipw_priv *priv,
7794 struct ipw_rx_mem_buffer *rxb,
7795 struct libipw_rx_stats *stats)
7796 {
7797 struct net_device *dev = priv->net_dev;
7798 struct libipw_hdr_4addr *hdr;
7799 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7800
7801 /* We received data from the HW, so stop the watchdog */
7802 dev->trans_start = jiffies;
7803
7804 	/* Sanity check the reported frame length, and only process
7805 	 * packets while the interface is up */
7806 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7807 skb_tailroom(rxb->skb))) {
7808 dev->stats.rx_errors++;
7809 priv->wstats.discard.misc++;
7810 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7811 return;
7812 } else if (unlikely(!netif_running(priv->net_dev))) {
7813 dev->stats.rx_dropped++;
7814 priv->wstats.discard.misc++;
7815 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7816 return;
7817 }
7818
7819 /* Advance skb->data to the start of the actual payload */
7820 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7821
7822 /* Set the size of the skb to the size of the frame */
7823 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7824
7825 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7826
7827 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7828 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7829 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7830 (is_multicast_ether_addr(hdr->addr1) ?
7831 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7832 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7833
7834 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7835 dev->stats.rx_errors++;
7836 else { /* libipw_rx succeeded, so it now owns the SKB */
7837 rxb->skb = NULL;
7838 __ipw_led_activity_on(priv);
7839 }
7840 }
7841
7842 #ifdef CONFIG_IPW2200_RADIOTAP
7843 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7844 struct ipw_rx_mem_buffer *rxb,
7845 struct libipw_rx_stats *stats)
7846 {
7847 struct net_device *dev = priv->net_dev;
7848 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7849 struct ipw_rx_frame *frame = &pkt->u.frame;
7850
7851 /* initial pull of some data */
7852 u16 received_channel = frame->received_channel;
7853 u8 antennaAndPhy = frame->antennaAndPhy;
7854 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7855 u16 pktrate = frame->rate;
7856
7857 	/* Magic struct that slots into the radiotap header -- there is no
7858 	 * reason to build this manually element by element, since we can
7859 	 * write it much more efficiently than we can parse it. ORDER MATTERS HERE */
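	/* Radiotap requires each data field to appear in exactly the order of
	 * the bits set in it_present, so struct ipw_rt_hdr is laid out to match
	 * the TSFT/FLAGS/RATE/CHANNEL/ANTSIGNAL/ANTNOISE/ANTENNA bitmap built
	 * below. */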
7860 struct ipw_rt_hdr *ipw_rt;
7861
7862 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7863
7864 /* We received data from the HW, so stop the watchdog */
7865 dev->trans_start = jiffies;
7866
7867 	/* Sanity check the reported frame length, and only process
7868 	 * packets while the interface is up */
7869 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7870 skb_tailroom(rxb->skb))) {
7871 dev->stats.rx_errors++;
7872 priv->wstats.discard.misc++;
7873 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7874 return;
7875 } else if (unlikely(!netif_running(priv->net_dev))) {
7876 dev->stats.rx_dropped++;
7877 priv->wstats.discard.misc++;
7878 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7879 return;
7880 }
7881
7882 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7883 * that now */
7884 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7885 /* FIXME: Should alloc bigger skb instead */
7886 dev->stats.rx_dropped++;
7887 priv->wstats.discard.misc++;
7888 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7889 return;
7890 }
7891
7892 /* copy the frame itself */
7893 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7894 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7895
7896 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7897
7898 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7899 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7900 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7901
7902 /* Big bitfield of all the fields we provide in radiotap */
7903 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7904 (1 << IEEE80211_RADIOTAP_TSFT) |
7905 (1 << IEEE80211_RADIOTAP_FLAGS) |
7906 (1 << IEEE80211_RADIOTAP_RATE) |
7907 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7908 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7909 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7910 (1 << IEEE80211_RADIOTAP_ANTENNA));
7911
7912 /* Zero the flags, we'll add to them as we go */
7913 ipw_rt->rt_flags = 0;
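	/* Assemble the four little-endian TSF bytes reported by the firmware
	 * (only the low 32 bits of the parent TSF) into the 64-bit radiotap
	 * TSFT field. */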
7914 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7915 frame->parent_tsf[2] << 16 |
7916 frame->parent_tsf[1] << 8 |
7917 frame->parent_tsf[0]);
7918
7919 /* Convert signal to DBM */
7920 ipw_rt->rt_dbmsignal = antsignal;
7921 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7922
7923 /* Convert the channel data and set the flags */
7924 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7925 if (received_channel > 14) { /* 802.11a */
7926 ipw_rt->rt_chbitmask =
7927 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7928 } else if (antennaAndPhy & 32) { /* 802.11b */
7929 ipw_rt->rt_chbitmask =
7930 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7931 } else { /* 802.11g */
7932 ipw_rt->rt_chbitmask =
7933 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7934 }
7935
7936 /* set the rate in multiples of 500k/s */
7937 switch (pktrate) {
7938 case IPW_TX_RATE_1MB:
7939 ipw_rt->rt_rate = 2;
7940 break;
7941 case IPW_TX_RATE_2MB:
7942 ipw_rt->rt_rate = 4;
7943 break;
7944 case IPW_TX_RATE_5MB:
7945 ipw_rt->rt_rate = 10;
7946 break;
7947 case IPW_TX_RATE_6MB:
7948 ipw_rt->rt_rate = 12;
7949 break;
7950 case IPW_TX_RATE_9MB:
7951 ipw_rt->rt_rate = 18;
7952 break;
7953 case IPW_TX_RATE_11MB:
7954 ipw_rt->rt_rate = 22;
7955 break;
7956 case IPW_TX_RATE_12MB:
7957 ipw_rt->rt_rate = 24;
7958 break;
7959 case IPW_TX_RATE_18MB:
7960 ipw_rt->rt_rate = 36;
7961 break;
7962 case IPW_TX_RATE_24MB:
7963 ipw_rt->rt_rate = 48;
7964 break;
7965 case IPW_TX_RATE_36MB:
7966 ipw_rt->rt_rate = 72;
7967 break;
7968 case IPW_TX_RATE_48MB:
7969 ipw_rt->rt_rate = 96;
7970 break;
7971 case IPW_TX_RATE_54MB:
7972 ipw_rt->rt_rate = 108;
7973 break;
7974 default:
7975 ipw_rt->rt_rate = 0;
7976 break;
7977 }
7978
7979 /* antenna number */
7980 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7981
7982 /* set the preamble flag if we have it */
7983 if ((antennaAndPhy & 64))
7984 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7985
7986 /* Set the size of the skb to the size of the frame */
7987 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7988
7989 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7990
7991 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7992 dev->stats.rx_errors++;
7993 else { /* libipw_rx succeeded, so it now owns the SKB */
7994 rxb->skb = NULL;
7995 /* no LED during capture */
7996 }
7997 }
7998 #endif
7999
8000 #ifdef CONFIG_IPW2200_PROMISCUOUS
8001 #define libipw_is_probe_response(fc) \
8002 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
8003 	 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)
8004
8005 #define libipw_is_management(fc) \
8006 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
8007
8008 #define libipw_is_control(fc) \
8009 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
8010
8011 #define libipw_is_data(fc) \
8012 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
8013
8014 #define libipw_is_assoc_request(fc) \
8015 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
8016
8017 #define libipw_is_reassoc_request(fc) \
8018 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
8019
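/* Deliver a copy of every received frame to the rtap interface, wrapped in a
 * radiotap header, honouring the per-frame-type filter flags kept in
 * priv->prom_priv->filter. */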
8020 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8021 struct ipw_rx_mem_buffer *rxb,
8022 struct libipw_rx_stats *stats)
8023 {
8024 struct net_device *dev = priv->prom_net_dev;
8025 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
8026 struct ipw_rx_frame *frame = &pkt->u.frame;
8027 struct ipw_rt_hdr *ipw_rt;
8028
8029 /* First cache any information we need before we overwrite
8030 * the information provided in the skb from the hardware */
8031 struct ieee80211_hdr *hdr;
8032 u16 channel = frame->received_channel;
8033 u8 phy_flags = frame->antennaAndPhy;
8034 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8035 s8 noise = (s8) le16_to_cpu(frame->noise);
8036 u8 rate = frame->rate;
8037 unsigned short len = le16_to_cpu(pkt->u.frame.length);
8038 struct sk_buff *skb;
8039 int hdr_only = 0;
8040 u16 filter = priv->prom_priv->filter;
8041
8042 /* If the filter is set to not include Rx frames then return */
8043 if (filter & IPW_PROM_NO_RX)
8044 return;
8045
8046 /* We received data from the HW, so stop the watchdog */
8047 dev->trans_start = jiffies;
8048
8049 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8050 dev->stats.rx_errors++;
8051 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8052 return;
8053 }
8054
8055 /* We only process data packets if the interface is open */
8056 if (unlikely(!netif_running(dev))) {
8057 dev->stats.rx_dropped++;
8058 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8059 return;
8060 }
8061
8062 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8063 * that now */
8064 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8065 /* FIXME: Should alloc bigger skb instead */
8066 dev->stats.rx_dropped++;
8067 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8068 return;
8069 }
8070
8071 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8072 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8073 if (filter & IPW_PROM_NO_MGMT)
8074 return;
8075 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8076 hdr_only = 1;
8077 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8078 if (filter & IPW_PROM_NO_CTL)
8079 return;
8080 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8081 hdr_only = 1;
8082 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8083 if (filter & IPW_PROM_NO_DATA)
8084 return;
8085 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8086 hdr_only = 1;
8087 }
8088
8089 /* Copy the SKB since this is for the promiscuous side */
8090 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8091 if (skb == NULL) {
8092 		IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8093 return;
8094 }
8095
8096 /* copy the frame data to write after where the radiotap header goes */
8097 ipw_rt = (void *)skb->data;
8098
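	/* If this frame class was filtered to header-only above, copy just the
	 * 802.11 header and drop the body. */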
8099 if (hdr_only)
8100 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8101
8102 memcpy(ipw_rt->payload, hdr, len);
8103
8104 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8105 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8106 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8107
8108 /* Set the size of the skb to the size of the frame */
8109 skb_put(skb, sizeof(*ipw_rt) + len);
8110
8111 /* Big bitfield of all the fields we provide in radiotap */
8112 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8113 (1 << IEEE80211_RADIOTAP_TSFT) |
8114 (1 << IEEE80211_RADIOTAP_FLAGS) |
8115 (1 << IEEE80211_RADIOTAP_RATE) |
8116 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8117 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8118 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8119 (1 << IEEE80211_RADIOTAP_ANTENNA));
8120
8121 /* Zero the flags, we'll add to them as we go */
8122 ipw_rt->rt_flags = 0;
8123 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8124 frame->parent_tsf[2] << 16 |
8125 frame->parent_tsf[1] << 8 |
8126 frame->parent_tsf[0]);
8127
8128 /* Convert to DBM */
8129 ipw_rt->rt_dbmsignal = signal;
8130 ipw_rt->rt_dbmnoise = noise;
8131
8132 /* Convert the channel data and set the flags */
8133 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8134 if (channel > 14) { /* 802.11a */
8135 ipw_rt->rt_chbitmask =
8136 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8137 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8138 ipw_rt->rt_chbitmask =
8139 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8140 } else { /* 802.11g */
8141 ipw_rt->rt_chbitmask =
8142 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8143 }
8144
8145 /* set the rate in multiples of 500k/s */
8146 switch (rate) {
8147 case IPW_TX_RATE_1MB:
8148 ipw_rt->rt_rate = 2;
8149 break;
8150 case IPW_TX_RATE_2MB:
8151 ipw_rt->rt_rate = 4;
8152 break;
8153 case IPW_TX_RATE_5MB:
8154 ipw_rt->rt_rate = 10;
8155 break;
8156 case IPW_TX_RATE_6MB:
8157 ipw_rt->rt_rate = 12;
8158 break;
8159 case IPW_TX_RATE_9MB:
8160 ipw_rt->rt_rate = 18;
8161 break;
8162 case IPW_TX_RATE_11MB:
8163 ipw_rt->rt_rate = 22;
8164 break;
8165 case IPW_TX_RATE_12MB:
8166 ipw_rt->rt_rate = 24;
8167 break;
8168 case IPW_TX_RATE_18MB:
8169 ipw_rt->rt_rate = 36;
8170 break;
8171 case IPW_TX_RATE_24MB:
8172 ipw_rt->rt_rate = 48;
8173 break;
8174 case IPW_TX_RATE_36MB:
8175 ipw_rt->rt_rate = 72;
8176 break;
8177 case IPW_TX_RATE_48MB:
8178 ipw_rt->rt_rate = 96;
8179 break;
8180 case IPW_TX_RATE_54MB:
8181 ipw_rt->rt_rate = 108;
8182 break;
8183 default:
8184 ipw_rt->rt_rate = 0;
8185 break;
8186 }
8187
8188 /* antenna number */
8189 ipw_rt->rt_antenna = (phy_flags & 3);
8190
8191 /* set the preamble flag if we have it */
8192 if (phy_flags & (1 << 6))
8193 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8194
8195 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8196
8197 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8198 dev->stats.rx_errors++;
8199 dev_kfree_skb_any(skb);
8200 }
8201 }
8202 #endif
8203
8204 static int is_network_packet(struct ipw_priv *priv,
8205 struct libipw_hdr_4addr *header)
8206 {
8207 /* Filter incoming packets to determine if they are targeted toward
8208 * this network, discarding packets coming from ourselves */
8209 switch (priv->ieee->iw_mode) {
8210 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8211 /* packets from our adapter are dropped (echo) */
8212 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8213 return 0;
8214
8215 /* {broad,multi}cast packets to our BSSID go through */
8216 if (is_multicast_ether_addr(header->addr1))
8217 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8218
8219 /* packets to our adapter go through */
8220 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8221 ETH_ALEN);
8222
8223 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8224 /* packets from our adapter are dropped (echo) */
8225 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8226 return 0;
8227
8228 /* {broad,multi}cast packets to our BSS go through */
8229 if (is_multicast_ether_addr(header->addr1))
8230 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8231
8232 /* packets to our adapter go through */
8233 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8234 ETH_ALEN);
8235 }
8236
8237 return 1;
8238 }
8239
8240 #define IPW_PACKET_RETRY_TIME HZ
8241
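/* Detect retransmissions by tracking the last 802.11 sequence-control value
 * (12-bit sequence number plus 4-bit fragment number) seen from each peer:
 * a single per-BSS state in infrastructure mode, a small MAC-hashed table in
 * IBSS mode.  A repeated sequence number only counts as a duplicate if it
 * arrives within IPW_PACKET_RETRY_TIME of the previous copy. */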
8242 static int is_duplicate_packet(struct ipw_priv *priv,
8243 struct libipw_hdr_4addr *header)
8244 {
8245 u16 sc = le16_to_cpu(header->seq_ctl);
8246 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8247 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8248 u16 *last_seq, *last_frag;
8249 unsigned long *last_time;
8250
8251 switch (priv->ieee->iw_mode) {
8252 case IW_MODE_ADHOC:
8253 {
8254 struct list_head *p;
8255 struct ipw_ibss_seq *entry = NULL;
8256 u8 *mac = header->addr2;
8257 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8258
8259 list_for_each(p, &priv->ibss_mac_hash[index]) {
8260 entry =
8261 list_entry(p, struct ipw_ibss_seq, list);
8262 if (!memcmp(entry->mac, mac, ETH_ALEN))
8263 break;
8264 }
8265 if (p == &priv->ibss_mac_hash[index]) {
8266 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8267 if (!entry) {
8268 IPW_ERROR
8269 ("Cannot malloc new mac entry\n");
8270 return 0;
8271 }
8272 memcpy(entry->mac, mac, ETH_ALEN);
8273 entry->seq_num = seq;
8274 entry->frag_num = frag;
8275 entry->packet_time = jiffies;
8276 list_add(&entry->list,
8277 &priv->ibss_mac_hash[index]);
8278 return 0;
8279 }
8280 last_seq = &entry->seq_num;
8281 last_frag = &entry->frag_num;
8282 last_time = &entry->packet_time;
8283 break;
8284 }
8285 case IW_MODE_INFRA:
8286 last_seq = &priv->last_seq_num;
8287 last_frag = &priv->last_frag_num;
8288 last_time = &priv->last_packet_time;
8289 break;
8290 default:
8291 return 0;
8292 }
8293 if ((*last_seq == seq) &&
8294 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8295 if (*last_frag == frag)
8296 goto drop;
8297 if (*last_frag + 1 != frag)
8298 /* out-of-order fragment */
8299 goto drop;
8300 } else
8301 *last_seq = seq;
8302
8303 *last_frag = frag;
8304 *last_time = jiffies;
8305 return 0;
8306
8307 drop:
8308 	/* The following check is commented out because the card has been
8309 	 * observed to receive duplicate packets without the FCTL_RETRY bit
8310 	 * set when running in IBSS mode with fragmentation enabled.
8311 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8312 return 1;
8313 }
8314
8315 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8316 struct ipw_rx_mem_buffer *rxb,
8317 struct libipw_rx_stats *stats)
8318 {
8319 struct sk_buff *skb = rxb->skb;
8320 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8321 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8322 (skb->data + IPW_RX_FRAME_SIZE);
8323
8324 libipw_rx_mgt(priv->ieee, header, stats);
8325
8326 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8327 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8328 IEEE80211_STYPE_PROBE_RESP) ||
8329 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8330 IEEE80211_STYPE_BEACON))) {
8331 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8332 ipw_add_station(priv, header->addr2);
8333 }
8334
8335 if (priv->config & CFG_NET_STATS) {
8336 IPW_DEBUG_HC("sending stat packet\n");
8337
8338 /* Set the size of the skb to the size of the full
8339 * ipw header and 802.11 frame */
8340 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8341 IPW_RX_FRAME_SIZE);
8342
8343 /* Advance past the ipw packet header to the 802.11 frame */
8344 skb_pull(skb, IPW_RX_FRAME_SIZE);
8345
8346 /* Push the libipw_rx_stats before the 802.11 frame */
8347 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8348
8349 skb->dev = priv->ieee->dev;
8350
8351 /* Point raw at the libipw_stats */
8352 skb_reset_mac_header(skb);
8353
8354 skb->pkt_type = PACKET_OTHERHOST;
8355 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8356 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8357 netif_rx(skb);
8358 rxb->skb = NULL;
8359 }
8360 }
8361
8362 /*
8363  * Main entry function for receiving a packet with 802.11 headers. This
8364  * should be called whenever the firmware has notified us that there is a new
8365 * skb in the receive queue.
8366 */
8367 static void ipw_rx(struct ipw_priv *priv)
8368 {
8369 struct ipw_rx_mem_buffer *rxb;
8370 struct ipw_rx_packet *pkt;
8371 struct libipw_hdr_4addr *header;
8372 u32 r, w, i;
8373 u8 network_packet;
8374 u8 fill_rx = 0;
8375
8376 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8377 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8378 i = priv->rxq->read;
8379
8380 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8381 fill_rx = 1;
8382
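	/* Drain the Rx ring: walk our read cursor (i) forward until it reaches
	 * the index read back from the device (r), dispatching each completed
	 * buffer to the appropriate handler. */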
8383 while (i != r) {
8384 rxb = priv->rxq->queue[i];
8385 if (unlikely(rxb == NULL)) {
8386 printk(KERN_CRIT "Queue not allocated!\n");
8387 break;
8388 }
8389 priv->rxq->queue[i] = NULL;
8390
8391 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8392 IPW_RX_BUF_SIZE,
8393 PCI_DMA_FROMDEVICE);
8394
8395 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8396 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8397 pkt->header.message_type,
8398 pkt->header.rx_seq_num, pkt->header.control_bits);
8399
8400 switch (pkt->header.message_type) {
8401 case RX_FRAME_TYPE: /* 802.11 frame */ {
8402 struct libipw_rx_stats stats = {
8403 .rssi = pkt->u.frame.rssi_dbm -
8404 IPW_RSSI_TO_DBM,
8405 .signal =
8406 pkt->u.frame.rssi_dbm -
8407 IPW_RSSI_TO_DBM + 0x100,
8408 .noise =
8409 le16_to_cpu(pkt->u.frame.noise),
8410 .rate = pkt->u.frame.rate,
8411 .mac_time = jiffies,
8412 .received_channel =
8413 pkt->u.frame.received_channel,
8414 .freq =
8415 (pkt->u.frame.
8416 control & (1 << 0)) ?
8417 LIBIPW_24GHZ_BAND :
8418 LIBIPW_52GHZ_BAND,
8419 .len = le16_to_cpu(pkt->u.frame.length),
8420 };
8421
8422 if (stats.rssi != 0)
8423 stats.mask |= LIBIPW_STATMASK_RSSI;
8424 if (stats.signal != 0)
8425 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8426 if (stats.noise != 0)
8427 stats.mask |= LIBIPW_STATMASK_NOISE;
8428 if (stats.rate != 0)
8429 stats.mask |= LIBIPW_STATMASK_RATE;
8430
8431 priv->rx_packets++;
8432
8433 #ifdef CONFIG_IPW2200_PROMISCUOUS
8434 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8435 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8436 #endif
8437
8438 #ifdef CONFIG_IPW2200_MONITOR
8439 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8440 #ifdef CONFIG_IPW2200_RADIOTAP
8441
8442 ipw_handle_data_packet_monitor(priv,
8443 rxb,
8444 &stats);
8445 #else
8446 ipw_handle_data_packet(priv, rxb,
8447 &stats);
8448 #endif
8449 break;
8450 }
8451 #endif
8452
8453 header =
8454 (struct libipw_hdr_4addr *)(rxb->skb->
8455 data +
8456 IPW_RX_FRAME_SIZE);
8457 /* TODO: Check Ad-Hoc dest/source and make sure
8458 * that we are actually parsing these packets
8459 * correctly -- we should probably use the
8460 * frame control of the packet and disregard
8461 * the current iw_mode */
8462
8463 network_packet =
8464 is_network_packet(priv, header);
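				/* For frames from the network we are associated
				 * with, record the latest RSSI and fold it into
				 * an exponential moving average. */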
8465 if (network_packet && priv->assoc_network) {
8466 priv->assoc_network->stats.rssi =
8467 stats.rssi;
8468 priv->exp_avg_rssi =
8469 exponential_average(priv->exp_avg_rssi,
8470 stats.rssi, DEPTH_RSSI);
8471 }
8472
8473 IPW_DEBUG_RX("Frame: len=%u\n",
8474 le16_to_cpu(pkt->u.frame.length));
8475
8476 if (le16_to_cpu(pkt->u.frame.length) <
8477 libipw_get_hdrlen(le16_to_cpu(
8478 header->frame_ctl))) {
8479 IPW_DEBUG_DROP
8480 ("Received packet is too small. "
8481 "Dropping.\n");
8482 priv->net_dev->stats.rx_errors++;
8483 priv->wstats.discard.misc++;
8484 break;
8485 }
8486
8487 switch (WLAN_FC_GET_TYPE
8488 (le16_to_cpu(header->frame_ctl))) {
8489
8490 case IEEE80211_FTYPE_MGMT:
8491 ipw_handle_mgmt_packet(priv, rxb,
8492 &stats);
8493 break;
8494
8495 case IEEE80211_FTYPE_CTL:
8496 break;
8497
8498 case IEEE80211_FTYPE_DATA:
8499 if (unlikely(!network_packet ||
8500 is_duplicate_packet(priv,
8501 header)))
8502 {
8503 IPW_DEBUG_DROP("Dropping: "
8504 "%pM, "
8505 "%pM, "
8506 "%pM\n",
8507 header->addr1,
8508 header->addr2,
8509 header->addr3);
8510 break;
8511 }
8512
8513 ipw_handle_data_packet(priv, rxb,
8514 &stats);
8515
8516 break;
8517 }
8518 break;
8519 }
8520
8521 case RX_HOST_NOTIFICATION_TYPE:{
8522 IPW_DEBUG_RX
8523 ("Notification: subtype=%02X flags=%02X size=%d\n",
8524 pkt->u.notification.subtype,
8525 pkt->u.notification.flags,
8526 le16_to_cpu(pkt->u.notification.size));
8527 ipw_rx_notification(priv, &pkt->u.notification);
8528 break;
8529 }
8530
8531 default:
8532 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8533 pkt->header.message_type);
8534 break;
8535 }
8536
8537 /* For now we just don't re-use anything. We can tweak this
8538 * later to try and re-use notification packets and SKBs that
8539 * fail to Rx correctly */
8540 if (rxb->skb != NULL) {
8541 dev_kfree_skb_any(rxb->skb);
8542 rxb->skb = NULL;
8543 }
8544
8545 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8546 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8547 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8548
8549 i = (i + 1) % RX_QUEUE_SIZE;
8550
8551 		/* If there are a lot of unused frames, restock the Rx queue
8552 * so the ucode won't assert */
8553 if (fill_rx) {
8554 priv->rxq->read = i;
8555 ipw_rx_queue_replenish(priv);
8556 }
8557 }
8558
8559 /* Backtrack one entry */
8560 priv->rxq->read = i;
8561 ipw_rx_queue_restock(priv);
8562 }
8563
8564 #define DEFAULT_RTS_THRESHOLD 2304U
8565 #define MIN_RTS_THRESHOLD 1U
8566 #define MAX_RTS_THRESHOLD 2304U
8567 #define DEFAULT_BEACON_INTERVAL 100U
8568 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8569 #define DEFAULT_LONG_RETRY_LIMIT 4U
8570
8571 /**
8572 * ipw_sw_reset
8573 * @option: options to control different reset behaviour
8574 * 0 = reset everything except the 'disable' module_param
8575 * 1 = reset everything and print out driver info (for probe only)
8576 * 2 = reset everything
8577 */
8578 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8579 {
8580 int band, modulation;
8581 int old_mode = priv->ieee->iw_mode;
8582
8583 /* Initialize module parameter values here */
8584 priv->config = 0;
8585
8586 /* We default to disabling the LED code as right now it causes
8587 * too many systems to lock up... */
8588 if (!led_support)
8589 priv->config |= CFG_NO_LED;
8590
8591 if (associate)
8592 priv->config |= CFG_ASSOCIATE;
8593 else
8594 IPW_DEBUG_INFO("Auto associate disabled.\n");
8595
8596 if (auto_create)
8597 priv->config |= CFG_ADHOC_CREATE;
8598 else
8599 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8600
8601 priv->config &= ~CFG_STATIC_ESSID;
8602 priv->essid_len = 0;
8603 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8604
8605 if (disable && option) {
8606 priv->status |= STATUS_RF_KILL_SW;
8607 IPW_DEBUG_INFO("Radio disabled.\n");
8608 }
8609
8610 if (default_channel != 0) {
8611 priv->config |= CFG_STATIC_CHANNEL;
8612 priv->channel = default_channel;
8613 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8614 /* TODO: Validate that provided channel is in range */
8615 }
8616 #ifdef CONFIG_IPW2200_QOS
8617 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8618 burst_duration_CCK, burst_duration_OFDM);
8619 #endif /* CONFIG_IPW2200_QOS */
8620
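	/* Map the requested network_mode onto a wireless-extensions operating
	 * mode: 0 = infrastructure (BSS), 1 = ad-hoc (IBSS), 2 = monitor (when
	 * monitor support is built in). */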
8621 switch (network_mode) {
8622 case 1:
8623 priv->ieee->iw_mode = IW_MODE_ADHOC;
8624 priv->net_dev->type = ARPHRD_ETHER;
8625
8626 break;
8627 #ifdef CONFIG_IPW2200_MONITOR
8628 case 2:
8629 priv->ieee->iw_mode = IW_MODE_MONITOR;
8630 #ifdef CONFIG_IPW2200_RADIOTAP
8631 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8632 #else
8633 priv->net_dev->type = ARPHRD_IEEE80211;
8634 #endif
8635 break;
8636 #endif
8637 default:
8638 case 0:
8639 priv->net_dev->type = ARPHRD_ETHER;
8640 priv->ieee->iw_mode = IW_MODE_INFRA;
8641 break;
8642 }
8643
8644 if (hwcrypto) {
8645 priv->ieee->host_encrypt = 0;
8646 priv->ieee->host_encrypt_msdu = 0;
8647 priv->ieee->host_decrypt = 0;
8648 priv->ieee->host_mc_decrypt = 0;
8649 }
8650 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8651
8652 	/* IPW2200/2915 is able to do hardware fragmentation. */
8653 priv->ieee->host_open_frag = 0;
8654
8655 if ((priv->pci_dev->device == 0x4223) ||
8656 (priv->pci_dev->device == 0x4224)) {
8657 if (option == 1)
8658 printk(KERN_INFO DRV_NAME
8659 ": Detected Intel PRO/Wireless 2915ABG Network "
8660 "Connection\n");
8661 priv->ieee->abg_true = 1;
8662 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8663 modulation = LIBIPW_OFDM_MODULATION |
8664 LIBIPW_CCK_MODULATION;
8665 priv->adapter = IPW_2915ABG;
8666 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8667 } else {
8668 if (option == 1)
8669 printk(KERN_INFO DRV_NAME
8670 ": Detected Intel PRO/Wireless 2200BG Network "
8671 "Connection\n");
8672
8673 priv->ieee->abg_true = 0;
8674 band = LIBIPW_24GHZ_BAND;
8675 modulation = LIBIPW_OFDM_MODULATION |
8676 LIBIPW_CCK_MODULATION;
8677 priv->adapter = IPW_2200BG;
8678 priv->ieee->mode = IEEE_G | IEEE_B;
8679 }
8680
8681 priv->ieee->freq_band = band;
8682 priv->ieee->modulation = modulation;
8683
8684 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8685
8686 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8687 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8688
8689 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8690 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8691 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8692
8693 /* If power management is turned on, default to AC mode */
8694 priv->power_mode = IPW_POWER_AC;
8695 priv->tx_power = IPW_TX_POWER_DEFAULT;
8696
8697 return old_mode == priv->ieee->iw_mode;
8698 }
8699
8700 /*
8701 * This file defines the Wireless Extension handlers. It does not
8702 * define any methods of hardware manipulation and relies on the
8703 * functions defined in ipw_main to provide the HW interaction.
8704 *
8705  * The exception to this is the use of the ipw_get_ordinal()
8706  * function, which is used to poll the hardware rather than make unnecessary calls.
8707 *
8708 */
8709
8710 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8711 {
8712 if (channel == 0) {
8713 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8714 priv->config &= ~CFG_STATIC_CHANNEL;
8715 IPW_DEBUG_ASSOC("Attempting to associate with new "
8716 "parameters.\n");
8717 ipw_associate(priv);
8718 return 0;
8719 }
8720
8721 priv->config |= CFG_STATIC_CHANNEL;
8722
8723 if (priv->channel == channel) {
8724 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8725 channel);
8726 return 0;
8727 }
8728
8729 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8730 priv->channel = channel;
8731
8732 #ifdef CONFIG_IPW2200_MONITOR
8733 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8734 int i;
8735 if (priv->status & STATUS_SCANNING) {
8736 IPW_DEBUG_SCAN("Scan abort triggered due to "
8737 "channel change.\n");
8738 ipw_abort_scan(priv);
8739 }
8740
8741 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8742 udelay(10);
8743
8744 if (priv->status & STATUS_SCANNING)
8745 IPW_DEBUG_SCAN("Still scanning...\n");
8746 else
8747 			IPW_DEBUG_SCAN("Took %dus to abort current scan\n",
8748 				       (1000 - i) * 10);
8749
8750 return 0;
8751 }
8752 #endif /* CONFIG_IPW2200_MONITOR */
8753
8754 /* Network configuration changed -- force [re]association */
8755 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8756 if (!ipw_disassociate(priv))
8757 ipw_associate(priv);
8758
8759 return 0;
8760 }
8761
8762 static int ipw_wx_set_freq(struct net_device *dev,
8763 struct iw_request_info *info,
8764 union iwreq_data *wrqu, char *extra)
8765 {
8766 struct ipw_priv *priv = libipw_priv(dev);
8767 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8768 struct iw_freq *fwrq = &wrqu->freq;
8769 int ret = 0, i;
8770 u8 channel, flags;
8771 int band;
8772
8773 if (fwrq->m == 0) {
8774 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8775 mutex_lock(&priv->mutex);
8776 ret = ipw_set_channel(priv, 0);
8777 mutex_unlock(&priv->mutex);
8778 return ret;
8779 }
8780 /* if setting by freq convert to channel */
8781 if (fwrq->e == 1) {
8782 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8783 if (channel == 0)
8784 return -EINVAL;
8785 } else
8786 channel = fwrq->m;
8787
8788 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8789 return -EINVAL;
8790
8791 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8792 i = libipw_channel_to_index(priv->ieee, channel);
8793 if (i == -1)
8794 return -EINVAL;
8795
8796 flags = (band == LIBIPW_24GHZ_BAND) ?
8797 geo->bg[i].flags : geo->a[i].flags;
8798 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8799 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8800 return -EINVAL;
8801 }
8802 }
8803
8804 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8805 mutex_lock(&priv->mutex);
8806 ret = ipw_set_channel(priv, channel);
8807 mutex_unlock(&priv->mutex);
8808 return ret;
8809 }
8810
8811 static int ipw_wx_get_freq(struct net_device *dev,
8812 struct iw_request_info *info,
8813 union iwreq_data *wrqu, char *extra)
8814 {
8815 struct ipw_priv *priv = libipw_priv(dev);
8816
8817 wrqu->freq.e = 0;
8818
8819 /* If we are associated, trying to associate, or have a statically
8820 * configured CHANNEL then return that; otherwise return ANY */
8821 mutex_lock(&priv->mutex);
8822 if (priv->config & CFG_STATIC_CHANNEL ||
8823 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8824 int i;
8825
8826 i = libipw_channel_to_index(priv->ieee, priv->channel);
8827 BUG_ON(i == -1);
8828 wrqu->freq.e = 1;
8829
8830 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8831 case LIBIPW_52GHZ_BAND:
8832 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8833 break;
8834
8835 case LIBIPW_24GHZ_BAND:
8836 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8837 break;
8838
8839 default:
8840 BUG();
8841 }
8842 } else
8843 wrqu->freq.m = 0;
8844
8845 mutex_unlock(&priv->mutex);
8846 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8847 return 0;
8848 }
8849
8850 static int ipw_wx_set_mode(struct net_device *dev,
8851 struct iw_request_info *info,
8852 union iwreq_data *wrqu, char *extra)
8853 {
8854 struct ipw_priv *priv = libipw_priv(dev);
8855 int err = 0;
8856
8857 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8858
8859 switch (wrqu->mode) {
8860 #ifdef CONFIG_IPW2200_MONITOR
8861 case IW_MODE_MONITOR:
8862 #endif
8863 case IW_MODE_ADHOC:
8864 case IW_MODE_INFRA:
8865 break;
8866 case IW_MODE_AUTO:
8867 wrqu->mode = IW_MODE_INFRA;
8868 break;
8869 default:
8870 return -EINVAL;
8871 }
8872 if (wrqu->mode == priv->ieee->iw_mode)
8873 return 0;
8874
8875 mutex_lock(&priv->mutex);
8876
8877 ipw_sw_reset(priv, 0);
8878
8879 #ifdef CONFIG_IPW2200_MONITOR
8880 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8881 priv->net_dev->type = ARPHRD_ETHER;
8882
8883 if (wrqu->mode == IW_MODE_MONITOR)
8884 #ifdef CONFIG_IPW2200_RADIOTAP
8885 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8886 #else
8887 priv->net_dev->type = ARPHRD_IEEE80211;
8888 #endif
8889 #endif /* CONFIG_IPW2200_MONITOR */
8890
8891 /* Free the existing firmware and reset the fw_loaded
8892 * flag so ipw_load() will bring in the new firmware */
8893 free_firmware();
8894
8895 priv->ieee->iw_mode = wrqu->mode;
8896
8897 schedule_work(&priv->adapter_restart);
8898 mutex_unlock(&priv->mutex);
8899 return err;
8900 }
8901
8902 static int ipw_wx_get_mode(struct net_device *dev,
8903 struct iw_request_info *info,
8904 union iwreq_data *wrqu, char *extra)
8905 {
8906 struct ipw_priv *priv = libipw_priv(dev);
8907 mutex_lock(&priv->mutex);
8908 wrqu->mode = priv->ieee->iw_mode;
8909 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8910 mutex_unlock(&priv->mutex);
8911 return 0;
8912 }
8913
8914 /* Values are in microseconds */
8915 static const s32 timeout_duration[] = {
8916 350000,
8917 250000,
8918 75000,
8919 37000,
8920 25000,
8921 };
8922
8923 static const s32 period_duration[] = {
8924 400000,
8925 700000,
8926 1000000,
8927 1000000,
8928 1000000
8929 };
8930
8931 static int ipw_wx_get_range(struct net_device *dev,
8932 struct iw_request_info *info,
8933 union iwreq_data *wrqu, char *extra)
8934 {
8935 struct ipw_priv *priv = libipw_priv(dev);
8936 struct iw_range *range = (struct iw_range *)extra;
8937 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8938 int i = 0, j;
8939
8940 wrqu->data.length = sizeof(*range);
8941 memset(range, 0, sizeof(*range));
8942
8943 	/* 54 Mb/s == ~27 Mb/s real (802.11g) */
8944 range->throughput = 27 * 1000 * 1000;
8945
8946 range->max_qual.qual = 100;
8947 /* TODO: Find real max RSSI and stick here */
8948 range->max_qual.level = 0;
8949 range->max_qual.noise = 0;
8950 range->max_qual.updated = 7; /* Updated all three */
8951
8952 range->avg_qual.qual = 70;
8953 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8954 range->avg_qual.level = 0; /* FIXME to real average level */
8955 range->avg_qual.noise = 0;
8956 range->avg_qual.updated = 7; /* Updated all three */
8957 mutex_lock(&priv->mutex);
8958 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8959
8960 for (i = 0; i < range->num_bitrates; i++)
8961 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8962 500000;
8963
8964 range->max_rts = DEFAULT_RTS_THRESHOLD;
8965 range->min_frag = MIN_FRAG_THRESHOLD;
8966 range->max_frag = MAX_FRAG_THRESHOLD;
8967
8968 range->encoding_size[0] = 5;
8969 range->encoding_size[1] = 13;
8970 range->num_encoding_sizes = 2;
8971 range->max_encoding_tokens = WEP_KEYS;
8972
8973 /* Set the Wireless Extension versions */
8974 range->we_version_compiled = WIRELESS_EXT;
8975 range->we_version_source = 18;
8976
8977 i = 0;
8978 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8979 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8980 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8981 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8982 continue;
8983
8984 range->freq[i].i = geo->bg[j].channel;
8985 range->freq[i].m = geo->bg[j].freq * 100000;
8986 range->freq[i].e = 1;
8987 i++;
8988 }
8989 }
8990
8991 if (priv->ieee->mode & IEEE_A) {
8992 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8993 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8994 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8995 continue;
8996
8997 range->freq[i].i = geo->a[j].channel;
8998 range->freq[i].m = geo->a[j].freq * 100000;
8999 range->freq[i].e = 1;
9000 i++;
9001 }
9002 }
9003
9004 range->num_channels = i;
9005 range->num_frequency = i;
9006
9007 mutex_unlock(&priv->mutex);
9008
9009 /* Event capability (kernel + driver) */
9010 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
9011 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
9012 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
9013 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
9014 range->event_capa[1] = IW_EVENT_CAPA_K_1;
9015
9016 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
9017 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
9018
9019 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
9020
9021 IPW_DEBUG_WX("GET Range\n");
9022 return 0;
9023 }
9024
9025 static int ipw_wx_set_wap(struct net_device *dev,
9026 struct iw_request_info *info,
9027 union iwreq_data *wrqu, char *extra)
9028 {
9029 struct ipw_priv *priv = libipw_priv(dev);
9030
9031 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9032 return -EINVAL;
9033 mutex_lock(&priv->mutex);
9034 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
9035 is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
9036 /* we disable mandatory BSSID association */
9037 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9038 priv->config &= ~CFG_STATIC_BSSID;
9039 IPW_DEBUG_ASSOC("Attempting to associate with new "
9040 "parameters.\n");
9041 ipw_associate(priv);
9042 mutex_unlock(&priv->mutex);
9043 return 0;
9044 }
9045
9046 priv->config |= CFG_STATIC_BSSID;
9047 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9048 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9049 mutex_unlock(&priv->mutex);
9050 return 0;
9051 }
9052
9053 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9054 wrqu->ap_addr.sa_data);
9055
9056 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9057
9058 /* Network configuration changed -- force [re]association */
9059 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9060 if (!ipw_disassociate(priv))
9061 ipw_associate(priv);
9062
9063 mutex_unlock(&priv->mutex);
9064 return 0;
9065 }
9066
9067 static int ipw_wx_get_wap(struct net_device *dev,
9068 struct iw_request_info *info,
9069 union iwreq_data *wrqu, char *extra)
9070 {
9071 struct ipw_priv *priv = libipw_priv(dev);
9072
9073 /* If we are associated, trying to associate, or have a statically
9074 * configured BSSID then return that; otherwise return ANY */
9075 mutex_lock(&priv->mutex);
9076 if (priv->config & CFG_STATIC_BSSID ||
9077 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9078 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9079 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9080 } else
9081 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9082
9083 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9084 wrqu->ap_addr.sa_data);
9085 mutex_unlock(&priv->mutex);
9086 return 0;
9087 }
9088
9089 static int ipw_wx_set_essid(struct net_device *dev,
9090 struct iw_request_info *info,
9091 union iwreq_data *wrqu, char *extra)
9092 {
9093 struct ipw_priv *priv = libipw_priv(dev);
9094 int length;
9095 DECLARE_SSID_BUF(ssid);
9096
9097 mutex_lock(&priv->mutex);
9098
9099 if (!wrqu->essid.flags)
9100 {
9101 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9102 ipw_disassociate(priv);
9103 priv->config &= ~CFG_STATIC_ESSID;
9104 ipw_associate(priv);
9105 mutex_unlock(&priv->mutex);
9106 return 0;
9107 }
9108
9109 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9110
9111 priv->config |= CFG_STATIC_ESSID;
9112
9113 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9114 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9115 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9116 mutex_unlock(&priv->mutex);
9117 return 0;
9118 }
9119
9120 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9121 print_ssid(ssid, extra, length), length);
9122
9123 priv->essid_len = length;
9124 memcpy(priv->essid, extra, priv->essid_len);
9125
9126 /* Network configuration changed -- force [re]association */
9127 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9128 if (!ipw_disassociate(priv))
9129 ipw_associate(priv);
9130
9131 mutex_unlock(&priv->mutex);
9132 return 0;
9133 }
9134
9135 static int ipw_wx_get_essid(struct net_device *dev,
9136 struct iw_request_info *info,
9137 union iwreq_data *wrqu, char *extra)
9138 {
9139 struct ipw_priv *priv = libipw_priv(dev);
9140 DECLARE_SSID_BUF(ssid);
9141
9142 /* If we are associated, trying to associate, or have a statically
9143 * configured ESSID then return that; otherwise return ANY */
9144 mutex_lock(&priv->mutex);
9145 if (priv->config & CFG_STATIC_ESSID ||
9146 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9147 IPW_DEBUG_WX("Getting essid: '%s'\n",
9148 print_ssid(ssid, priv->essid, priv->essid_len));
9149 memcpy(extra, priv->essid, priv->essid_len);
9150 wrqu->essid.length = priv->essid_len;
9151 wrqu->essid.flags = 1; /* active */
9152 } else {
9153 IPW_DEBUG_WX("Getting essid: ANY\n");
9154 wrqu->essid.length = 0;
9155 wrqu->essid.flags = 0; /* active */
9156 }
9157 mutex_unlock(&priv->mutex);
9158 return 0;
9159 }
9160
9161 static int ipw_wx_set_nick(struct net_device *dev,
9162 struct iw_request_info *info,
9163 union iwreq_data *wrqu, char *extra)
9164 {
9165 struct ipw_priv *priv = libipw_priv(dev);
9166
9167 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9168 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9169 return -E2BIG;
9170 mutex_lock(&priv->mutex);
9171 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9172 memset(priv->nick, 0, sizeof(priv->nick));
9173 memcpy(priv->nick, extra, wrqu->data.length);
9174 IPW_DEBUG_TRACE("<<\n");
9175 mutex_unlock(&priv->mutex);
9176 return 0;
9177
9178 }
9179
9180 static int ipw_wx_get_nick(struct net_device *dev,
9181 struct iw_request_info *info,
9182 union iwreq_data *wrqu, char *extra)
9183 {
9184 struct ipw_priv *priv = libipw_priv(dev);
9185 IPW_DEBUG_WX("Getting nick\n");
9186 mutex_lock(&priv->mutex);
9187 wrqu->data.length = strlen(priv->nick);
9188 memcpy(extra, priv->nick, wrqu->data.length);
9189 wrqu->data.flags = 1; /* active */
9190 mutex_unlock(&priv->mutex);
9191 return 0;
9192 }
9193
9194 static int ipw_wx_set_sens(struct net_device *dev,
9195 struct iw_request_info *info,
9196 union iwreq_data *wrqu, char *extra)
9197 {
9198 struct ipw_priv *priv = libipw_priv(dev);
9199 int err = 0;
9200
9201 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9202 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9203 mutex_lock(&priv->mutex);
9204
9205 if (wrqu->sens.fixed == 0)
9206 {
9207 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9208 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9209 goto out;
9210 }
9211 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9212 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9213 err = -EINVAL;
9214 goto out;
9215 }
9216
9217 priv->roaming_threshold = wrqu->sens.value;
9218 priv->disassociate_threshold = 3*wrqu->sens.value;
9219 out:
9220 mutex_unlock(&priv->mutex);
9221 return err;
9222 }
9223
9224 static int ipw_wx_get_sens(struct net_device *dev,
9225 struct iw_request_info *info,
9226 union iwreq_data *wrqu, char *extra)
9227 {
9228 struct ipw_priv *priv = libipw_priv(dev);
9229 mutex_lock(&priv->mutex);
9230 wrqu->sens.fixed = 1;
9231 wrqu->sens.value = priv->roaming_threshold;
9232 mutex_unlock(&priv->mutex);
9233
9234 	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9235 		     wrqu->sens.disabled ? "OFF" : "ON", wrqu->sens.value);
9236
9237 return 0;
9238 }
9239
9240 static int ipw_wx_set_rate(struct net_device *dev,
9241 struct iw_request_info *info,
9242 union iwreq_data *wrqu, char *extra)
9243 {
9244 /* TODO: We should use semaphores or locks for access to priv */
9245 struct ipw_priv *priv = libipw_priv(dev);
9246 u32 target_rate = wrqu->bitrate.value;
9247 u32 fixed, mask;
9248
9249 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9250 /* value = X, fixed = 1 means only rate X */
9251 /* value = X, fixed = 0 means all rates lower equal X */
9252
9253 if (target_rate == -1) {
9254 fixed = 0;
9255 mask = LIBIPW_DEFAULT_RATES_MASK;
9256 /* Now we should reassociate */
9257 goto apply;
9258 }
9259
9260 mask = 0;
9261 fixed = wrqu->bitrate.fixed;
9262
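	/* Rate ladder: each step below adds its rate to the mask when a
	 * non-fixed rate was requested, and bails out as soon as the exact
	 * target rate is reached, so "fixed" selects a single rate while
	 * "auto up to X" accumulates every rate at or below X. */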
9263 if (target_rate == 1000000 || !fixed)
9264 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9265 if (target_rate == 1000000)
9266 goto apply;
9267
9268 if (target_rate == 2000000 || !fixed)
9269 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9270 if (target_rate == 2000000)
9271 goto apply;
9272
9273 if (target_rate == 5500000 || !fixed)
9274 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9275 if (target_rate == 5500000)
9276 goto apply;
9277
9278 if (target_rate == 6000000 || !fixed)
9279 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9280 if (target_rate == 6000000)
9281 goto apply;
9282
9283 if (target_rate == 9000000 || !fixed)
9284 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9285 if (target_rate == 9000000)
9286 goto apply;
9287
9288 if (target_rate == 11000000 || !fixed)
9289 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9290 if (target_rate == 11000000)
9291 goto apply;
9292
9293 if (target_rate == 12000000 || !fixed)
9294 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9295 if (target_rate == 12000000)
9296 goto apply;
9297
9298 if (target_rate == 18000000 || !fixed)
9299 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9300 if (target_rate == 18000000)
9301 goto apply;
9302
9303 if (target_rate == 24000000 || !fixed)
9304 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9305 if (target_rate == 24000000)
9306 goto apply;
9307
9308 if (target_rate == 36000000 || !fixed)
9309 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9310 if (target_rate == 36000000)
9311 goto apply;
9312
9313 if (target_rate == 48000000 || !fixed)
9314 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9315 if (target_rate == 48000000)
9316 goto apply;
9317
9318 if (target_rate == 54000000 || !fixed)
9319 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9320 if (target_rate == 54000000)
9321 goto apply;
9322
9323 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9324 return -EINVAL;
9325
9326 apply:
9327 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9328 mask, fixed ? "fixed" : "sub-rates");
9329 mutex_lock(&priv->mutex);
9330 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9331 priv->config &= ~CFG_FIXED_RATE;
9332 ipw_set_fixed_rate(priv, priv->ieee->mode);
9333 } else
9334 priv->config |= CFG_FIXED_RATE;
9335
9336 if (priv->rates_mask == mask) {
9337 IPW_DEBUG_WX("Mask set to current mask.\n");
9338 mutex_unlock(&priv->mutex);
9339 return 0;
9340 }
9341
9342 priv->rates_mask = mask;
9343
9344 /* Network configuration changed -- force [re]association */
9345 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9346 if (!ipw_disassociate(priv))
9347 ipw_associate(priv);
9348
9349 mutex_unlock(&priv->mutex);
9350 return 0;
9351 }
9352
9353 static int ipw_wx_get_rate(struct net_device *dev,
9354 struct iw_request_info *info,
9355 union iwreq_data *wrqu, char *extra)
9356 {
9357 struct ipw_priv *priv = libipw_priv(dev);
9358 mutex_lock(&priv->mutex);
9359 wrqu->bitrate.value = priv->last_rate;
9360 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9361 mutex_unlock(&priv->mutex);
9362 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9363 return 0;
9364 }
9365
9366 static int ipw_wx_set_rts(struct net_device *dev,
9367 struct iw_request_info *info,
9368 union iwreq_data *wrqu, char *extra)
9369 {
9370 struct ipw_priv *priv = libipw_priv(dev);
9371 mutex_lock(&priv->mutex);
9372 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9373 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9374 else {
9375 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9376 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9377 mutex_unlock(&priv->mutex);
9378 return -EINVAL;
9379 }
9380 priv->rts_threshold = wrqu->rts.value;
9381 }
9382
9383 ipw_send_rts_threshold(priv, priv->rts_threshold);
9384 mutex_unlock(&priv->mutex);
9385 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9386 return 0;
9387 }
9388
9389 static int ipw_wx_get_rts(struct net_device *dev,
9390 struct iw_request_info *info,
9391 union iwreq_data *wrqu, char *extra)
9392 {
9393 struct ipw_priv *priv = libipw_priv(dev);
9394 mutex_lock(&priv->mutex);
9395 wrqu->rts.value = priv->rts_threshold;
9396 wrqu->rts.fixed = 0; /* no auto select */
9397 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9398 mutex_unlock(&priv->mutex);
9399 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9400 return 0;
9401 }
9402
9403 static int ipw_wx_set_txpow(struct net_device *dev,
9404 struct iw_request_info *info,
9405 union iwreq_data *wrqu, char *extra)
9406 {
9407 struct ipw_priv *priv = libipw_priv(dev);
9408 int err = 0;
9409
9410 mutex_lock(&priv->mutex);
9411 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9412 err = -EINPROGRESS;
9413 goto out;
9414 }
9415
9416 if (!wrqu->power.fixed)
9417 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9418
9419 if (wrqu->power.flags != IW_TXPOW_DBM) {
9420 err = -EINVAL;
9421 goto out;
9422 }
9423
9424 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9425 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9426 err = -EINVAL;
9427 goto out;
9428 }
9429
9430 priv->tx_power = wrqu->power.value;
9431 err = ipw_set_tx_power(priv);
9432 out:
9433 mutex_unlock(&priv->mutex);
9434 return err;
9435 }
9436
9437 static int ipw_wx_get_txpow(struct net_device *dev,
9438 struct iw_request_info *info,
9439 union iwreq_data *wrqu, char *extra)
9440 {
9441 struct ipw_priv *priv = libipw_priv(dev);
9442 mutex_lock(&priv->mutex);
9443 wrqu->power.value = priv->tx_power;
9444 wrqu->power.fixed = 1;
9445 wrqu->power.flags = IW_TXPOW_DBM;
9446 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9447 mutex_unlock(&priv->mutex);
9448
9449 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9450 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9451
9452 return 0;
9453 }
9454
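/* Descriptive note (added): SIOCSIWFRAG handler -- "disabled" or non-fixed
 * requests restore DEFAULT_FTS; otherwise the threshold is range-checked and
 * stored rounded down to an even value, presumably because the firmware
 * expects an even fragmentation threshold. */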
9455 static int ipw_wx_set_frag(struct net_device *dev,
9456 struct iw_request_info *info,
9457 union iwreq_data *wrqu, char *extra)
9458 {
9459 struct ipw_priv *priv = libipw_priv(dev);
9460 mutex_lock(&priv->mutex);
9461 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9462 priv->ieee->fts = DEFAULT_FTS;
9463 else {
9464 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9465 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9466 mutex_unlock(&priv->mutex);
9467 return -EINVAL;
9468 }
9469
9470 priv->ieee->fts = wrqu->frag.value & ~0x1;
9471 }
9472
9473 ipw_send_frag_threshold(priv, wrqu->frag.value);
9474 mutex_unlock(&priv->mutex);
9475 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9476 return 0;
9477 }
9478
9479 static int ipw_wx_get_frag(struct net_device *dev,
9480 struct iw_request_info *info,
9481 union iwreq_data *wrqu, char *extra)
9482 {
9483 struct ipw_priv *priv = libipw_priv(dev);
9484 mutex_lock(&priv->mutex);
9485 wrqu->frag.value = priv->ieee->fts;
9486 wrqu->frag.fixed = 0; /* no auto select */
9487 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9488 mutex_unlock(&priv->mutex);
9489 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9490
9491 return 0;
9492 }
9493
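/* Descriptive note (added): SIOCSIWRETRY handler -- only IW_RETRY_LIMIT is
 * supported (no lifetime). Depending on the flags, the short limit, the long
 * limit, or both are updated and sent to the firmware. */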
9494 static int ipw_wx_set_retry(struct net_device *dev,
9495 struct iw_request_info *info,
9496 union iwreq_data *wrqu, char *extra)
9497 {
9498 struct ipw_priv *priv = libipw_priv(dev);
9499
9500 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9501 return -EINVAL;
9502
9503 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9504 return 0;
9505
9506 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9507 return -EINVAL;
9508
9509 mutex_lock(&priv->mutex);
9510 if (wrqu->retry.flags & IW_RETRY_SHORT)
9511 priv->short_retry_limit = (u8) wrqu->retry.value;
9512 else if (wrqu->retry.flags & IW_RETRY_LONG)
9513 priv->long_retry_limit = (u8) wrqu->retry.value;
9514 else {
9515 priv->short_retry_limit = (u8) wrqu->retry.value;
9516 priv->long_retry_limit = (u8) wrqu->retry.value;
9517 }
9518
9519 ipw_send_retry_limit(priv, priv->short_retry_limit,
9520 priv->long_retry_limit);
9521 mutex_unlock(&priv->mutex);
9522 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9523 priv->short_retry_limit, priv->long_retry_limit);
9524 return 0;
9525 }
9526
9527 static int ipw_wx_get_retry(struct net_device *dev,
9528 struct iw_request_info *info,
9529 union iwreq_data *wrqu, char *extra)
9530 {
9531 struct ipw_priv *priv = libipw_priv(dev);
9532
9533 mutex_lock(&priv->mutex);
9534 wrqu->retry.disabled = 0;
9535
9536 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9537 mutex_unlock(&priv->mutex);
9538 return -EINVAL;
9539 }
9540
9541 if (wrqu->retry.flags & IW_RETRY_LONG) {
9542 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9543 wrqu->retry.value = priv->long_retry_limit;
9544 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9545 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9546 wrqu->retry.value = priv->short_retry_limit;
9547 } else {
9548 wrqu->retry.flags = IW_RETRY_LIMIT;
9549 wrqu->retry.value = priv->short_retry_limit;
9550 }
9551 mutex_unlock(&priv->mutex);
9552
9553 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9554
9555 return 0;
9556 }
9557
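/* Descriptive note (added): SIOCSIWSCAN handler -- selects one of the
 * deferred scan work items (a directed scan for a specific ESSID, a passive
 * scan, or the normal active broadcast scan) and schedules it after
 * dropping the mutex. */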
9558 static int ipw_wx_set_scan(struct net_device *dev,
9559 struct iw_request_info *info,
9560 union iwreq_data *wrqu, char *extra)
9561 {
9562 struct ipw_priv *priv = libipw_priv(dev);
9563 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9564 struct delayed_work *work = NULL;
9565
9566 mutex_lock(&priv->mutex);
9567
9568 priv->user_requested_scan = 1;
9569
9570 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9571 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9572 int len = min((int)req->essid_len,
9573 (int)sizeof(priv->direct_scan_ssid));
9574 memcpy(priv->direct_scan_ssid, req->essid, len);
9575 priv->direct_scan_ssid_len = len;
9576 work = &priv->request_direct_scan;
9577 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9578 work = &priv->request_passive_scan;
9579 }
9580 } else {
9581 /* Normal active broadcast scan */
9582 work = &priv->request_scan;
9583 }
9584
9585 mutex_unlock(&priv->mutex);
9586
9587 IPW_DEBUG_WX("Start scan\n");
9588
9589 schedule_delayed_work(work, 0);
9590
9591 return 0;
9592 }
9593
9594 static int ipw_wx_get_scan(struct net_device *dev,
9595 struct iw_request_info *info,
9596 union iwreq_data *wrqu, char *extra)
9597 {
9598 struct ipw_priv *priv = libipw_priv(dev);
9599 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9600 }
9601
9602 static int ipw_wx_set_encode(struct net_device *dev,
9603 struct iw_request_info *info,
9604 union iwreq_data *wrqu, char *key)
9605 {
9606 struct ipw_priv *priv = libipw_priv(dev);
9607 int ret;
9608 u32 cap = priv->capability;
9609
9610 mutex_lock(&priv->mutex);
9611 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9612
9613 /* In IBSS mode, we need to notify the firmware to update
9614 * the beacon info after we changed the capability. */
9615 if (cap != priv->capability &&
9616 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9617 priv->status & STATUS_ASSOCIATED)
9618 ipw_disassociate(priv);
9619
9620 mutex_unlock(&priv->mutex);
9621 return ret;
9622 }
9623
9624 static int ipw_wx_get_encode(struct net_device *dev,
9625 struct iw_request_info *info,
9626 union iwreq_data *wrqu, char *key)
9627 {
9628 struct ipw_priv *priv = libipw_priv(dev);
9629 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9630 }
9631
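/* Descriptive note (added): SIOCSIWPOWER handler -- "disabled" forces CAM
 * (constantly awake mode); otherwise power saving is enabled, defaulting to
 * the BATTERY level if no specific level has been chosen yet. */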
9632 static int ipw_wx_set_power(struct net_device *dev,
9633 struct iw_request_info *info,
9634 union iwreq_data *wrqu, char *extra)
9635 {
9636 struct ipw_priv *priv = libipw_priv(dev);
9637 int err;
9638 mutex_lock(&priv->mutex);
9639 if (wrqu->power.disabled) {
9640 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9641 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9642 if (err) {
9643 IPW_DEBUG_WX("failed setting power mode.\n");
9644 mutex_unlock(&priv->mutex);
9645 return err;
9646 }
9647 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9648 mutex_unlock(&priv->mutex);
9649 return 0;
9650 }
9651
9652 switch (wrqu->power.flags & IW_POWER_MODE) {
9653 case IW_POWER_ON: /* If not specified */
9654 case IW_POWER_MODE: /* If set all mask */
9655 case IW_POWER_ALL_R: /* If explicitly state all */
9656 break;
9657 default: /* Otherwise we don't support it */
9658 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9659 wrqu->power.flags);
9660 mutex_unlock(&priv->mutex);
9661 return -EOPNOTSUPP;
9662 }
9663
9664 /* If the user hasn't specified a power management mode yet, default
9665 * to BATTERY */
9666 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9667 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9668 else
9669 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9670
9671 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9672 if (err) {
9673 IPW_DEBUG_WX("failed setting power mode.\n");
9674 mutex_unlock(&priv->mutex);
9675 return err;
9676 }
9677
9678 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9679 mutex_unlock(&priv->mutex);
9680 return 0;
9681 }
9682
9683 static int ipw_wx_get_power(struct net_device *dev,
9684 struct iw_request_info *info,
9685 union iwreq_data *wrqu, char *extra)
9686 {
9687 struct ipw_priv *priv = libipw_priv(dev);
9688 mutex_lock(&priv->mutex);
9689 if (!(priv->power_mode & IPW_POWER_ENABLED))
9690 wrqu->power.disabled = 1;
9691 else
9692 wrqu->power.disabled = 0;
9693
9694 mutex_unlock(&priv->mutex);
9695 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9696
9697 return 0;
9698 }
9699
9700 static int ipw_wx_set_powermode(struct net_device *dev,
9701 struct iw_request_info *info,
9702 union iwreq_data *wrqu, char *extra)
9703 {
9704 struct ipw_priv *priv = libipw_priv(dev);
9705 int mode = *(int *)extra;
9706 int err;
9707
9708 mutex_lock(&priv->mutex);
9709 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9710 mode = IPW_POWER_AC;
9711
9712 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9713 err = ipw_send_power_mode(priv, mode);
9714 if (err) {
9715 IPW_DEBUG_WX("failed setting power mode.\n");
9716 mutex_unlock(&priv->mutex);
9717 return err;
9718 }
9719 priv->power_mode = IPW_POWER_ENABLED | mode;
9720 }
9721 mutex_unlock(&priv->mutex);
9722 return 0;
9723 }
9724
9725 #define MAX_WX_STRING 80
9726 static int ipw_wx_get_powermode(struct net_device *dev,
9727 struct iw_request_info *info,
9728 union iwreq_data *wrqu, char *extra)
9729 {
9730 struct ipw_priv *priv = libipw_priv(dev);
9731 int level = IPW_POWER_LEVEL(priv->power_mode);
9732 char *p = extra;
9733
9734 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9735
9736 switch (level) {
9737 case IPW_POWER_AC:
9738 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9739 break;
9740 case IPW_POWER_BATTERY:
9741 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9742 break;
9743 default:
9744 p += snprintf(p, MAX_WX_STRING - (p - extra),
9745 "(Timeout %dms, Period %dms)",
9746 timeout_duration[level - 1] / 1000,
9747 period_duration[level - 1] / 1000);
9748 }
9749
9750 if (!(priv->power_mode & IPW_POWER_ENABLED))
9751 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9752
9753 wrqu->data.length = p - extra + 1;
9754
9755 return 0;
9756 }
9757
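/* Descriptive note (added): private "set_mode" ioctl -- selects any
 * combination of 802.11a/b/g. 802.11a is only valid on 2915ABG hardware;
 * the band, modulation and rate tables are rebuilt and a [re]association
 * is forced. */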
9758 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9759 struct iw_request_info *info,
9760 union iwreq_data *wrqu, char *extra)
9761 {
9762 struct ipw_priv *priv = libipw_priv(dev);
9763 int mode = *(int *)extra;
9764 u8 band = 0, modulation = 0;
9765
9766 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9767 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9768 return -EINVAL;
9769 }
9770 mutex_lock(&priv->mutex);
9771 if (priv->adapter == IPW_2915ABG) {
9772 priv->ieee->abg_true = 1;
9773 if (mode & IEEE_A) {
9774 band |= LIBIPW_52GHZ_BAND;
9775 modulation |= LIBIPW_OFDM_MODULATION;
9776 } else
9777 priv->ieee->abg_true = 0;
9778 } else {
9779 if (mode & IEEE_A) {
9780 IPW_WARNING("Attempt to set 2200BG into "
9781 "802.11a mode\n");
9782 mutex_unlock(&priv->mutex);
9783 return -EINVAL;
9784 }
9785
9786 priv->ieee->abg_true = 0;
9787 }
9788
9789 if (mode & IEEE_B) {
9790 band |= LIBIPW_24GHZ_BAND;
9791 modulation |= LIBIPW_CCK_MODULATION;
9792 } else
9793 priv->ieee->abg_true = 0;
9794
9795 if (mode & IEEE_G) {
9796 band |= LIBIPW_24GHZ_BAND;
9797 modulation |= LIBIPW_OFDM_MODULATION;
9798 } else
9799 priv->ieee->abg_true = 0;
9800
9801 priv->ieee->mode = mode;
9802 priv->ieee->freq_band = band;
9803 priv->ieee->modulation = modulation;
9804 init_supported_rates(priv, &priv->rates);
9805
9806 /* Network configuration changed -- force [re]association */
9807 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9808 if (!ipw_disassociate(priv)) {
9809 ipw_send_supported_rates(priv, &priv->rates);
9810 ipw_associate(priv);
9811 }
9812
9813 /* Update the band LEDs */
9814 ipw_led_band_on(priv);
9815
9816 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9817 mode & IEEE_A ? 'a' : '.',
9818 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9819 mutex_unlock(&priv->mutex);
9820 return 0;
9821 }
9822
9823 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9824 struct iw_request_info *info,
9825 union iwreq_data *wrqu, char *extra)
9826 {
9827 struct ipw_priv *priv = libipw_priv(dev);
9828 mutex_lock(&priv->mutex);
9829 switch (priv->ieee->mode) {
9830 case IEEE_A:
9831 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9832 break;
9833 case IEEE_B:
9834 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9835 break;
9836 case IEEE_A | IEEE_B:
9837 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9838 break;
9839 case IEEE_G:
9840 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9841 break;
9842 case IEEE_A | IEEE_G:
9843 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9844 break;
9845 case IEEE_B | IEEE_G:
9846 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9847 break;
9848 case IEEE_A | IEEE_B | IEEE_G:
9849 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9850 break;
9851 default:
9852 strncpy(extra, "unknown", MAX_WX_STRING);
9853 break;
9854 }
9855
9856 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9857
9858 wrqu->data.length = strlen(extra) + 1;
9859 mutex_unlock(&priv->mutex);
9860
9861 return 0;
9862 }
9863
9864 static int ipw_wx_set_preamble(struct net_device *dev,
9865 struct iw_request_info *info,
9866 union iwreq_data *wrqu, char *extra)
9867 {
9868 struct ipw_priv *priv = libipw_priv(dev);
9869 int mode = *(int *)extra;
9870 mutex_lock(&priv->mutex);
9871 /* Switching from SHORT -> LONG requires a disassociation */
9872 if (mode == 1) {
9873 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9874 priv->config |= CFG_PREAMBLE_LONG;
9875
9876 /* Network configuration changed -- force [re]association */
9877 IPW_DEBUG_ASSOC
9878 ("[re]association triggered due to preamble change.\n");
9879 if (!ipw_disassociate(priv))
9880 ipw_associate(priv);
9881 }
9882 goto done;
9883 }
9884
9885 if (mode == 0) {
9886 priv->config &= ~CFG_PREAMBLE_LONG;
9887 goto done;
9888 }
9889 mutex_unlock(&priv->mutex);
9890 return -EINVAL;
9891
9892 done:
9893 mutex_unlock(&priv->mutex);
9894 return 0;
9895 }
9896
9897 static int ipw_wx_get_preamble(struct net_device *dev,
9898 struct iw_request_info *info,
9899 union iwreq_data *wrqu, char *extra)
9900 {
9901 struct ipw_priv *priv = libipw_priv(dev);
9902 mutex_lock(&priv->mutex);
9903 if (priv->config & CFG_PREAMBLE_LONG)
9904 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9905 else
9906 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9907 mutex_unlock(&priv->mutex);
9908 return 0;
9909 }
9910
9911 #ifdef CONFIG_IPW2200_MONITOR
9912 static int ipw_wx_set_monitor(struct net_device *dev,
9913 struct iw_request_info *info,
9914 union iwreq_data *wrqu, char *extra)
9915 {
9916 struct ipw_priv *priv = libipw_priv(dev);
9917 int *parms = (int *)extra;
9918 int enable = (parms[0] > 0);
9919 mutex_lock(&priv->mutex);
9920 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9921 if (enable) {
9922 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9923 #ifdef CONFIG_IPW2200_RADIOTAP
9924 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9925 #else
9926 priv->net_dev->type = ARPHRD_IEEE80211;
9927 #endif
9928 schedule_work(&priv->adapter_restart);
9929 }
9930
9931 ipw_set_channel(priv, parms[1]);
9932 } else {
9933 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9934 mutex_unlock(&priv->mutex);
9935 return 0;
9936 }
9937 priv->net_dev->type = ARPHRD_ETHER;
9938 schedule_work(&priv->adapter_restart);
9939 }
9940 mutex_unlock(&priv->mutex);
9941 return 0;
9942 }
9943
9944 #endif /* CONFIG_IPW2200_MONITOR */
9945
9946 static int ipw_wx_reset(struct net_device *dev,
9947 struct iw_request_info *info,
9948 union iwreq_data *wrqu, char *extra)
9949 {
9950 struct ipw_priv *priv = libipw_priv(dev);
9951 IPW_DEBUG_WX("RESET\n");
9952 schedule_work(&priv->adapter_restart);
9953 return 0;
9954 }
9955
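/* Descriptive note (added): private "sw_reset" ioctl -- restores the
 * module-parameter driven defaults, frees the cached firmware and restarts
 * the adapter, disables any configured encryption via libipw and, unless RF
 * kill is active, forces a [re]association. */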
9956 static int ipw_wx_sw_reset(struct net_device *dev,
9957 struct iw_request_info *info,
9958 union iwreq_data *wrqu, char *extra)
9959 {
9960 struct ipw_priv *priv = libipw_priv(dev);
9961 union iwreq_data wrqu_sec = {
9962 .encoding = {
9963 .flags = IW_ENCODE_DISABLED,
9964 },
9965 };
9966 int ret;
9967
9968 IPW_DEBUG_WX("SW_RESET\n");
9969
9970 mutex_lock(&priv->mutex);
9971
9972 ret = ipw_sw_reset(priv, 2);
9973 if (!ret) {
9974 free_firmware();
9975 ipw_adapter_restart(priv);
9976 }
9977
9978 /* The SW reset bit might have been toggled on by the 'disable'
9979 * module parameter, so take appropriate action */
9980 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9981
9982 mutex_unlock(&priv->mutex);
9983 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9984 mutex_lock(&priv->mutex);
9985
9986 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9987 /* Configuration likely changed -- force [re]association */
9988 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9989 "reset.\n");
9990 if (!ipw_disassociate(priv))
9991 ipw_associate(priv);
9992 }
9993
9994 mutex_unlock(&priv->mutex);
9995
9996 return 0;
9997 }
9998
9999 /* Rebase the WE IOCTLs to zero for the handler array */
10000 static iw_handler ipw_wx_handlers[] = {
10001 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
10002 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
10003 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10004 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10005 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10006 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10007 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10008 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10009 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10010 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10011 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10012 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10013 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10014 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10015 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10016 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10017 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10018 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10019 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10020 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10021 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10022 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10023 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10024 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10025 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10026 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10027 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10028 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10029 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10030 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10031 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10032 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10033 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10034 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10035 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10036 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10037 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10038 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10039 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10040 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10041 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10042 };
10043
10044 enum {
10045 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10046 IPW_PRIV_GET_POWER,
10047 IPW_PRIV_SET_MODE,
10048 IPW_PRIV_GET_MODE,
10049 IPW_PRIV_SET_PREAMBLE,
10050 IPW_PRIV_GET_PREAMBLE,
10051 IPW_PRIV_RESET,
10052 IPW_PRIV_SW_RESET,
10053 #ifdef CONFIG_IPW2200_MONITOR
10054 IPW_PRIV_SET_MONITOR,
10055 #endif
10056 };
10057
10058 static struct iw_priv_args ipw_priv_args[] = {
10059 {
10060 .cmd = IPW_PRIV_SET_POWER,
10061 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10062 .name = "set_power"},
10063 {
10064 .cmd = IPW_PRIV_GET_POWER,
10065 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10066 .name = "get_power"},
10067 {
10068 .cmd = IPW_PRIV_SET_MODE,
10069 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10070 .name = "set_mode"},
10071 {
10072 .cmd = IPW_PRIV_GET_MODE,
10073 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10074 .name = "get_mode"},
10075 {
10076 .cmd = IPW_PRIV_SET_PREAMBLE,
10077 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10078 .name = "set_preamble"},
10079 {
10080 .cmd = IPW_PRIV_GET_PREAMBLE,
10081 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10082 .name = "get_preamble"},
10083 {
10084 IPW_PRIV_RESET,
10085 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10086 {
10087 IPW_PRIV_SW_RESET,
10088 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10089 #ifdef CONFIG_IPW2200_MONITOR
10090 {
10091 IPW_PRIV_SET_MONITOR,
10092 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10093 #endif /* CONFIG_IPW2200_MONITOR */
10094 };
10095
10096 static iw_handler ipw_priv_handler[] = {
10097 ipw_wx_set_powermode,
10098 ipw_wx_get_powermode,
10099 ipw_wx_set_wireless_mode,
10100 ipw_wx_get_wireless_mode,
10101 ipw_wx_set_preamble,
10102 ipw_wx_get_preamble,
10103 ipw_wx_reset,
10104 ipw_wx_sw_reset,
10105 #ifdef CONFIG_IPW2200_MONITOR
10106 ipw_wx_set_monitor,
10107 #endif
10108 };
10109
10110 static struct iw_handler_def ipw_wx_handler_def = {
10111 .standard = ipw_wx_handlers,
10112 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10113 .num_private = ARRAY_SIZE(ipw_priv_handler),
10114 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10115 .private = ipw_priv_handler,
10116 .private_args = ipw_priv_args,
10117 .get_wireless_stats = ipw_get_wireless_stats,
10118 };
10119
10120 /*
10121 * Get wireless statistics.
10122 * Called by /proc/net/wireless
10123 * Also called by SIOCGIWSTATS
10124 */
10125 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10126 {
10127 struct ipw_priv *priv = libipw_priv(dev);
10128 struct iw_statistics *wstats;
10129
10130 wstats = &priv->wstats;
10131
10132 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10133 * netdev->get_wireless_stats seems to be called before fw is
10134 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10135 	 * and associated; if not associated, the values are all meaningless
10136 	 * anyway, so zero them all and mark them INVALID */
10137 if (!(priv->status & STATUS_ASSOCIATED)) {
10138 wstats->miss.beacon = 0;
10139 wstats->discard.retries = 0;
10140 wstats->qual.qual = 0;
10141 wstats->qual.level = 0;
10142 wstats->qual.noise = 0;
10143 wstats->qual.updated = 7;
10144 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10145 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10146 return wstats;
10147 }
10148
10149 wstats->qual.qual = priv->quality;
10150 wstats->qual.level = priv->exp_avg_rssi;
10151 wstats->qual.noise = priv->exp_avg_noise;
10152 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10153 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10154
10155 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10156 wstats->discard.retries = priv->last_tx_failures;
10157 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10158
10159 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10160 goto fail_get_ordinal;
10161 wstats->discard.retries += tx_retry; */
10162
10163 return wstats;
10164 }
10165
10166 /* net device stuff */
10167
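/* Descriptive note (added): fill the firmware system configuration block
 * with the driver defaults. The "antenna" module parameter is validated
 * here and falls back to CFG_SYS_ANTENNA_BOTH if it is out of range. */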
10168 static void init_sys_config(struct ipw_sys_config *sys_config)
10169 {
10170 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10171 sys_config->bt_coexistence = 0;
10172 sys_config->answer_broadcast_ssid_probe = 0;
10173 sys_config->accept_all_data_frames = 0;
10174 sys_config->accept_non_directed_frames = 1;
10175 sys_config->exclude_unicast_unencrypted = 0;
10176 sys_config->disable_unicast_decryption = 1;
10177 sys_config->exclude_multicast_unencrypted = 0;
10178 sys_config->disable_multicast_decryption = 1;
10179 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10180 antenna = CFG_SYS_ANTENNA_BOTH;
10181 sys_config->antenna_diversity = antenna;
10182 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10183 sys_config->dot11g_auto_detection = 0;
10184 sys_config->enable_cts_to_self = 0;
10185 sys_config->bt_coexist_collision_thr = 0;
10186 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10187 sys_config->silence_threshold = 0x1e;
10188 }
10189
10190 static int ipw_net_open(struct net_device *dev)
10191 {
10192 IPW_DEBUG_INFO("dev->open\n");
10193 netif_start_queue(dev);
10194 return 0;
10195 }
10196
10197 static int ipw_net_stop(struct net_device *dev)
10198 {
10199 IPW_DEBUG_INFO("dev->close\n");
10200 netif_stop_queue(dev);
10201 return 0;
10202 }
10203
10204 /*
10205 todo:
10206
10207 modify to send one TFD per fragment instead of using chunking; otherwise
10208 we need to heavily modify libipw_skb_to_txb.
10209 */
10210
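/* Descriptive note (added): build a transmit frame descriptor (TFD) for a
 * libipw txb -- look up (or, in IBSS mode, add) the destination station,
 * copy the 802.11 header, set the rate/preamble/security flags, and DMA-map
 * up to NUM_TFD_CHUNKS - 2 fragments, coalescing any remaining fragments
 * into a single chunk. */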
10211 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10212 int pri)
10213 {
10214 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10215 txb->fragments[0]->data;
10216 int i = 0;
10217 struct tfd_frame *tfd;
10218 #ifdef CONFIG_IPW2200_QOS
10219 int tx_id = ipw_get_tx_queue_number(priv, pri);
10220 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10221 #else
10222 struct clx2_tx_queue *txq = &priv->txq[0];
10223 #endif
10224 struct clx2_queue *q = &txq->q;
10225 u8 id, hdr_len, unicast;
10226 int fc;
10227
10228 if (!(priv->status & STATUS_ASSOCIATED))
10229 goto drop;
10230
10231 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10232 switch (priv->ieee->iw_mode) {
10233 case IW_MODE_ADHOC:
10234 unicast = !is_multicast_ether_addr(hdr->addr1);
10235 id = ipw_find_station(priv, hdr->addr1);
10236 if (id == IPW_INVALID_STATION) {
10237 id = ipw_add_station(priv, hdr->addr1);
10238 if (id == IPW_INVALID_STATION) {
10239 IPW_WARNING("Attempt to send data to "
10240 "invalid cell: %pM\n",
10241 hdr->addr1);
10242 goto drop;
10243 }
10244 }
10245 break;
10246
10247 case IW_MODE_INFRA:
10248 default:
10249 unicast = !is_multicast_ether_addr(hdr->addr3);
10250 id = 0;
10251 break;
10252 }
10253
10254 tfd = &txq->bd[q->first_empty];
10255 txq->txb[q->first_empty] = txb;
10256 memset(tfd, 0, sizeof(*tfd));
10257 tfd->u.data.station_number = id;
10258
10259 tfd->control_flags.message_type = TX_FRAME_TYPE;
10260 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10261
10262 tfd->u.data.cmd_id = DINO_CMD_TX;
10263 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10264
10265 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10266 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10267 else
10268 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10269
10270 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10271 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10272
10273 fc = le16_to_cpu(hdr->frame_ctl);
10274 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10275
10276 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10277
10278 if (likely(unicast))
10279 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10280
10281 if (txb->encrypted && !priv->ieee->host_encrypt) {
10282 switch (priv->ieee->sec.level) {
10283 case SEC_LEVEL_3:
10284 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10285 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10286 /* XXX: ACK flag must be set for CCMP even if it
10287 * is a multicast/broadcast packet, because CCMP
10288 * group communication encrypted by GTK is
10289 * actually done by the AP. */
10290 if (!unicast)
10291 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10292
10293 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10294 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10295 tfd->u.data.key_index = 0;
10296 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10297 break;
10298 case SEC_LEVEL_2:
10299 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10300 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10301 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10302 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10303 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10304 break;
10305 case SEC_LEVEL_1:
10306 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10307 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10308 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10309 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10310 40)
10311 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10312 else
10313 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10314 break;
10315 case SEC_LEVEL_0:
10316 break;
10317 default:
10318 printk(KERN_ERR "Unknown security level %d\n",
10319 priv->ieee->sec.level);
10320 break;
10321 }
10322 } else
10323 /* No hardware encryption */
10324 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10325
10326 #ifdef CONFIG_IPW2200_QOS
10327 if (fc & IEEE80211_STYPE_QOS_DATA)
10328 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10329 #endif /* CONFIG_IPW2200_QOS */
10330
10331 /* payload */
10332 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10333 txb->nr_frags));
10334 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10335 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10336 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10337 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10338 i, le32_to_cpu(tfd->u.data.num_chunks),
10339 txb->fragments[i]->len - hdr_len);
10340 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10341 i, tfd->u.data.num_chunks,
10342 txb->fragments[i]->len - hdr_len);
10343 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10344 txb->fragments[i]->len - hdr_len);
10345
10346 tfd->u.data.chunk_ptr[i] =
10347 cpu_to_le32(pci_map_single
10348 (priv->pci_dev,
10349 txb->fragments[i]->data + hdr_len,
10350 txb->fragments[i]->len - hdr_len,
10351 PCI_DMA_TODEVICE));
10352 tfd->u.data.chunk_len[i] =
10353 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10354 }
10355
10356 if (i != txb->nr_frags) {
10357 struct sk_buff *skb;
10358 u16 remaining_bytes = 0;
10359 int j;
10360
10361 for (j = i; j < txb->nr_frags; j++)
10362 remaining_bytes += txb->fragments[j]->len - hdr_len;
10363
10364 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10365 remaining_bytes);
10366 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10367 if (skb != NULL) {
10368 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10369 for (j = i; j < txb->nr_frags; j++) {
10370 int size = txb->fragments[j]->len - hdr_len;
10371
10372 printk(KERN_INFO "Adding frag %d %d...\n",
10373 j, size);
10374 memcpy(skb_put(skb, size),
10375 txb->fragments[j]->data + hdr_len, size);
10376 }
10377 dev_kfree_skb_any(txb->fragments[i]);
10378 txb->fragments[i] = skb;
10379 tfd->u.data.chunk_ptr[i] =
10380 cpu_to_le32(pci_map_single
10381 (priv->pci_dev, skb->data,
10382 remaining_bytes,
10383 PCI_DMA_TODEVICE));
10384
10385 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10386 }
10387 }
10388
10389 /* kick DMA */
10390 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10391 ipw_write32(priv, q->reg_w, q->first_empty);
10392
10393 if (ipw_tx_queue_space(q) < q->high_mark)
10394 netif_stop_queue(priv->net_dev);
10395
10396 return NETDEV_TX_OK;
10397
10398 drop:
10399 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10400 libipw_txb_free(txb);
10401 return NETDEV_TX_OK;
10402 }
10403
10404 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10405 {
10406 struct ipw_priv *priv = libipw_priv(dev);
10407 #ifdef CONFIG_IPW2200_QOS
10408 int tx_id = ipw_get_tx_queue_number(priv, pri);
10409 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10410 #else
10411 struct clx2_tx_queue *txq = &priv->txq[0];
10412 #endif /* CONFIG_IPW2200_QOS */
10413
10414 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10415 return 1;
10416
10417 return 0;
10418 }
10419
10420 #ifdef CONFIG_IPW2200_PROMISCUOUS
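/* Descriptive note (added): mirror a transmitted frame to the promiscuous
 * (rtap) interface -- apply the prom filter, optionally truncate to the
 * 802.11 header, and prepend a minimal radiotap header that carries only
 * the channel information. */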
10421 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10422 struct libipw_txb *txb)
10423 {
10424 struct libipw_rx_stats dummystats;
10425 struct ieee80211_hdr *hdr;
10426 u8 n;
10427 u16 filter = priv->prom_priv->filter;
10428 int hdr_only = 0;
10429
10430 if (filter & IPW_PROM_NO_TX)
10431 return;
10432
10433 memset(&dummystats, 0, sizeof(dummystats));
10434
10435 /* Filtering of fragment chains is done against the first fragment */
10436 hdr = (void *)txb->fragments[0]->data;
10437 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10438 if (filter & IPW_PROM_NO_MGMT)
10439 return;
10440 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10441 hdr_only = 1;
10442 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10443 if (filter & IPW_PROM_NO_CTL)
10444 return;
10445 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10446 hdr_only = 1;
10447 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10448 if (filter & IPW_PROM_NO_DATA)
10449 return;
10450 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10451 hdr_only = 1;
10452 }
10453
10454 for(n=0; n<txb->nr_frags; ++n) {
10455 struct sk_buff *src = txb->fragments[n];
10456 struct sk_buff *dst;
10457 struct ieee80211_radiotap_header *rt_hdr;
10458 int len;
10459
10460 if (hdr_only) {
10461 hdr = (void *)src->data;
10462 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10463 } else
10464 len = src->len;
10465
10466 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10467 if (!dst)
10468 continue;
10469
10470 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10471
10472 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10473 rt_hdr->it_pad = 0;
10474 rt_hdr->it_present = 0; /* after all, it's just an idea */
10475 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10476
10477 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10478 ieee80211chan2mhz(priv->channel));
10479 if (priv->channel > 14) /* 802.11a */
10480 *(__le16*)skb_put(dst, sizeof(u16)) =
10481 cpu_to_le16(IEEE80211_CHAN_OFDM |
10482 IEEE80211_CHAN_5GHZ);
10483 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10484 *(__le16*)skb_put(dst, sizeof(u16)) =
10485 cpu_to_le16(IEEE80211_CHAN_CCK |
10486 IEEE80211_CHAN_2GHZ);
10487 else /* 802.11g */
10488 *(__le16*)skb_put(dst, sizeof(u16)) =
10489 cpu_to_le16(IEEE80211_CHAN_OFDM |
10490 IEEE80211_CHAN_2GHZ);
10491
10492 rt_hdr->it_len = cpu_to_le16(dst->len);
10493
10494 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10495
10496 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10497 dev_kfree_skb_any(dst);
10498 }
10499 }
10500 #endif
10501
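/* Descriptive note (added): hard_start_xmit entry point used by libipw --
 * optionally mirrors the frame to the promiscuous interface, hands it to
 * ipw_tx_skb() under the IRQ lock, and blinks the activity LED on success. */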
10502 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10503 struct net_device *dev, int pri)
10504 {
10505 struct ipw_priv *priv = libipw_priv(dev);
10506 unsigned long flags;
10507 netdev_tx_t ret;
10508
10509 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10510 spin_lock_irqsave(&priv->lock, flags);
10511
10512 #ifdef CONFIG_IPW2200_PROMISCUOUS
10513 if (rtap_iface && netif_running(priv->prom_net_dev))
10514 ipw_handle_promiscuous_tx(priv, txb);
10515 #endif
10516
10517 ret = ipw_tx_skb(priv, txb, pri);
10518 if (ret == NETDEV_TX_OK)
10519 __ipw_led_activity_on(priv);
10520 spin_unlock_irqrestore(&priv->lock, flags);
10521
10522 return ret;
10523 }
10524
10525 static void ipw_net_set_multicast_list(struct net_device *dev)
10526 {
10527
10528 }
10529
10530 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10531 {
10532 struct ipw_priv *priv = libipw_priv(dev);
10533 struct sockaddr *addr = p;
10534
10535 if (!is_valid_ether_addr(addr->sa_data))
10536 return -EADDRNOTAVAIL;
10537 mutex_lock(&priv->mutex);
10538 priv->config |= CFG_CUSTOM_MAC;
10539 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10540 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10541 priv->net_dev->name, priv->mac_addr);
10542 schedule_work(&priv->adapter_restart);
10543 mutex_unlock(&priv->mutex);
10544 return 0;
10545 }
10546
10547 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10548 struct ethtool_drvinfo *info)
10549 {
10550 struct ipw_priv *p = libipw_priv(dev);
10551 char vers[64];
10552 char date[32];
10553 u32 len;
10554
10555 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10556 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10557
10558 len = sizeof(vers);
10559 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10560 len = sizeof(date);
10561 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10562
10563 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10564 vers, date);
10565 strlcpy(info->bus_info, pci_name(p->pci_dev),
10566 sizeof(info->bus_info));
10567 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10568 }
10569
10570 static u32 ipw_ethtool_get_link(struct net_device *dev)
10571 {
10572 struct ipw_priv *priv = libipw_priv(dev);
10573 return (priv->status & STATUS_ASSOCIATED) != 0;
10574 }
10575
10576 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10577 {
10578 return IPW_EEPROM_IMAGE_SIZE;
10579 }
10580
10581 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10582 struct ethtool_eeprom *eeprom, u8 * bytes)
10583 {
10584 struct ipw_priv *p = libipw_priv(dev);
10585
10586 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10587 return -EINVAL;
10588 mutex_lock(&p->mutex);
10589 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10590 mutex_unlock(&p->mutex);
10591 return 0;
10592 }
10593
10594 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10595 struct ethtool_eeprom *eeprom, u8 * bytes)
10596 {
10597 struct ipw_priv *p = libipw_priv(dev);
10598 int i;
10599
10600 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10601 return -EINVAL;
10602 mutex_lock(&p->mutex);
10603 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10604 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10605 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10606 mutex_unlock(&p->mutex);
10607 return 0;
10608 }
10609
10610 static const struct ethtool_ops ipw_ethtool_ops = {
10611 .get_link = ipw_ethtool_get_link,
10612 .get_drvinfo = ipw_ethtool_get_drvinfo,
10613 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10614 .get_eeprom = ipw_ethtool_get_eeprom,
10615 .set_eeprom = ipw_ethtool_set_eeprom,
10616 };
10617
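/* Descriptive note (added): top-half interrupt handler -- verifies the
 * interrupt is ours, masks and acks it, caches the INTA bits, and defers
 * the real work to the IRQ tasklet. */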
10618 static irqreturn_t ipw_isr(int irq, void *data)
10619 {
10620 struct ipw_priv *priv = data;
10621 u32 inta, inta_mask;
10622
10623 if (!priv)
10624 return IRQ_NONE;
10625
10626 spin_lock(&priv->irq_lock);
10627
10628 if (!(priv->status & STATUS_INT_ENABLED)) {
10629 /* IRQ is disabled */
10630 goto none;
10631 }
10632
10633 inta = ipw_read32(priv, IPW_INTA_RW);
10634 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10635
10636 if (inta == 0xFFFFFFFF) {
10637 /* Hardware disappeared */
10638 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10639 goto none;
10640 }
10641
10642 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10643 /* Shared interrupt */
10644 goto none;
10645 }
10646
10647 /* tell the device to stop sending interrupts */
10648 __ipw_disable_interrupts(priv);
10649
10650 /* ack current interrupts */
10651 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10652 ipw_write32(priv, IPW_INTA_RW, inta);
10653
10654 /* Cache INTA value for our tasklet */
10655 priv->isr_inta = inta;
10656
10657 tasklet_schedule(&priv->irq_tasklet);
10658
10659 spin_unlock(&priv->irq_lock);
10660
10661 return IRQ_HANDLED;
10662 none:
10663 spin_unlock(&priv->irq_lock);
10664 return IRQ_NONE;
10665 }
10666
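/* Descriptive note (added): poll the hardware RF kill switch -- while the
 * switch is still active, recheck every two seconds; once it is released
 * (and software RF kill is not set), schedule an adapter restart to bring
 * the device back up. */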
10667 static void ipw_rf_kill(void *adapter)
10668 {
10669 struct ipw_priv *priv = adapter;
10670 unsigned long flags;
10671
10672 spin_lock_irqsave(&priv->lock, flags);
10673
10674 if (rf_kill_active(priv)) {
10675 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10676 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10677 goto exit_unlock;
10678 }
10679
10680 /* RF Kill is now disabled, so bring the device back up */
10681
10682 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10683 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10684 "device\n");
10685
10686 /* we can not do an adapter restart while inside an irq lock */
10687 schedule_work(&priv->adapter_restart);
10688 } else
10689 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10690 "enabled\n");
10691
10692 exit_unlock:
10693 spin_unlock_irqrestore(&priv->lock, flags);
10694 }
10695
10696 static void ipw_bg_rf_kill(struct work_struct *work)
10697 {
10698 struct ipw_priv *priv =
10699 container_of(work, struct ipw_priv, rf_kill.work);
10700 mutex_lock(&priv->mutex);
10701 ipw_rf_kill(priv);
10702 mutex_unlock(&priv->mutex);
10703 }
10704
10705 static void ipw_link_up(struct ipw_priv *priv)
10706 {
10707 priv->last_seq_num = -1;
10708 priv->last_frag_num = -1;
10709 priv->last_packet_time = 0;
10710
10711 netif_carrier_on(priv->net_dev);
10712
10713 cancel_delayed_work(&priv->request_scan);
10714 cancel_delayed_work(&priv->request_direct_scan);
10715 cancel_delayed_work(&priv->request_passive_scan);
10716 cancel_delayed_work(&priv->scan_event);
10717 ipw_reset_stats(priv);
10718 /* Ensure the rate is updated immediately */
10719 priv->last_rate = ipw_get_current_rate(priv);
10720 ipw_gather_stats(priv);
10721 ipw_led_link_up(priv);
10722 notify_wx_assoc_event(priv);
10723
10724 if (priv->config & CFG_BACKGROUND_SCAN)
10725 schedule_delayed_work(&priv->request_scan, HZ);
10726 }
10727
10728 static void ipw_bg_link_up(struct work_struct *work)
10729 {
10730 struct ipw_priv *priv =
10731 container_of(work, struct ipw_priv, link_up);
10732 mutex_lock(&priv->mutex);
10733 ipw_link_up(priv);
10734 mutex_unlock(&priv->mutex);
10735 }
10736
10737 static void ipw_link_down(struct ipw_priv *priv)
10738 {
10739 ipw_led_link_down(priv);
10740 netif_carrier_off(priv->net_dev);
10741 notify_wx_assoc_event(priv);
10742
10743 /* Cancel any queued work ... */
10744 cancel_delayed_work(&priv->request_scan);
10745 cancel_delayed_work(&priv->request_direct_scan);
10746 cancel_delayed_work(&priv->request_passive_scan);
10747 cancel_delayed_work(&priv->adhoc_check);
10748 cancel_delayed_work(&priv->gather_stats);
10749
10750 ipw_reset_stats(priv);
10751
10752 if (!(priv->status & STATUS_EXIT_PENDING)) {
10753 /* Queue up another scan... */
10754 schedule_delayed_work(&priv->request_scan, 0);
10755 } else
10756 cancel_delayed_work(&priv->scan_event);
10757 }
10758
10759 static void ipw_bg_link_down(struct work_struct *work)
10760 {
10761 struct ipw_priv *priv =
10762 container_of(work, struct ipw_priv, link_down);
10763 mutex_lock(&priv->mutex);
10764 ipw_link_down(priv);
10765 mutex_unlock(&priv->mutex);
10766 }
10767
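/* Descriptive note (added): initialize the wait queues, the (delayed) work
 * items and the IRQ tasklet that carry out all deferred driver work. */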
10768 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10769 {
10770 int ret = 0;
10771
10772 init_waitqueue_head(&priv->wait_command_queue);
10773 init_waitqueue_head(&priv->wait_state);
10774
10775 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10776 INIT_WORK(&priv->associate, ipw_bg_associate);
10777 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10778 INIT_WORK(&priv->system_config, ipw_system_config);
10779 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10780 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10781 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10782 INIT_WORK(&priv->up, ipw_bg_up);
10783 INIT_WORK(&priv->down, ipw_bg_down);
10784 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10785 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10786 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10787 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10788 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10789 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10790 INIT_WORK(&priv->roam, ipw_bg_roam);
10791 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10792 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10793 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10794 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10795 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10796 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10797 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10798
10799 #ifdef CONFIG_IPW2200_QOS
10800 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10801 #endif /* CONFIG_IPW2200_QOS */
10802
10803 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10804 ipw_irq_tasklet, (unsigned long)priv);
10805
10806 return ret;
10807 }
10808
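/* Descriptive note (added): libipw set_security callback -- copies the key
 * material, active key, auth mode and privacy settings into the driver
 * state and flags STATUS_SECURITY_UPDATED so the new settings get applied. */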
10809 static void shim__set_security(struct net_device *dev,
10810 struct libipw_security *sec)
10811 {
10812 struct ipw_priv *priv = libipw_priv(dev);
10813 int i;
10814 for (i = 0; i < 4; i++) {
10815 if (sec->flags & (1 << i)) {
10816 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10817 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10818 if (sec->key_sizes[i] == 0)
10819 priv->ieee->sec.flags &= ~(1 << i);
10820 else {
10821 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10822 sec->key_sizes[i]);
10823 priv->ieee->sec.flags |= (1 << i);
10824 }
10825 priv->status |= STATUS_SECURITY_UPDATED;
10826 } else if (sec->level != SEC_LEVEL_1)
10827 priv->ieee->sec.flags &= ~(1 << i);
10828 }
10829
10830 if (sec->flags & SEC_ACTIVE_KEY) {
10831 if (sec->active_key <= 3) {
10832 priv->ieee->sec.active_key = sec->active_key;
10833 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10834 } else
10835 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10836 priv->status |= STATUS_SECURITY_UPDATED;
10837 } else
10838 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10839
10840 if ((sec->flags & SEC_AUTH_MODE) &&
10841 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10842 priv->ieee->sec.auth_mode = sec->auth_mode;
10843 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10844 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10845 priv->capability |= CAP_SHARED_KEY;
10846 else
10847 priv->capability &= ~CAP_SHARED_KEY;
10848 priv->status |= STATUS_SECURITY_UPDATED;
10849 }
10850
10851 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10852 priv->ieee->sec.flags |= SEC_ENABLED;
10853 priv->ieee->sec.enabled = sec->enabled;
10854 priv->status |= STATUS_SECURITY_UPDATED;
10855 if (sec->enabled)
10856 priv->capability |= CAP_PRIVACY_ON;
10857 else
10858 priv->capability &= ~CAP_PRIVACY_ON;
10859 }
10860
10861 if (sec->flags & SEC_ENCRYPT)
10862 priv->ieee->sec.encrypt = sec->encrypt;
10863
10864 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10865 priv->ieee->sec.level = sec->level;
10866 priv->ieee->sec.flags |= SEC_LEVEL;
10867 priv->status |= STATUS_SECURITY_UPDATED;
10868 }
10869
10870 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10871 ipw_set_hwcrypto_keys(priv);
10872
10873 	/* To match the current functionality of ipw2100 (which works well with
10874 	 * various supplicants), we don't force a disassociation if the
10875 	 * privacy capability changes ... */
10876 #if 0
10877 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10878 (((priv->assoc_request.capability &
10879 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10880 (!(priv->assoc_request.capability &
10881 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10882 IPW_DEBUG_ASSOC("Disassociating due to capability "
10883 "change.\n");
10884 ipw_disassociate(priv);
10885 }
10886 #endif
10887 }
10888
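/* Descriptive note (added): build the supported-rates command for the
 * current band -- OFDM rates only in the 5.2GHz band (A mode), CCK plus
 * optional OFDM rates otherwise (G mode). */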
10889 static int init_supported_rates(struct ipw_priv *priv,
10890 struct ipw_supported_rates *rates)
10891 {
10892 /* TODO: Mask out rates based on priv->rates_mask */
10893
10894 memset(rates, 0, sizeof(*rates));
10895 /* configure supported rates */
10896 switch (priv->ieee->freq_band) {
10897 case LIBIPW_52GHZ_BAND:
10898 rates->ieee_mode = IPW_A_MODE;
10899 rates->purpose = IPW_RATE_CAPABILITIES;
10900 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10901 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10902 break;
10903
10904 default: /* Mixed or 2.4Ghz */
10905 rates->ieee_mode = IPW_G_MODE;
10906 rates->purpose = IPW_RATE_CAPABILITIES;
10907 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10908 LIBIPW_CCK_DEFAULT_RATES_MASK);
10909 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10910 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10911 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10912 }
10913 break;
10914 }
10915
10916 return 0;
10917 }
10918
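/* Descriptive note (added): one-time configuration after the firmware has
 * been (re)loaded -- tx power, adapter address, system config, supported
 * rates, RTS threshold, QoS, random seed and finally the host-complete
 * command that moves the firmware into the RUN state. */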
10919 static int ipw_config(struct ipw_priv *priv)
10920 {
10921 /* This is only called from ipw_up, which resets/reloads the firmware
10922    so we don't need to first disable the card before we configure
10923 it */
10924 if (ipw_set_tx_power(priv))
10925 goto error;
10926
10927 /* initialize adapter address */
10928 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10929 goto error;
10930
10931 /* set basic system config settings */
10932 init_sys_config(&priv->sys_config);
10933
10934 	/* Support Bluetooth if we have BT h/w on board and the user wants to.
10935 	 * Does not support BT priority yet (don't abort or defer our Tx). */
10936 if (bt_coexist) {
10937 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10938
10939 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10940 priv->sys_config.bt_coexistence
10941 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10942 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10943 priv->sys_config.bt_coexistence
10944 |= CFG_BT_COEXISTENCE_OOB;
10945 }
10946
10947 #ifdef CONFIG_IPW2200_PROMISCUOUS
10948 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10949 priv->sys_config.accept_all_data_frames = 1;
10950 priv->sys_config.accept_non_directed_frames = 1;
10951 priv->sys_config.accept_all_mgmt_bcpr = 1;
10952 priv->sys_config.accept_all_mgmt_frames = 1;
10953 }
10954 #endif
10955
10956 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10957 priv->sys_config.answer_broadcast_ssid_probe = 1;
10958 else
10959 priv->sys_config.answer_broadcast_ssid_probe = 0;
10960
10961 if (ipw_send_system_config(priv))
10962 goto error;
10963
10964 init_supported_rates(priv, &priv->rates);
10965 if (ipw_send_supported_rates(priv, &priv->rates))
10966 goto error;
10967
10968 /* Set request-to-send threshold */
10969 if (priv->rts_threshold) {
10970 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10971 goto error;
10972 }
10973 #ifdef CONFIG_IPW2200_QOS
10974 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10975 ipw_qos_activate(priv, NULL);
10976 #endif /* CONFIG_IPW2200_QOS */
10977
10978 if (ipw_set_random_seed(priv))
10979 goto error;
10980
10981 /* final state transition to the RUN state */
10982 if (ipw_send_host_complete(priv))
10983 goto error;
10984
10985 priv->status |= STATUS_INIT;
10986
10987 ipw_led_init(priv);
10988 ipw_led_radio_on(priv);
10989 priv->notif_missed_beacons = 0;
10990
10991 /* Set hardware WEP key if it is configured. */
10992 if ((priv->capability & CAP_PRIVACY_ON) &&
10993 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10994 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10995 ipw_set_hwcrypto_keys(priv);
10996
10997 return 0;
10998
10999 error:
11000 return -EIO;
11001 }
11002
11003 /*
11004 * NOTE:
11005 *
11006 * These tables have been tested in conjunction with the
11007 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
11008 *
11009  * Altering these values, using them on other hardware, or using them
11010  * in geographies not intended for resale of the above-mentioned Intel
11011  * adapters has not been tested.
11012 *
11013 * Remember to update the table in README.ipw2200 when changing this
11014 * table.
11015 *
11016 */
11017 static const struct libipw_geo ipw_geos[] = {
11018 { /* Restricted */
11019 "---",
11020 .bg_channels = 11,
11021 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11022 {2427, 4}, {2432, 5}, {2437, 6},
11023 {2442, 7}, {2447, 8}, {2452, 9},
11024 {2457, 10}, {2462, 11}},
11025 },
11026
11027 { /* Custom US/Canada */
11028 "ZZF",
11029 .bg_channels = 11,
11030 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11031 {2427, 4}, {2432, 5}, {2437, 6},
11032 {2442, 7}, {2447, 8}, {2452, 9},
11033 {2457, 10}, {2462, 11}},
11034 .a_channels = 8,
11035 .a = {{5180, 36},
11036 {5200, 40},
11037 {5220, 44},
11038 {5240, 48},
11039 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11040 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11041 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11042 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11043 },
11044
11045 { /* Rest of World */
11046 "ZZD",
11047 .bg_channels = 13,
11048 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11049 {2427, 4}, {2432, 5}, {2437, 6},
11050 {2442, 7}, {2447, 8}, {2452, 9},
11051 {2457, 10}, {2462, 11}, {2467, 12},
11052 {2472, 13}},
11053 },
11054
11055 { /* Custom USA & Europe & High */
11056 "ZZA",
11057 .bg_channels = 11,
11058 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11059 {2427, 4}, {2432, 5}, {2437, 6},
11060 {2442, 7}, {2447, 8}, {2452, 9},
11061 {2457, 10}, {2462, 11}},
11062 .a_channels = 13,
11063 .a = {{5180, 36},
11064 {5200, 40},
11065 {5220, 44},
11066 {5240, 48},
11067 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11068 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11069 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11070 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11071 {5745, 149},
11072 {5765, 153},
11073 {5785, 157},
11074 {5805, 161},
11075 {5825, 165}},
11076 },
11077
11078 { /* Custom NA & Europe */
11079 "ZZB",
11080 .bg_channels = 11,
11081 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11082 {2427, 4}, {2432, 5}, {2437, 6},
11083 {2442, 7}, {2447, 8}, {2452, 9},
11084 {2457, 10}, {2462, 11}},
11085 .a_channels = 13,
11086 .a = {{5180, 36},
11087 {5200, 40},
11088 {5220, 44},
11089 {5240, 48},
11090 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11091 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11092 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11093 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11094 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11095 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11096 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11097 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11098 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11099 },
11100
11101 { /* Custom Japan */
11102 "ZZC",
11103 .bg_channels = 11,
11104 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11105 {2427, 4}, {2432, 5}, {2437, 6},
11106 {2442, 7}, {2447, 8}, {2452, 9},
11107 {2457, 10}, {2462, 11}},
11108 .a_channels = 4,
11109 .a = {{5170, 34}, {5190, 38},
11110 {5210, 42}, {5230, 46}},
11111 },
11112
11113 { /* Custom */
11114 "ZZM",
11115 .bg_channels = 11,
11116 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11117 {2427, 4}, {2432, 5}, {2437, 6},
11118 {2442, 7}, {2447, 8}, {2452, 9},
11119 {2457, 10}, {2462, 11}},
11120 },
11121
11122 { /* Europe */
11123 "ZZE",
11124 .bg_channels = 13,
11125 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11126 {2427, 4}, {2432, 5}, {2437, 6},
11127 {2442, 7}, {2447, 8}, {2452, 9},
11128 {2457, 10}, {2462, 11}, {2467, 12},
11129 {2472, 13}},
11130 .a_channels = 19,
11131 .a = {{5180, 36},
11132 {5200, 40},
11133 {5220, 44},
11134 {5240, 48},
11135 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11136 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11137 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11138 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11139 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11140 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11141 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11142 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11143 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11144 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11145 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11146 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11147 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11148 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11149 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11150 },
11151
11152 { /* Custom Japan */
11153 "ZZJ",
11154 .bg_channels = 14,
11155 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11156 {2427, 4}, {2432, 5}, {2437, 6},
11157 {2442, 7}, {2447, 8}, {2452, 9},
11158 {2457, 10}, {2462, 11}, {2467, 12},
11159 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11160 .a_channels = 4,
11161 .a = {{5170, 34}, {5190, 38},
11162 {5210, 42}, {5230, 46}},
11163 },
11164
11165 { /* Rest of World */
11166 "ZZR",
11167 .bg_channels = 14,
11168 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11169 {2427, 4}, {2432, 5}, {2437, 6},
11170 {2442, 7}, {2447, 8}, {2452, 9},
11171 {2457, 10}, {2462, 11}, {2467, 12},
11172 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11173 LIBIPW_CH_PASSIVE_ONLY}},
11174 },
11175
11176 { /* High Band */
11177 "ZZH",
11178 .bg_channels = 13,
11179 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11180 {2427, 4}, {2432, 5}, {2437, 6},
11181 {2442, 7}, {2447, 8}, {2452, 9},
11182 {2457, 10}, {2462, 11},
11183 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11184 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11185 .a_channels = 4,
11186 .a = {{5745, 149}, {5765, 153},
11187 {5785, 157}, {5805, 161}},
11188 },
11189
11190 { /* Custom Europe */
11191 "ZZG",
11192 .bg_channels = 13,
11193 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11194 {2427, 4}, {2432, 5}, {2437, 6},
11195 {2442, 7}, {2447, 8}, {2452, 9},
11196 {2457, 10}, {2462, 11},
11197 {2467, 12}, {2472, 13}},
11198 .a_channels = 4,
11199 .a = {{5180, 36}, {5200, 40},
11200 {5220, 44}, {5240, 48}},
11201 },
11202
11203 { /* Europe */
11204 "ZZK",
11205 .bg_channels = 13,
11206 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11207 {2427, 4}, {2432, 5}, {2437, 6},
11208 {2442, 7}, {2447, 8}, {2452, 9},
11209 {2457, 10}, {2462, 11},
11210 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11211 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11212 .a_channels = 24,
11213 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11214 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11215 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11216 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11217 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11218 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11219 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11220 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11221 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11222 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11223 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11224 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11225 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11226 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11227 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11228 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11229 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11230 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11231 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11232 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11233 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11234 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11235 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11236 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11237 },
11238
11239 { /* Europe */
11240 "ZZL",
11241 .bg_channels = 11,
11242 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11243 {2427, 4}, {2432, 5}, {2437, 6},
11244 {2442, 7}, {2447, 8}, {2452, 9},
11245 {2457, 10}, {2462, 11}},
11246 .a_channels = 13,
11247 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11248 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11249 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11250 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11251 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11252 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11253 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11254 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11255 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11256 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11257 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11258 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11259 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11260 }
11261 };
11262
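/* Look up the geography that matches the three-character SKU stored in
 * the EEPROM country code field and hand it to libipw; an unrecognized
 * SKU is logged and the first table entry is used as a fallback. */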
11263 static void ipw_set_geo(struct ipw_priv *priv)
11264 {
11265 int j;
11266
11267 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11268 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11269 ipw_geos[j].name, 3))
11270 break;
11271 }
11272
11273 if (j == ARRAY_SIZE(ipw_geos)) {
11274 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11275 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11276 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11277 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11278 j = 0;
11279 }
11280
11281 libipw_set_geo(priv->ieee, &ipw_geos[j]);
11282 }
11283
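/* Bring the hardware up: age scan results left over from suspend,
 * allocate the optional command log, then load the firmware, program
 * the MAC address and geography and configure the device, retrying the
 * configuration up to MAX_HW_RESTARTS times before giving up. */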
11284 #define MAX_HW_RESTARTS 5
11285 static int ipw_up(struct ipw_priv *priv)
11286 {
11287 int rc, i;
11288
11289 /* Age scan list entries found before suspend */
11290 if (priv->suspend_time) {
11291 libipw_networks_age(priv->ieee, priv->suspend_time);
11292 priv->suspend_time = 0;
11293 }
11294
11295 if (priv->status & STATUS_EXIT_PENDING)
11296 return -EIO;
11297
11298 if (cmdlog && !priv->cmdlog) {
11299 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11300 GFP_KERNEL);
11301 if (priv->cmdlog == NULL) {
11302 IPW_ERROR("Error allocating %d command log entries.\n",
11303 cmdlog);
11304 return -ENOMEM;
11305 } else {
11306 priv->cmdlog_len = cmdlog;
11307 }
11308 }
11309
11310 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11311 /* Load the microcode, firmware, and eeprom.
11312 * Also start the clocks. */
11313 rc = ipw_load(priv);
11314 if (rc) {
11315 IPW_ERROR("Unable to load firmware: %d\n", rc);
11316 return rc;
11317 }
11318
11319 ipw_init_ordinals(priv);
11320 if (!(priv->config & CFG_CUSTOM_MAC))
11321 eeprom_parse_mac(priv, priv->mac_addr);
11322 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11323
11324 ipw_set_geo(priv);
11325
11326 if (priv->status & STATUS_RF_KILL_SW) {
11327 IPW_WARNING("Radio disabled by module parameter.\n");
11328 return 0;
11329 } else if (rf_kill_active(priv)) {
11330 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11331 "Kill switch must be turned off for "
11332 "wireless networking to work.\n");
11333 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11334 return 0;
11335 }
11336
11337 rc = ipw_config(priv);
11338 if (!rc) {
11339 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11340
11341 			/* If configured to try and auto-associate, kick
11342 * off a scan. */
11343 schedule_delayed_work(&priv->request_scan, 0);
11344
11345 return 0;
11346 }
11347
11348 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11349 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11350 i, MAX_HW_RESTARTS);
11351
11352 /* We had an error bringing up the hardware, so take it
11353 * all the way back down so we can try again */
11354 ipw_down(priv);
11355 }
11356
11357 	/* We tried to restart and configure the device for as long as our
11358 	 * patience could withstand. */
11359 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11360
11361 return -EIO;
11362 }
11363
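/* Workqueue handler: run ipw_up() with priv->mutex held. */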
11364 static void ipw_bg_up(struct work_struct *work)
11365 {
11366 struct ipw_priv *priv =
11367 container_of(work, struct ipw_priv, up);
11368 mutex_lock(&priv->mutex);
11369 ipw_up(priv);
11370 mutex_unlock(&priv->mutex);
11371 }
11372
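/* Orderly shutdown of an initialized device: abort any scan in
 * progress, disassociate, shut the LEDs down, give the state machine a
 * short time to settle, then disable the card and clear STATUS_INIT. */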
11373 static void ipw_deinit(struct ipw_priv *priv)
11374 {
11375 int i;
11376
11377 if (priv->status & STATUS_SCANNING) {
11378 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11379 ipw_abort_scan(priv);
11380 }
11381
11382 if (priv->status & STATUS_ASSOCIATED) {
11383 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11384 ipw_disassociate(priv);
11385 }
11386
11387 ipw_led_shutdown(priv);
11388
11389 /* Wait up to 1s for status to change to not scanning and not
11390 	 * associated (disassociation can take a while for a full 802.11
11391 	 * exchange). */
11392 for (i = 1000; i && (priv->status &
11393 (STATUS_DISASSOCIATING |
11394 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11395 udelay(10);
11396
11397 if (priv->status & (STATUS_DISASSOCIATING |
11398 STATUS_ASSOCIATED | STATUS_SCANNING))
11399 IPW_DEBUG_INFO("Still associated or scanning...\n");
11400 else
11401 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11402
11403 /* Attempt to disable the card */
11404 ipw_send_card_disable(priv, 0);
11405
11406 priv->status &= ~STATUS_INIT;
11407 }
11408
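/* Take the device fully down: de-initialize it if needed, mask
 * interrupts, drop the carrier and stop the NIC, keeping only the
 * RF-kill (and, when already set, EXIT_PENDING) status bits. */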
11409 static void ipw_down(struct ipw_priv *priv)
11410 {
11411 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11412
11413 priv->status |= STATUS_EXIT_PENDING;
11414
11415 if (ipw_is_init(priv))
11416 ipw_deinit(priv);
11417
11418 /* Wipe out the EXIT_PENDING status bit if we are not actually
11419 * exiting the module */
11420 if (!exit_pending)
11421 priv->status &= ~STATUS_EXIT_PENDING;
11422
11423 /* tell the device to stop sending interrupts */
11424 ipw_disable_interrupts(priv);
11425
11426 	/* Clear all status bits except RF kill and exit-pending */
11427 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11428 netif_carrier_off(priv->net_dev);
11429
11430 ipw_stop_nic(priv);
11431
11432 ipw_led_radio_off(priv);
11433 }
11434
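/* Workqueue handler: run ipw_down() with priv->mutex held. */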
11435 static void ipw_bg_down(struct work_struct *work)
11436 {
11437 struct ipw_priv *priv =
11438 container_of(work, struct ipw_priv, down);
11439 mutex_lock(&priv->mutex);
11440 ipw_down(priv);
11441 mutex_unlock(&priv->mutex);
11442 }
11443
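/* Fill in the cfg80211 wireless_dev from the selected geography: build
 * the 2.4 GHz and 5 GHz supported-band tables, attach the bitrate and
 * cipher-suite information and register the wiphy. */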
11444 static int ipw_wdev_init(struct net_device *dev)
11445 {
11446 int i, rc = 0;
11447 struct ipw_priv *priv = libipw_priv(dev);
11448 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11449 struct wireless_dev *wdev = &priv->ieee->wdev;
11450
11451 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11452
11453 /* fill-out priv->ieee->bg_band */
11454 if (geo->bg_channels) {
11455 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11456
11457 bg_band->band = IEEE80211_BAND_2GHZ;
11458 bg_band->n_channels = geo->bg_channels;
11459 bg_band->channels = kcalloc(geo->bg_channels,
11460 sizeof(struct ieee80211_channel),
11461 GFP_KERNEL);
11462 if (!bg_band->channels) {
11463 rc = -ENOMEM;
11464 goto out;
11465 }
11466 /* translate geo->bg to bg_band.channels */
11467 for (i = 0; i < geo->bg_channels; i++) {
11468 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11469 bg_band->channels[i].center_freq = geo->bg[i].freq;
11470 bg_band->channels[i].hw_value = geo->bg[i].channel;
11471 bg_band->channels[i].max_power = geo->bg[i].max_power;
11472 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11473 bg_band->channels[i].flags |=
11474 IEEE80211_CHAN_PASSIVE_SCAN;
11475 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11476 bg_band->channels[i].flags |=
11477 IEEE80211_CHAN_NO_IBSS;
11478 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11479 bg_band->channels[i].flags |=
11480 IEEE80211_CHAN_RADAR;
11481 /* No equivalent for LIBIPW_CH_80211H_RULES,
11482 LIBIPW_CH_UNIFORM_SPREADING, or
11483 LIBIPW_CH_B_ONLY... */
11484 }
11485 /* point at bitrate info */
11486 bg_band->bitrates = ipw2200_bg_rates;
11487 bg_band->n_bitrates = ipw2200_num_bg_rates;
11488
11489 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11490 }
11491
11492 /* fill-out priv->ieee->a_band */
11493 if (geo->a_channels) {
11494 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11495
11496 a_band->band = IEEE80211_BAND_5GHZ;
11497 a_band->n_channels = geo->a_channels;
11498 a_band->channels = kcalloc(geo->a_channels,
11499 sizeof(struct ieee80211_channel),
11500 GFP_KERNEL);
11501 if (!a_band->channels) {
11502 rc = -ENOMEM;
11503 goto out;
11504 }
11505 /* translate geo->a to a_band.channels */
11506 for (i = 0; i < geo->a_channels; i++) {
11507 a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11508 a_band->channels[i].center_freq = geo->a[i].freq;
11509 a_band->channels[i].hw_value = geo->a[i].channel;
11510 a_band->channels[i].max_power = geo->a[i].max_power;
11511 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11512 a_band->channels[i].flags |=
11513 IEEE80211_CHAN_PASSIVE_SCAN;
11514 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11515 a_band->channels[i].flags |=
11516 IEEE80211_CHAN_NO_IBSS;
11517 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11518 a_band->channels[i].flags |=
11519 IEEE80211_CHAN_RADAR;
11520 /* No equivalent for LIBIPW_CH_80211H_RULES,
11521 LIBIPW_CH_UNIFORM_SPREADING, or
11522 LIBIPW_CH_B_ONLY... */
11523 }
11524 /* point at bitrate info */
11525 a_band->bitrates = ipw2200_a_rates;
11526 a_band->n_bitrates = ipw2200_num_a_rates;
11527
11528 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11529 }
11530
11531 wdev->wiphy->cipher_suites = ipw_cipher_suites;
11532 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11533
11534 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11535
11536 /* With that information in place, we can now register the wiphy... */
11537 if (wiphy_register(wdev->wiphy))
11538 rc = -EIO;
11539 out:
11540 return rc;
11541 }
11542
11543 /* PCI driver stuff */
11544 static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11545 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11546 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11547 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11548 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11549 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11550 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11551 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11552 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11553 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11554 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11555 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11556 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11557 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11558 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11559 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11560 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11561 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11562 {PCI_VDEVICE(INTEL, 0x104f), 0},
11563 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11564 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11565 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11566 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11567
11568 /* required last entry */
11569 {0,}
11570 };
11571
11572 MODULE_DEVICE_TABLE(pci, card_ids);
11573
11574 static struct attribute *ipw_sysfs_entries[] = {
11575 &dev_attr_rf_kill.attr,
11576 &dev_attr_direct_dword.attr,
11577 &dev_attr_indirect_byte.attr,
11578 &dev_attr_indirect_dword.attr,
11579 &dev_attr_mem_gpio_reg.attr,
11580 &dev_attr_command_event_reg.attr,
11581 &dev_attr_nic_type.attr,
11582 &dev_attr_status.attr,
11583 &dev_attr_cfg.attr,
11584 &dev_attr_error.attr,
11585 &dev_attr_event_log.attr,
11586 &dev_attr_cmd_log.attr,
11587 &dev_attr_eeprom_delay.attr,
11588 &dev_attr_ucode_version.attr,
11589 &dev_attr_rtc.attr,
11590 &dev_attr_scan_age.attr,
11591 &dev_attr_led.attr,
11592 &dev_attr_speed_scan.attr,
11593 &dev_attr_net_stats.attr,
11594 &dev_attr_channels.attr,
11595 #ifdef CONFIG_IPW2200_PROMISCUOUS
11596 &dev_attr_rtap_iface.attr,
11597 &dev_attr_rtap_filter.attr,
11598 #endif
11599 NULL
11600 };
11601
11602 static struct attribute_group ipw_attribute_group = {
11603 .name = NULL, /* put in device directory */
11604 .attrs = ipw_sysfs_entries,
11605 };
11606
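/* Optional promiscuous-mode support: a second, radiotap
 * (ARPHRD_IEEE80211_RADIOTAP) "rtap%d" net_device.  Opening it (when the
 * main interface is not already in monitor mode) asks the firmware to
 * accept all data and management frames; closing it restores the
 * default filtering. */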
11607 #ifdef CONFIG_IPW2200_PROMISCUOUS
11608 static int ipw_prom_open(struct net_device *dev)
11609 {
11610 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11611 struct ipw_priv *priv = prom_priv->priv;
11612
11613 IPW_DEBUG_INFO("prom dev->open\n");
11614 netif_carrier_off(dev);
11615
11616 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11617 priv->sys_config.accept_all_data_frames = 1;
11618 priv->sys_config.accept_non_directed_frames = 1;
11619 priv->sys_config.accept_all_mgmt_bcpr = 1;
11620 priv->sys_config.accept_all_mgmt_frames = 1;
11621
11622 ipw_send_system_config(priv);
11623 }
11624
11625 return 0;
11626 }
11627
11628 static int ipw_prom_stop(struct net_device *dev)
11629 {
11630 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11631 struct ipw_priv *priv = prom_priv->priv;
11632
11633 IPW_DEBUG_INFO("prom dev->stop\n");
11634
11635 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11636 priv->sys_config.accept_all_data_frames = 0;
11637 priv->sys_config.accept_non_directed_frames = 0;
11638 priv->sys_config.accept_all_mgmt_bcpr = 0;
11639 priv->sys_config.accept_all_mgmt_frames = 0;
11640
11641 ipw_send_system_config(priv);
11642 }
11643
11644 return 0;
11645 }
11646
11647 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11648 struct net_device *dev)
11649 {
11650 IPW_DEBUG_INFO("prom dev->xmit\n");
11651 dev_kfree_skb(skb);
11652 return NETDEV_TX_OK;
11653 }
11654
11655 static const struct net_device_ops ipw_prom_netdev_ops = {
11656 .ndo_open = ipw_prom_open,
11657 .ndo_stop = ipw_prom_stop,
11658 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11659 .ndo_change_mtu = libipw_change_mtu,
11660 .ndo_set_mac_address = eth_mac_addr,
11661 .ndo_validate_addr = eth_validate_addr,
11662 };
11663
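/* Allocate and register the rtap net_device.  Transmitting on it is a
 * no-op: ipw_prom_hard_start_xmit() simply frees the skb. */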
11664 static int ipw_prom_alloc(struct ipw_priv *priv)
11665 {
11666 int rc = 0;
11667
11668 if (priv->prom_net_dev)
11669 return -EPERM;
11670
11671 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11672 if (priv->prom_net_dev == NULL)
11673 return -ENOMEM;
11674
11675 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11676 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11677 priv->prom_priv->priv = priv;
11678
11679 strcpy(priv->prom_net_dev->name, "rtap%d");
11680 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11681
11682 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11683 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11684
11685 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11686 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11687
11688 rc = register_netdev(priv->prom_net_dev);
11689 if (rc) {
11690 free_libipw(priv->prom_net_dev, 1);
11691 priv->prom_net_dev = NULL;
11692 return rc;
11693 }
11694
11695 return 0;
11696 }
11697
11698 static void ipw_prom_free(struct ipw_priv *priv)
11699 {
11700 if (!priv->prom_net_dev)
11701 return;
11702
11703 unregister_netdev(priv->prom_net_dev);
11704 free_libipw(priv->prom_net_dev, 1);
11705
11706 priv->prom_net_dev = NULL;
11707 }
11708
11709 #endif
11710
11711 static const struct net_device_ops ipw_netdev_ops = {
11712 .ndo_open = ipw_net_open,
11713 .ndo_stop = ipw_net_stop,
11714 .ndo_set_rx_mode = ipw_net_set_multicast_list,
11715 .ndo_set_mac_address = ipw_net_set_mac_address,
11716 .ndo_start_xmit = libipw_xmit,
11717 .ndo_change_mtu = libipw_change_mtu,
11718 .ndo_validate_addr = eth_validate_addr,
11719 };
11720
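/* PCI probe: allocate the libipw net_device, enable and map the PCI
 * device, set up deferred work and the shared interrupt handler, create
 * the sysfs attribute group, bring the hardware up, then register the
 * wiphy and the network device. */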
11721 static int ipw_pci_probe(struct pci_dev *pdev,
11722 const struct pci_device_id *ent)
11723 {
11724 int err = 0;
11725 struct net_device *net_dev;
11726 void __iomem *base;
11727 u32 length, val;
11728 struct ipw_priv *priv;
11729 int i;
11730
11731 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11732 if (net_dev == NULL) {
11733 err = -ENOMEM;
11734 goto out;
11735 }
11736
11737 priv = libipw_priv(net_dev);
11738 priv->ieee = netdev_priv(net_dev);
11739
11740 priv->net_dev = net_dev;
11741 priv->pci_dev = pdev;
11742 ipw_debug_level = debug;
11743 spin_lock_init(&priv->irq_lock);
11744 spin_lock_init(&priv->lock);
11745 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11746 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11747
11748 mutex_init(&priv->mutex);
11749 if (pci_enable_device(pdev)) {
11750 err = -ENODEV;
11751 goto out_free_libipw;
11752 }
11753
11754 pci_set_master(pdev);
11755
11756 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11757 if (!err)
11758 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11759 if (err) {
11760 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11761 goto out_pci_disable_device;
11762 }
11763
11764 pci_set_drvdata(pdev, priv);
11765
11766 err = pci_request_regions(pdev, DRV_NAME);
11767 if (err)
11768 goto out_pci_disable_device;
11769
11770 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11771 * PCI Tx retries from interfering with C3 CPU state */
11772 pci_read_config_dword(pdev, 0x40, &val);
11773 if ((val & 0x0000ff00) != 0)
11774 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11775
11776 length = pci_resource_len(pdev, 0);
11777 priv->hw_len = length;
11778
11779 base = pci_ioremap_bar(pdev, 0);
11780 if (!base) {
11781 err = -ENODEV;
11782 goto out_pci_release_regions;
11783 }
11784
11785 priv->hw_base = base;
11786 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11787 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11788
11789 err = ipw_setup_deferred_work(priv);
11790 if (err) {
11791 IPW_ERROR("Unable to setup deferred work\n");
11792 goto out_iounmap;
11793 }
11794
11795 ipw_sw_reset(priv, 1);
11796
11797 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11798 if (err) {
11799 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11800 goto out_iounmap;
11801 }
11802
11803 SET_NETDEV_DEV(net_dev, &pdev->dev);
11804
11805 mutex_lock(&priv->mutex);
11806
11807 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11808 priv->ieee->set_security = shim__set_security;
11809 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11810
11811 #ifdef CONFIG_IPW2200_QOS
11812 priv->ieee->is_qos_active = ipw_is_qos_active;
11813 priv->ieee->handle_probe_response = ipw_handle_beacon;
11814 priv->ieee->handle_beacon = ipw_handle_probe_response;
11815 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11816 #endif /* CONFIG_IPW2200_QOS */
11817
11818 priv->ieee->perfect_rssi = -20;
11819 priv->ieee->worst_rssi = -85;
11820
11821 net_dev->netdev_ops = &ipw_netdev_ops;
11822 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11823 net_dev->wireless_data = &priv->wireless_data;
11824 net_dev->wireless_handlers = &ipw_wx_handler_def;
11825 net_dev->ethtool_ops = &ipw_ethtool_ops;
11826
11827 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11828 if (err) {
11829 IPW_ERROR("failed to create sysfs device attributes\n");
11830 mutex_unlock(&priv->mutex);
11831 goto out_release_irq;
11832 }
11833
11834 if (ipw_up(priv)) {
11835 mutex_unlock(&priv->mutex);
11836 err = -EIO;
11837 goto out_remove_sysfs;
11838 }
11839
11840 mutex_unlock(&priv->mutex);
11841
11842 err = ipw_wdev_init(net_dev);
11843 if (err) {
11844 IPW_ERROR("failed to register wireless device\n");
11845 goto out_remove_sysfs;
11846 }
11847
11848 err = register_netdev(net_dev);
11849 if (err) {
11850 IPW_ERROR("failed to register network device\n");
11851 goto out_unregister_wiphy;
11852 }
11853
11854 #ifdef CONFIG_IPW2200_PROMISCUOUS
11855 if (rtap_iface) {
11856 err = ipw_prom_alloc(priv);
11857 if (err) {
11858 IPW_ERROR("Failed to register promiscuous network "
11859 "device (error %d).\n", err);
11860 unregister_netdev(priv->net_dev);
11861 goto out_unregister_wiphy;
11862 }
11863 }
11864 #endif
11865
11866 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11867 "channels, %d 802.11a channels)\n",
11868 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11869 priv->ieee->geo.a_channels);
11870
11871 return 0;
11872
11873 out_unregister_wiphy:
11874 wiphy_unregister(priv->ieee->wdev.wiphy);
11875 kfree(priv->ieee->a_band.channels);
11876 kfree(priv->ieee->bg_band.channels);
11877 out_remove_sysfs:
11878 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11879 out_release_irq:
11880 free_irq(pdev->irq, priv);
11881 out_iounmap:
11882 iounmap(priv->hw_base);
11883 out_pci_release_regions:
11884 pci_release_regions(pdev);
11885 out_pci_disable_device:
11886 pci_disable_device(pdev);
11887 pci_set_drvdata(pdev, NULL);
11888 out_free_libipw:
11889 free_libipw(priv->net_dev, 0);
11890 out:
11891 return err;
11892 }
11893
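/* PCI remove: mirror of probe -- take the device down, remove the sysfs
 * group, unregister the net_device, free the rx/tx queues, command log
 * and pending work, release the PCI resources and finally unregister
 * the wiphy and free the libipw device. */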
11894 static void ipw_pci_remove(struct pci_dev *pdev)
11895 {
11896 struct ipw_priv *priv = pci_get_drvdata(pdev);
11897 struct list_head *p, *q;
11898 int i;
11899
11900 if (!priv)
11901 return;
11902
11903 mutex_lock(&priv->mutex);
11904
11905 priv->status |= STATUS_EXIT_PENDING;
11906 ipw_down(priv);
11907 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11908
11909 mutex_unlock(&priv->mutex);
11910
11911 unregister_netdev(priv->net_dev);
11912
11913 if (priv->rxq) {
11914 ipw_rx_queue_free(priv, priv->rxq);
11915 priv->rxq = NULL;
11916 }
11917 ipw_tx_queue_free(priv);
11918
11919 if (priv->cmdlog) {
11920 kfree(priv->cmdlog);
11921 priv->cmdlog = NULL;
11922 }
11923
11924 	/* make sure all work items are inactive */
11925 cancel_delayed_work_sync(&priv->adhoc_check);
11926 cancel_work_sync(&priv->associate);
11927 cancel_work_sync(&priv->disassociate);
11928 cancel_work_sync(&priv->system_config);
11929 cancel_work_sync(&priv->rx_replenish);
11930 cancel_work_sync(&priv->adapter_restart);
11931 cancel_delayed_work_sync(&priv->rf_kill);
11932 cancel_work_sync(&priv->up);
11933 cancel_work_sync(&priv->down);
11934 cancel_delayed_work_sync(&priv->request_scan);
11935 cancel_delayed_work_sync(&priv->request_direct_scan);
11936 cancel_delayed_work_sync(&priv->request_passive_scan);
11937 cancel_delayed_work_sync(&priv->scan_event);
11938 cancel_delayed_work_sync(&priv->gather_stats);
11939 cancel_work_sync(&priv->abort_scan);
11940 cancel_work_sync(&priv->roam);
11941 cancel_delayed_work_sync(&priv->scan_check);
11942 cancel_work_sync(&priv->link_up);
11943 cancel_work_sync(&priv->link_down);
11944 cancel_delayed_work_sync(&priv->led_link_on);
11945 cancel_delayed_work_sync(&priv->led_link_off);
11946 cancel_delayed_work_sync(&priv->led_act_off);
11947 cancel_work_sync(&priv->merge_networks);
11948
11949 /* Free MAC hash list for ADHOC */
11950 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11951 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11952 list_del(p);
11953 kfree(list_entry(p, struct ipw_ibss_seq, list));
11954 }
11955 }
11956
11957 kfree(priv->error);
11958 priv->error = NULL;
11959
11960 #ifdef CONFIG_IPW2200_PROMISCUOUS
11961 ipw_prom_free(priv);
11962 #endif
11963
11964 free_irq(pdev->irq, priv);
11965 iounmap(priv->hw_base);
11966 pci_release_regions(pdev);
11967 pci_disable_device(pdev);
11968 pci_set_drvdata(pdev, NULL);
11969 /* wiphy_unregister needs to be here, before free_libipw */
11970 wiphy_unregister(priv->ieee->wdev.wiphy);
11971 kfree(priv->ieee->a_band.channels);
11972 kfree(priv->ieee->bg_band.channels);
11973 free_libipw(priv->net_dev, 0);
11974 free_firmware();
11975 }
11976
11977 #ifdef CONFIG_PM
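/* Legacy PCI power management: power the device down on suspend and
 * record the time, so that cached scan results can be aged on resume
 * before the device is brought back up via the 'up' work item. */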
11978 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11979 {
11980 struct ipw_priv *priv = pci_get_drvdata(pdev);
11981 struct net_device *dev = priv->net_dev;
11982
11983 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11984
11985 /* Take down the device; powers it off, etc. */
11986 ipw_down(priv);
11987
11988 /* Remove the PRESENT state of the device */
11989 netif_device_detach(dev);
11990
11991 pci_save_state(pdev);
11992 pci_disable_device(pdev);
11993 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11994
11995 priv->suspend_at = get_seconds();
11996
11997 return 0;
11998 }
11999
12000 static int ipw_pci_resume(struct pci_dev *pdev)
12001 {
12002 struct ipw_priv *priv = pci_get_drvdata(pdev);
12003 struct net_device *dev = priv->net_dev;
12004 int err;
12005 u32 val;
12006
12007 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
12008
12009 pci_set_power_state(pdev, PCI_D0);
12010 err = pci_enable_device(pdev);
12011 if (err) {
12012 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
12013 dev->name);
12014 return err;
12015 }
12016 pci_restore_state(pdev);
12017
12018 /*
12019 * Suspend/Resume resets the PCI configuration space, so we have to
12020 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
12021 * from interfering with C3 CPU state. pci_restore_state won't help
12022 	 * here since it only restores the first 64 bytes of the PCI config header.
12023 */
12024 pci_read_config_dword(pdev, 0x40, &val);
12025 if ((val & 0x0000ff00) != 0)
12026 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
12027
12028 /* Set the device back into the PRESENT state; this will also wake
12029 	 * the queue if needed. */
12030 netif_device_attach(dev);
12031
12032 priv->suspend_time = get_seconds() - priv->suspend_at;
12033
12034 /* Bring the device back up */
12035 schedule_work(&priv->up);
12036
12037 return 0;
12038 }
12039 #endif
12040
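/* Shutdown/reboot hook: just power the device off. */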
12041 static void ipw_pci_shutdown(struct pci_dev *pdev)
12042 {
12043 struct ipw_priv *priv = pci_get_drvdata(pdev);
12044
12045 /* Take down the device; powers it off, etc. */
12046 ipw_down(priv);
12047
12048 pci_disable_device(pdev);
12049 }
12050
12051 /* driver initialization stuff */
12052 static struct pci_driver ipw_driver = {
12053 .name = DRV_NAME,
12054 .id_table = card_ids,
12055 .probe = ipw_pci_probe,
12056 .remove = ipw_pci_remove,
12057 #ifdef CONFIG_PM
12058 .suspend = ipw_pci_suspend,
12059 .resume = ipw_pci_resume,
12060 #endif
12061 .shutdown = ipw_pci_shutdown,
12062 };
12063
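/* Module init/exit: register the PCI driver and expose the debug_level
 * attribute in the driver's sysfs directory. */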
12064 static int __init ipw_init(void)
12065 {
12066 int ret;
12067
12068 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12069 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12070
12071 ret = pci_register_driver(&ipw_driver);
12072 if (ret) {
12073 IPW_ERROR("Unable to initialize PCI module\n");
12074 return ret;
12075 }
12076
12077 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12078 if (ret) {
12079 IPW_ERROR("Unable to create driver sysfs file\n");
12080 pci_unregister_driver(&ipw_driver);
12081 return ret;
12082 }
12083
12084 return ret;
12085 }
12086
12087 static void __exit ipw_exit(void)
12088 {
12089 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12090 pci_unregister_driver(&ipw_driver);
12091 }
12092
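/*
 * Module parameters.  As an illustrative example only (the values here
 * are arbitrary, not recommendations), the driver might be loaded with:
 *
 *   modprobe ipw2200 led=1 channel=6 hwcrypto=0
 *
 * using the parameter names declared below.
 */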
12093 module_param(disable, int, 0444);
12094 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12095
12096 module_param(associate, int, 0444);
12097 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12098
12099 module_param(auto_create, int, 0444);
12100 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12101
12102 module_param_named(led, led_support, int, 0444);
12103 MODULE_PARM_DESC(led, "enable LED control on some systems (default 1 [on])");
12104
12105 module_param(debug, int, 0444);
12106 MODULE_PARM_DESC(debug, "debug output mask");
12107
12108 module_param_named(channel, default_channel, int, 0444);
12109 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
12110
12111 #ifdef CONFIG_IPW2200_PROMISCUOUS
12112 module_param(rtap_iface, int, 0444);
12113 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 = create, default 0 = do not create)");
12114 #endif
12115
12116 #ifdef CONFIG_IPW2200_QOS
12117 module_param(qos_enable, int, 0444);
12118 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12119
12120 module_param(qos_burst_enable, int, 0444);
12121 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12122
12123 module_param(qos_no_ack_mask, int, 0444);
12124 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12125
12126 module_param(burst_duration_CCK, int, 0444);
12127 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12128
12129 module_param(burst_duration_OFDM, int, 0444);
12130 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12131 #endif /* CONFIG_IPW2200_QOS */
12132
12133 #ifdef CONFIG_IPW2200_MONITOR
12134 module_param_named(mode, network_mode, int, 0444);
12135 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12136 #else
12137 module_param_named(mode, network_mode, int, 0444);
12138 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12139 #endif
12140
12141 module_param(bt_coexist, int, 0444);
12142 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12143
12144 module_param(hwcrypto, int, 0444);
12145 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12146
12147 module_param(cmdlog, int, 0444);
12148 MODULE_PARM_DESC(cmdlog,
12149 "allocate a ring buffer for logging firmware commands");
12150
12151 module_param(roaming, int, 0444);
12152 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12153
12154 module_param(antenna, int, 0444);
12155 MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");
12156
12157 module_exit(ipw_exit);
12158 module_init(ipw_init);